cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

offload.c (56703B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
      2/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
      3
      4#include <linux/skbuff.h>
      5#include <net/devlink.h>
      6#include <net/pkt_cls.h>
      7
      8#include "cmsg.h"
      9#include "main.h"
     10#include "conntrack.h"
     11#include "../nfpcore/nfp_cpp.h"
     12#include "../nfpcore/nfp_nsp.h"
     13#include "../nfp_app.h"
     14#include "../nfp_main.h"
     15#include "../nfp_net.h"
     16#include "../nfp_port.h"
     17
     18#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
     19	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
     20	 TCPHDR_PSH | TCPHDR_URG)
     21
     22#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
     23	(FLOW_DIS_IS_FRAGMENT | \
     24	 FLOW_DIS_FIRST_FRAG)
     25
     26#define NFP_FLOWER_WHITELIST_DISSECTOR \
     27	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
     28	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
     29	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
     30	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
     31	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
     32	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
     33	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
     34	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
     35	 BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
     36	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
     37	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
     38	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
     39	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     40	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
     41	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
     42	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
     43	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
     44	 BIT(FLOW_DISSECTOR_KEY_CT) | \
     45	 BIT(FLOW_DISSECTOR_KEY_META) | \
     46	 BIT(FLOW_DISSECTOR_KEY_IP))
     47
     48#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
     49	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     50	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
     51	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
     52	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
     53	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
     54	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
     55	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))
     56
     57#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
     58	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     59	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
     60
     61#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
     62	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
     63	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
     64
     65#define NFP_FLOWER_MERGE_FIELDS \
     66	(NFP_FLOWER_LAYER_PORT | \
     67	 NFP_FLOWER_LAYER_MAC | \
     68	 NFP_FLOWER_LAYER_TP | \
     69	 NFP_FLOWER_LAYER_IPV4 | \
     70	 NFP_FLOWER_LAYER_IPV6)
     71
     72#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
     73	(NFP_FLOWER_LAYER_EXT_META | \
     74	 NFP_FLOWER_LAYER_PORT | \
     75	 NFP_FLOWER_LAYER_MAC | \
     76	 NFP_FLOWER_LAYER_IPV4 | \
     77	 NFP_FLOWER_LAYER_IPV6)
     78
     79struct nfp_flower_merge_check {
     80	union {
     81		struct {
     82			__be16 tci;
     83			struct nfp_flower_mac_mpls l2;
     84			struct nfp_flower_tp_ports l4;
     85			union {
     86				struct nfp_flower_ipv4 ipv4;
     87				struct nfp_flower_ipv6 ipv6;
     88			};
     89		};
     90		unsigned long vals[8];
     91	};
     92};
     93
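/* Build a flower control message of the given type containing the rule
 * metadata, unmasked key, mask and actions and hand it to the firmware.
 * Metadata lengths are converted to long words for the message and then
 * restored to bytes for the host copy.
 */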
     94int
     95nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
     96		     u8 mtype)
     97{
     98	u32 meta_len, key_len, mask_len, act_len, tot_len;
     99	struct sk_buff *skb;
    100	unsigned char *msg;
    101
     102	meta_len = sizeof(struct nfp_fl_rule_metadata);
    103	key_len = nfp_flow->meta.key_len;
    104	mask_len = nfp_flow->meta.mask_len;
    105	act_len = nfp_flow->meta.act_len;
    106
    107	tot_len = meta_len + key_len + mask_len + act_len;
    108
    109	/* Convert to long words as firmware expects
    110	 * lengths in units of NFP_FL_LW_SIZ.
    111	 */
    112	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
    113	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
    114	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
    115
    116	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
    117	if (!skb)
    118		return -ENOMEM;
    119
    120	msg = nfp_flower_cmsg_get_data(skb);
    121	memcpy(msg, &nfp_flow->meta, meta_len);
    122	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
    123	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
    124	memcpy(&msg[meta_len + key_len + mask_len],
    125	       nfp_flow->action_data, act_len);
    126
    127	/* Convert back to bytes as software expects
    128	 * lengths in units of bytes.
    129	 */
    130	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
    131	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
    132	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
    133
    134	nfp_ctrl_tx(app->ctrl, skb);
    135
    136	return 0;
    137}
    138
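/* Return true if the rule matches on any field above the MAC layer. */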
    139static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule)
    140{
    141	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
    142	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
    143	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
    144	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
    145}
    146
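/* Return true if the rule matches on any field above the IP layer. */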
    147static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule)
    148{
    149	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
    150	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
    151}
    152
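/* Account for matched Geneve options in the extended key layer and key
 * size, rejecting option lengths the firmware cannot offload.
 */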
    153static int
    154nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
    155			  u32 *key_layer_two, int *key_size, bool ipv6,
    156			  struct netlink_ext_ack *extack)
    157{
    158	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
    159	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
    160		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
    161		return -EOPNOTSUPP;
    162	}
    163
    164	if (enc_opts->len > 0) {
    165		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
    166		*key_size += sizeof(struct nfp_flower_geneve_options);
    167	}
    168
    169	return 0;
    170}
    171
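/* Determine the tunnel type (VXLAN or Geneve) from the UDP destination
 * port and update the key layers and key size accordingly, including the
 * IPv6 tunnel and Geneve option extensions when the firmware supports them.
 */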
    172static int
    173nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
    174			      struct flow_dissector_key_enc_opts *enc_op,
    175			      u32 *key_layer_two, u8 *key_layer, int *key_size,
    176			      struct nfp_flower_priv *priv,
    177			      enum nfp_flower_tun_type *tun_type, bool ipv6,
    178			      struct netlink_ext_ack *extack)
    179{
    180	int err;
    181
    182	switch (enc_ports->dst) {
    183	case htons(IANA_VXLAN_UDP_PORT):
    184		*tun_type = NFP_FL_TUNNEL_VXLAN;
    185		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
    186
    187		if (ipv6) {
    188			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
    189			*key_size += sizeof(struct nfp_flower_ext_meta);
    190			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
    191			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
    192		} else {
    193			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
    194		}
    195
    196		if (enc_op) {
    197			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
    198			return -EOPNOTSUPP;
    199		}
    200		break;
    201	case htons(GENEVE_UDP_PORT):
    202		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
    203			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
    204			return -EOPNOTSUPP;
    205		}
    206		*tun_type = NFP_FL_TUNNEL_GENEVE;
    207		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
    208		*key_size += sizeof(struct nfp_flower_ext_meta);
    209		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
    210
    211		if (ipv6) {
    212			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
    213			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
    214		} else {
    215			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
    216		}
    217
    218		if (!enc_op)
    219			break;
    220		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
    221			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
    222			return -EOPNOTSUPP;
    223		}
    224		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
    225						ipv6, extack);
    226		if (err)
    227			return err;
    228		break;
    229	default:
    230		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
    231		return -EOPNOTSUPP;
    232	}
    233
    234	return 0;
    235}
    236
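/* Walk the dissector keys used by the rule and calculate which flower key
 * layers are needed and how large the resulting match key will be.
 * Returns -EOPNOTSUPP for matches the firmware cannot offload.
 */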
    237int
    238nfp_flower_calculate_key_layers(struct nfp_app *app,
    239				struct net_device *netdev,
    240				struct nfp_fl_key_ls *ret_key_ls,
    241				struct flow_rule *rule,
    242				enum nfp_flower_tun_type *tun_type,
    243				struct netlink_ext_ack *extack)
    244{
    245	struct flow_dissector *dissector = rule->match.dissector;
     246	struct flow_match_basic basic = { NULL, NULL };
    247	struct nfp_flower_priv *priv = app->priv;
    248	u32 key_layer_two;
    249	u8 key_layer;
    250	int key_size;
    251	int err;
    252
    253	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
    254		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
    255		return -EOPNOTSUPP;
    256	}
    257
    258	/* If any tun dissector is used then the required set must be used. */
    259	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
    260	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
    261	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
    262	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
    263	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
    264		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
    265		return -EOPNOTSUPP;
    266	}
    267
    268	key_layer_two = 0;
    269	key_layer = NFP_FLOWER_LAYER_PORT;
    270	key_size = sizeof(struct nfp_flower_meta_tci) +
    271		   sizeof(struct nfp_flower_in_port);
    272
    273	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
    274	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
    275		key_layer |= NFP_FLOWER_LAYER_MAC;
    276		key_size += sizeof(struct nfp_flower_mac_mpls);
    277	}
    278
    279	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
    280		struct flow_match_vlan vlan;
    281
    282		flow_rule_match_vlan(rule, &vlan);
    283		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
    284		    vlan.key->vlan_priority) {
    285			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
    286			return -EOPNOTSUPP;
    287		}
    288		if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
    289		    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
    290			key_layer |= NFP_FLOWER_LAYER_EXT_META;
    291			key_size += sizeof(struct nfp_flower_ext_meta);
    292			key_size += sizeof(struct nfp_flower_vlan);
    293			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
    294		}
    295	}
    296
    297	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
    298		struct flow_match_vlan cvlan;
    299
    300		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
    301			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
    302			return -EOPNOTSUPP;
    303		}
    304
    305		flow_rule_match_vlan(rule, &cvlan);
    306		if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
    307			key_layer |= NFP_FLOWER_LAYER_EXT_META;
    308			key_size += sizeof(struct nfp_flower_ext_meta);
    309			key_size += sizeof(struct nfp_flower_vlan);
    310			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
    311		}
    312	}
    313
    314	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
    315		struct flow_match_enc_opts enc_op = { NULL, NULL };
    316		struct flow_match_ipv4_addrs ipv4_addrs;
    317		struct flow_match_ipv6_addrs ipv6_addrs;
    318		struct flow_match_control enc_ctl;
    319		struct flow_match_ports enc_ports;
    320		bool ipv6_tun = false;
    321
    322		flow_rule_match_enc_control(rule, &enc_ctl);
    323
    324		if (enc_ctl.mask->addr_type != 0xffff) {
    325			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
    326			return -EOPNOTSUPP;
    327		}
    328
    329		ipv6_tun = enc_ctl.key->addr_type ==
    330				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
    331		if (ipv6_tun &&
    332		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
    333			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
    334			return -EOPNOTSUPP;
    335		}
    336
    337		if (!ipv6_tun &&
    338		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
    339			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
    340			return -EOPNOTSUPP;
    341		}
    342
    343		if (ipv6_tun) {
    344			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
    345			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
    346				       sizeof(ipv6_addrs.mask->dst))) {
    347				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
    348				return -EOPNOTSUPP;
    349			}
    350		} else {
    351			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
    352			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
    353				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
    354				return -EOPNOTSUPP;
    355			}
    356		}
    357
    358		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
    359			flow_rule_match_enc_opts(rule, &enc_op);
    360
    361		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
    362			/* check if GRE, which has no enc_ports */
    363			if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
    364				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
    365				return -EOPNOTSUPP;
    366			}
    367
    368			*tun_type = NFP_FL_TUNNEL_GRE;
    369			key_layer |= NFP_FLOWER_LAYER_EXT_META;
    370			key_size += sizeof(struct nfp_flower_ext_meta);
    371			key_layer_two |= NFP_FLOWER_LAYER2_GRE;
    372
    373			if (ipv6_tun) {
    374				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
    375				key_size +=
    376					sizeof(struct nfp_flower_ipv6_udp_tun);
    377			} else {
    378				key_size +=
    379					sizeof(struct nfp_flower_ipv4_udp_tun);
    380			}
    381
    382			if (enc_op.key) {
    383				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
    384				return -EOPNOTSUPP;
    385			}
    386		} else {
    387			flow_rule_match_enc_ports(rule, &enc_ports);
    388			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
    389				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
    390				return -EOPNOTSUPP;
    391			}
    392
    393			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
    394							    enc_op.key,
    395							    &key_layer_two,
    396							    &key_layer,
    397							    &key_size, priv,
    398							    tun_type, ipv6_tun,
    399							    extack);
    400			if (err)
    401				return err;
    402
    403			/* Ensure the ingress netdev matches the expected
    404			 * tun type.
    405			 */
    406			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
    407				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
    408				return -EOPNOTSUPP;
    409			}
    410		}
    411	}
    412
    413	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
    414		flow_rule_match_basic(rule, &basic);
    415
    416	if (basic.mask && basic.mask->n_proto) {
    417		/* Ethernet type is present in the key. */
    418		switch (basic.key->n_proto) {
    419		case cpu_to_be16(ETH_P_IP):
    420			key_layer |= NFP_FLOWER_LAYER_IPV4;
    421			key_size += sizeof(struct nfp_flower_ipv4);
    422			break;
    423
    424		case cpu_to_be16(ETH_P_IPV6):
    425			key_layer |= NFP_FLOWER_LAYER_IPV6;
    426			key_size += sizeof(struct nfp_flower_ipv6);
    427			break;
    428
    429		/* Currently we do not offload ARP
    430		 * because we rely on it to get to the host.
    431		 */
    432		case cpu_to_be16(ETH_P_ARP):
    433			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
    434			return -EOPNOTSUPP;
    435
    436		case cpu_to_be16(ETH_P_MPLS_UC):
    437		case cpu_to_be16(ETH_P_MPLS_MC):
    438			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
    439				key_layer |= NFP_FLOWER_LAYER_MAC;
    440				key_size += sizeof(struct nfp_flower_mac_mpls);
    441			}
    442			break;
    443
    444		/* Will be included in layer 2. */
    445		case cpu_to_be16(ETH_P_8021Q):
    446			break;
    447
    448		default:
    449			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
    450			return -EOPNOTSUPP;
    451		}
    452	} else if (nfp_flower_check_higher_than_mac(rule)) {
    453		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
    454		return -EOPNOTSUPP;
    455	}
    456
    457	if (basic.mask && basic.mask->ip_proto) {
    458		switch (basic.key->ip_proto) {
    459		case IPPROTO_TCP:
    460		case IPPROTO_UDP:
    461		case IPPROTO_SCTP:
    462		case IPPROTO_ICMP:
    463		case IPPROTO_ICMPV6:
    464			key_layer |= NFP_FLOWER_LAYER_TP;
    465			key_size += sizeof(struct nfp_flower_tp_ports);
    466			break;
    467		}
    468	}
    469
    470	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
    471	    nfp_flower_check_higher_than_l3(rule)) {
    472		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
    473		return -EOPNOTSUPP;
    474	}
    475
    476	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
    477		struct flow_match_tcp tcp;
    478		u32 tcp_flags;
    479
    480		flow_rule_match_tcp(rule, &tcp);
    481		tcp_flags = be16_to_cpu(tcp.key->flags);
    482
    483		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
    484			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
    485			return -EOPNOTSUPP;
    486		}
    487
    488		/* We only support PSH and URG flags when either
    489		 * FIN, SYN or RST is present as well.
    490		 */
    491		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
    492		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
    493			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
    494			return -EOPNOTSUPP;
    495		}
    496
     497		/* We need to store TCP flags in either the IPv4 or IPv6 key
     498		 * space, thus we need to ensure we include an IPv4/IPv6 key
     499		 * layer if we have not done so already.
    500		 */
    501		if (!basic.key) {
    502			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
    503			return -EOPNOTSUPP;
    504		}
    505
    506		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
    507		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
    508			switch (basic.key->n_proto) {
    509			case cpu_to_be16(ETH_P_IP):
    510				key_layer |= NFP_FLOWER_LAYER_IPV4;
    511				key_size += sizeof(struct nfp_flower_ipv4);
    512				break;
    513
    514			case cpu_to_be16(ETH_P_IPV6):
     515				key_layer |= NFP_FLOWER_LAYER_IPV6;
    516				key_size += sizeof(struct nfp_flower_ipv6);
    517				break;
    518
    519			default:
    520				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
    521				return -EOPNOTSUPP;
    522			}
    523		}
    524	}
    525
    526	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
    527		struct flow_match_control ctl;
    528
    529		flow_rule_match_control(rule, &ctl);
    530		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
    531			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
    532			return -EOPNOTSUPP;
    533		}
    534	}
    535
    536	ret_key_ls->key_layer = key_layer;
    537	ret_key_ls->key_layer_two = key_layer_two;
    538	ret_key_ls->key_size = key_size;
    539
    540	return 0;
    541}
    542
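/* Allocate a flow payload with key and mask buffers sized from the
 * calculated key layers and a maximally sized action buffer.
 * Returns NULL on allocation failure.
 */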
    543struct nfp_fl_payload *
    544nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
    545{
    546	struct nfp_fl_payload *flow_pay;
    547
    548	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
    549	if (!flow_pay)
    550		return NULL;
    551
    552	flow_pay->meta.key_len = key_layer->key_size;
    553	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
    554	if (!flow_pay->unmasked_data)
    555		goto err_free_flow;
    556
    557	flow_pay->meta.mask_len = key_layer->key_size;
    558	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
    559	if (!flow_pay->mask_data)
    560		goto err_free_unmasked;
    561
    562	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
    563	if (!flow_pay->action_data)
    564		goto err_free_mask;
    565
    566	flow_pay->nfp_tun_ipv4_addr = 0;
    567	flow_pay->nfp_tun_ipv6 = NULL;
    568	flow_pay->meta.flags = 0;
    569	INIT_LIST_HEAD(&flow_pay->linked_flows);
    570	flow_pay->in_hw = false;
    571	flow_pay->pre_tun_rule.dev = NULL;
    572
    573	return flow_pay;
    574
    575err_free_mask:
    576	kfree(flow_pay->mask_data);
    577err_free_unmasked:
    578	kfree(flow_pay->unmasked_data);
    579err_free_flow:
    580	kfree(flow_pay);
    581	return NULL;
    582}
    583
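/* Fold the fields rewritten by the flow's actions into the merge check
 * masks, so anything set by an action counts as matched. Optionally report
 * the last action id seen and the number of output actions.
 */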
    584static int
    585nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
    586				     struct nfp_flower_merge_check *merge,
    587				     u8 *last_act_id, int *act_out)
    588{
    589	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
    590	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
    591	struct nfp_fl_set_ip4_addrs *ipv4_add;
    592	struct nfp_fl_set_ipv6_addr *ipv6_add;
    593	struct nfp_fl_push_vlan *push_vlan;
    594	struct nfp_fl_pre_tunnel *pre_tun;
    595	struct nfp_fl_set_tport *tport;
    596	struct nfp_fl_set_eth *eth;
    597	struct nfp_fl_act_head *a;
    598	unsigned int act_off = 0;
    599	bool ipv6_tun = false;
    600	u8 act_id = 0;
    601	u8 *ports;
    602	int i;
    603
    604	while (act_off < flow->meta.act_len) {
    605		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
    606		act_id = a->jump_id;
    607
    608		switch (act_id) {
    609		case NFP_FL_ACTION_OPCODE_OUTPUT:
    610			if (act_out)
    611				(*act_out)++;
    612			break;
    613		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
    614			push_vlan = (struct nfp_fl_push_vlan *)a;
    615			if (push_vlan->vlan_tci)
    616				merge->tci = cpu_to_be16(0xffff);
    617			break;
    618		case NFP_FL_ACTION_OPCODE_POP_VLAN:
    619			merge->tci = cpu_to_be16(0);
    620			break;
    621		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
    622			/* New tunnel header means l2 to l4 can be matched. */
    623			eth_broadcast_addr(&merge->l2.mac_dst[0]);
    624			eth_broadcast_addr(&merge->l2.mac_src[0]);
    625			memset(&merge->l4, 0xff,
    626			       sizeof(struct nfp_flower_tp_ports));
    627			if (ipv6_tun)
    628				memset(&merge->ipv6, 0xff,
    629				       sizeof(struct nfp_flower_ipv6));
    630			else
    631				memset(&merge->ipv4, 0xff,
    632				       sizeof(struct nfp_flower_ipv4));
    633			break;
    634		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
    635			eth = (struct nfp_fl_set_eth *)a;
    636			for (i = 0; i < ETH_ALEN; i++)
    637				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
    638			for (i = 0; i < ETH_ALEN; i++)
    639				merge->l2.mac_src[i] |=
    640					eth->eth_addr_mask[ETH_ALEN + i];
    641			break;
    642		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
    643			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
    644			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
    645			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
    646			break;
    647		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
    648			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
    649			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
    650			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
    651			break;
    652		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
    653			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
    654			for (i = 0; i < 4; i++)
    655				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
    656					ipv6_add->ipv6[i].mask;
    657			break;
    658		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
    659			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
    660			for (i = 0; i < 4; i++)
    661				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
    662					ipv6_add->ipv6[i].mask;
    663			break;
    664		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
    665			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
    666			merge->ipv6.ip_ext.ttl |=
    667				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
    668			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
    669			merge->ipv6.ipv6_flow_label_exthdr |=
    670				ipv6_tc_hl_fl->ipv6_label_mask;
    671			break;
    672		case NFP_FL_ACTION_OPCODE_SET_UDP:
    673		case NFP_FL_ACTION_OPCODE_SET_TCP:
    674			tport = (struct nfp_fl_set_tport *)a;
    675			ports = (u8 *)&merge->l4.port_src;
    676			for (i = 0; i < 4; i++)
    677				ports[i] |= tport->tp_port_mask[i];
    678			break;
    679		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
    680			pre_tun = (struct nfp_fl_pre_tunnel *)a;
    681			ipv6_tun = be16_to_cpu(pre_tun->flags) &
    682					NFP_FL_PRE_TUN_IPV6;
    683			break;
    684		case NFP_FL_ACTION_OPCODE_PRE_LAG:
    685		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
    686			break;
    687		default:
    688			return -EOPNOTSUPP;
    689		}
    690
    691		act_off += a->len_lw << NFP_FL_LW_SIZ;
    692	}
    693
    694	if (last_act_id)
    695		*last_act_id = act_id;
    696
    697	return 0;
    698}
    699
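/* Copy the masks of the key layers present in the flow into the merge
 * check structure. Unless extra_fields is set, layers outside the
 * mergeable set cause the flow to be rejected.
 */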
    700static int
    701nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
    702				struct nfp_flower_merge_check *merge,
    703				bool extra_fields)
    704{
    705	struct nfp_flower_meta_tci *meta_tci;
    706	u8 *mask = flow->mask_data;
    707	u8 key_layer, match_size;
    708
    709	memset(merge, 0, sizeof(struct nfp_flower_merge_check));
    710
    711	meta_tci = (struct nfp_flower_meta_tci *)mask;
    712	key_layer = meta_tci->nfp_flow_key_layer;
    713
    714	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
    715		return -EOPNOTSUPP;
    716
    717	merge->tci = meta_tci->tci;
    718	mask += sizeof(struct nfp_flower_meta_tci);
    719
    720	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
    721		mask += sizeof(struct nfp_flower_ext_meta);
    722
    723	mask += sizeof(struct nfp_flower_in_port);
    724
    725	if (key_layer & NFP_FLOWER_LAYER_MAC) {
    726		match_size = sizeof(struct nfp_flower_mac_mpls);
    727		memcpy(&merge->l2, mask, match_size);
    728		mask += match_size;
    729	}
    730
    731	if (key_layer & NFP_FLOWER_LAYER_TP) {
    732		match_size = sizeof(struct nfp_flower_tp_ports);
    733		memcpy(&merge->l4, mask, match_size);
    734		mask += match_size;
    735	}
    736
    737	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
    738		match_size = sizeof(struct nfp_flower_ipv4);
    739		memcpy(&merge->ipv4, mask, match_size);
    740	}
    741
    742	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
    743		match_size = sizeof(struct nfp_flower_ipv6);
    744		memcpy(&merge->ipv6, mask, match_size);
    745	}
    746
    747	return 0;
    748}
    749
    750static int
    751nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
    752		     struct nfp_fl_payload *sub_flow2)
    753{
    754	/* Two flows can be merged if sub_flow2 only matches on bits that are
    755	 * either matched by sub_flow1 or set by a sub_flow1 action. This
    756	 * ensures that every packet that hits sub_flow1 and recirculates is
    757	 * guaranteed to hit sub_flow2.
    758	 */
    759	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
    760	int err, act_out = 0;
    761	u8 last_act_id = 0;
    762
    763	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
    764					      true);
    765	if (err)
    766		return err;
    767
    768	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
    769					      false);
    770	if (err)
    771		return err;
    772
    773	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
    774						   &last_act_id, &act_out);
    775	if (err)
    776		return err;
    777
    778	/* Must only be 1 output action and it must be the last in sequence. */
    779	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
    780		return -EOPNOTSUPP;
    781
    782	/* Reject merge if sub_flow2 matches on something that is not matched
    783	 * on or set in an action by sub_flow1.
    784	 */
    785	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
    786			    sub_flow1_merge.vals,
    787			    sizeof(struct nfp_flower_merge_check) * 8);
    788	if (err)
    789		return -EINVAL;
    790
    791	return 0;
    792}
    793
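/* Copy any leading pre-actions (pre-tunnel, pre-LAG) from act_src to
 * act_dst and return the number of bytes copied. *tunnel_act is set if a
 * pre-tunnel action was seen.
 */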
    794static unsigned int
    795nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
    796			    bool *tunnel_act)
    797{
    798	unsigned int act_off = 0, act_len;
    799	struct nfp_fl_act_head *a;
    800	u8 act_id = 0;
    801
    802	while (act_off < len) {
    803		a = (struct nfp_fl_act_head *)&act_src[act_off];
    804		act_len = a->len_lw << NFP_FL_LW_SIZ;
    805		act_id = a->jump_id;
    806
    807		switch (act_id) {
    808		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
    809			if (tunnel_act)
    810				*tunnel_act = true;
    811			fallthrough;
    812		case NFP_FL_ACTION_OPCODE_PRE_LAG:
    813			memcpy(act_dst + act_off, act_src + act_off, act_len);
    814			break;
    815		default:
    816			return act_off;
    817		}
    818
    819		act_off += act_len;
    820	}
    821
    822	return act_off;
    823}
    824
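/* Verify that the actions following a tunnel push consist of at most one
 * leading VLAN push followed only by output actions, returning any VLAN
 * push found through @vlan.
 */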
    825static int
    826nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
    827{
    828	struct nfp_fl_act_head *a;
    829	unsigned int act_off = 0;
    830
    831	while (act_off < len) {
    832		a = (struct nfp_fl_act_head *)&acts[act_off];
    833
    834		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
    835			*vlan = (struct nfp_fl_push_vlan *)a;
    836		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
    837			return -EOPNOTSUPP;
    838
    839		act_off += a->len_lw << NFP_FL_LW_SIZ;
    840	}
    841
    842	/* Ensure any VLAN push also has an egress action. */
    843	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
    844		return -EOPNOTSUPP;
    845
    846	return 0;
    847}
    848
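/* Fold a post-tunnel VLAN push into the set tunnel action so the outer
 * VLAN is added when the firmware pushes the tunnel header.
 */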
    849static int
    850nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
    851{
    852	struct nfp_fl_set_tun *tun;
    853	struct nfp_fl_act_head *a;
    854	unsigned int act_off = 0;
    855
    856	while (act_off < len) {
    857		a = (struct nfp_fl_act_head *)&acts[act_off];
    858
    859		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
    860			tun = (struct nfp_fl_set_tun *)a;
    861			tun->outer_vlan_tpid = vlan->vlan_tpid;
    862			tun->outer_vlan_tci = vlan->vlan_tci;
    863
    864			return 0;
    865		}
    866
    867		act_off += a->len_lw << NFP_FL_LW_SIZ;
    868	}
    869
    870	/* Return error if no tunnel action is found. */
    871	return -EOPNOTSUPP;
    872}
    873
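/* Build the merge flow action list: pre-actions from both sub-flows first,
 * then the remaining sub_flow1 actions minus its final output, then the
 * sub_flow2 actions, with special handling for a post-tunnel VLAN push.
 */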
    874static int
    875nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
    876			struct nfp_fl_payload *sub_flow2,
    877			struct nfp_fl_payload *merge_flow)
    878{
    879	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
    880	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
    881	bool tunnel_act = false;
    882	char *merge_act;
    883	int err;
    884
    885	/* The last action of sub_flow1 must be output - do not merge this. */
    886	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
    887	sub2_act_len = sub_flow2->meta.act_len;
    888
    889	if (!sub2_act_len)
    890		return -EINVAL;
    891
    892	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
    893		return -EINVAL;
    894
    895	/* A shortcut can only be applied if there is a single action. */
    896	if (sub1_act_len)
    897		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
    898	else
    899		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
    900
    901	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
    902	merge_act = merge_flow->action_data;
    903
    904	/* Copy any pre-actions to the start of merge flow action list. */
    905	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
    906					       sub_flow1->action_data,
    907					       sub1_act_len, &tunnel_act);
    908	merge_act += pre_off1;
    909	sub1_act_len -= pre_off1;
    910	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
    911					       sub_flow2->action_data,
    912					       sub2_act_len, NULL);
    913	merge_act += pre_off2;
    914	sub2_act_len -= pre_off2;
    915
    916	/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
    917	 * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
    918	 * valid merge.
    919	 */
    920	if (tunnel_act) {
    921		char *post_tun_acts = &sub_flow2->action_data[pre_off2];
    922
    923		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
    924						  &post_tun_push_vlan);
    925		if (err)
    926			return err;
    927
    928		if (post_tun_push_vlan) {
    929			pre_off2 += sizeof(*post_tun_push_vlan);
    930			sub2_act_len -= sizeof(*post_tun_push_vlan);
    931		}
    932	}
    933
    934	/* Copy remaining actions from sub_flows 1 and 2. */
    935	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
    936
    937	if (post_tun_push_vlan) {
    938		/* Update tunnel action in merge to include VLAN push. */
    939		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
    940						 post_tun_push_vlan);
    941		if (err)
    942			return err;
    943
    944		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
    945	}
    946
    947	merge_act += sub1_act_len;
    948	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
    949
    950	return 0;
    951}
    952
    953/* Flow link code should only be accessed under RTNL. */
    954static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
    955{
    956	list_del(&link->merge_flow.list);
    957	list_del(&link->sub_flow.list);
    958	kfree(link);
    959}
    960
    961static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
    962				    struct nfp_fl_payload *sub_flow)
    963{
    964	struct nfp_fl_payload_link *link;
    965
    966	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
    967		if (link->sub_flow.flow == sub_flow) {
    968			nfp_flower_unlink_flow(link);
    969			return;
    970		}
    971}
    972
    973static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
    974				 struct nfp_fl_payload *sub_flow)
    975{
    976	struct nfp_fl_payload_link *link;
    977
    978	link = kmalloc(sizeof(*link), GFP_KERNEL);
    979	if (!link)
    980		return -ENOMEM;
    981
    982	link->merge_flow.flow = merge_flow;
    983	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
    984	link->sub_flow.flow = sub_flow;
    985	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
    986
    987	return 0;
    988}
    989
    990/**
     991 * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into a single flow.
     992 * @app:	Pointer to the APP handle
     993 * @sub_flow1:	Initial flow matched to produce merge hint
     994 * @sub_flow2:	Post recirculation flow matched in merge hint
     995 *
     996 * Combines 2 flows (if valid) into a single flow, removing the initial flow
     997 * from hw and offloading the new, merged flow.
     998 *
     999 * Return: negative value on error, 0 on success.
   1000 */
   1001int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
   1002				     struct nfp_fl_payload *sub_flow1,
   1003				     struct nfp_fl_payload *sub_flow2)
   1004{
   1005	struct nfp_flower_priv *priv = app->priv;
   1006	struct nfp_fl_payload *merge_flow;
   1007	struct nfp_fl_key_ls merge_key_ls;
   1008	struct nfp_merge_info *merge_info;
   1009	u64 parent_ctx = 0;
   1010	int err;
   1011
   1012	ASSERT_RTNL();
   1013
   1014	if (sub_flow1 == sub_flow2 ||
   1015	    nfp_flower_is_merge_flow(sub_flow1) ||
   1016	    nfp_flower_is_merge_flow(sub_flow2))
   1017		return -EINVAL;
   1018
   1019	/* check if the two flows are already merged */
   1020	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
   1021	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
   1022	if (rhashtable_lookup_fast(&priv->merge_table,
   1023				   &parent_ctx, merge_table_params)) {
   1024		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
   1025		return 0;
   1026	}
   1027
   1028	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
   1029	if (err)
   1030		return err;
   1031
   1032	merge_key_ls.key_size = sub_flow1->meta.key_len;
   1033
   1034	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
   1035	if (!merge_flow)
   1036		return -ENOMEM;
   1037
   1038	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
   1039	merge_flow->ingress_dev = sub_flow1->ingress_dev;
   1040
   1041	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
   1042	       sub_flow1->meta.key_len);
   1043	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
   1044	       sub_flow1->meta.mask_len);
   1045
   1046	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
   1047	if (err)
   1048		goto err_destroy_merge_flow;
   1049
   1050	err = nfp_flower_link_flows(merge_flow, sub_flow1);
   1051	if (err)
   1052		goto err_destroy_merge_flow;
   1053
   1054	err = nfp_flower_link_flows(merge_flow, sub_flow2);
   1055	if (err)
   1056		goto err_unlink_sub_flow1;
   1057
   1058	err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow,
   1059					merge_flow->ingress_dev, NULL);
   1060	if (err)
   1061		goto err_unlink_sub_flow2;
   1062
   1063	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
   1064				     nfp_flower_table_params);
   1065	if (err)
   1066		goto err_release_metadata;
   1067
   1068	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
   1069	if (!merge_info) {
   1070		err = -ENOMEM;
   1071		goto err_remove_rhash;
   1072	}
   1073	merge_info->parent_ctx = parent_ctx;
   1074	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
   1075				     merge_table_params);
   1076	if (err)
   1077		goto err_destroy_merge_info;
   1078
   1079	err = nfp_flower_xmit_flow(app, merge_flow,
   1080				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
   1081	if (err)
   1082		goto err_remove_merge_info;
   1083
   1084	merge_flow->in_hw = true;
   1085	sub_flow1->in_hw = false;
   1086
   1087	return 0;
   1088
   1089err_remove_merge_info:
   1090	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
   1091					    &merge_info->ht_node,
   1092					    merge_table_params));
   1093err_destroy_merge_info:
   1094	kfree(merge_info);
   1095err_remove_rhash:
   1096	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
   1097					    &merge_flow->fl_node,
   1098					    nfp_flower_table_params));
   1099err_release_metadata:
   1100	nfp_modify_flow_metadata(app, merge_flow);
   1101err_unlink_sub_flow2:
   1102	nfp_flower_unlink_flows(merge_flow, sub_flow2);
   1103err_unlink_sub_flow1:
   1104	nfp_flower_unlink_flows(merge_flow, sub_flow1);
   1105err_destroy_merge_flow:
   1106	kfree(merge_flow->action_data);
   1107	kfree(merge_flow->mask_data);
   1108	kfree(merge_flow->unmasked_data);
   1109	kfree(merge_flow);
   1110	return err;
   1111}
   1112
   1113/**
    1114 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
   1115 * @app:	Pointer to the APP handle
   1116 * @flow:	Pointer to NFP flow representation of rule
   1117 * @key_ls:	Pointer to NFP key layers structure
   1118 * @extack:	Netlink extended ACK report
   1119 *
   1120 * Verifies the flow as a pre-tunnel rule.
   1121 *
   1122 * Return: negative value on error, 0 if verified.
   1123 */
   1124static int
   1125nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
   1126				 struct nfp_fl_payload *flow,
   1127				 struct nfp_fl_key_ls *key_ls,
   1128				 struct netlink_ext_ack *extack)
   1129{
   1130	struct nfp_flower_priv *priv = app->priv;
   1131	struct nfp_flower_meta_tci *meta_tci;
   1132	struct nfp_flower_mac_mpls *mac;
   1133	u8 *ext = flow->unmasked_data;
   1134	struct nfp_fl_act_head *act;
   1135	u8 *mask = flow->mask_data;
   1136	bool vlan = false;
   1137	int act_offset;
   1138	u8 key_layer;
   1139
   1140	meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
   1141	key_layer = key_ls->key_layer;
   1142	if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
   1143		if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
   1144			u16 vlan_tci = be16_to_cpu(meta_tci->tci);
   1145
   1146			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
   1147			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
   1148			vlan = true;
   1149		} else {
   1150			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
   1151		}
   1152	}
   1153
   1154	if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
   1155		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
   1156		return -EOPNOTSUPP;
   1157	} else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
   1158		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
   1159		return -EOPNOTSUPP;
   1160	}
   1161
   1162	if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
   1163		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
   1164		return -EOPNOTSUPP;
   1165	}
   1166
   1167	if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
   1168	    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
   1169		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
   1170		return -EOPNOTSUPP;
   1171	}
   1172
   1173	if (key_layer & NFP_FLOWER_LAYER_IPV6)
   1174		flow->pre_tun_rule.is_ipv6 = true;
   1175	else
   1176		flow->pre_tun_rule.is_ipv6 = false;
   1177
   1178	/* Skip fields known to exist. */
   1179	mask += sizeof(struct nfp_flower_meta_tci);
   1180	ext += sizeof(struct nfp_flower_meta_tci);
   1181	if (key_ls->key_layer_two) {
   1182		mask += sizeof(struct nfp_flower_ext_meta);
   1183		ext += sizeof(struct nfp_flower_ext_meta);
   1184	}
   1185	mask += sizeof(struct nfp_flower_in_port);
   1186	ext += sizeof(struct nfp_flower_in_port);
   1187
   1188	/* Ensure destination MAC address is fully matched. */
   1189	mac = (struct nfp_flower_mac_mpls *)mask;
   1190	if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
   1191		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
   1192		return -EOPNOTSUPP;
   1193	}
   1194
   1195	/* Ensure source MAC address is fully matched. This is only needed
   1196	 * for firmware with the DECAP_V2 feature enabled. Don't do this
   1197	 * for firmware without this feature to keep old behaviour.
   1198	 */
   1199	if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
   1200		mac = (struct nfp_flower_mac_mpls *)mask;
   1201		if (!is_broadcast_ether_addr(&mac->mac_src[0])) {
   1202			NL_SET_ERR_MSG_MOD(extack,
   1203					   "unsupported pre-tunnel rule: source MAC field must not be masked");
   1204			return -EOPNOTSUPP;
   1205		}
   1206	}
   1207
   1208	if (mac->mpls_lse) {
   1209		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
   1210		return -EOPNOTSUPP;
   1211	}
   1212
   1213	/* Ensure destination MAC address matches pre_tun_dev. */
   1214	mac = (struct nfp_flower_mac_mpls *)ext;
    1215	if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, ETH_ALEN)) {
   1216		NL_SET_ERR_MSG_MOD(extack,
   1217				   "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
   1218		return -EOPNOTSUPP;
   1219	}
   1220
   1221	/* Save mac addresses in pre_tun_rule entry for later use */
   1222	memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN);
   1223	memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN);
   1224
   1225	mask += sizeof(struct nfp_flower_mac_mpls);
   1226	ext += sizeof(struct nfp_flower_mac_mpls);
   1227	if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
   1228	    key_layer & NFP_FLOWER_LAYER_IPV6) {
   1229		/* Flags and proto fields have same offset in IPv4 and IPv6. */
   1230		int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
   1231		int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
   1232		int size;
   1233		int i;
   1234
   1235		size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
   1236			sizeof(struct nfp_flower_ipv4) :
   1237			sizeof(struct nfp_flower_ipv6);
   1238
   1239
   1240		/* Ensure proto and flags are the only IP layer fields. */
   1241		for (i = 0; i < size; i++)
   1242			if (mask[i] && i != ip_flags && i != ip_proto) {
   1243				NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
   1244				return -EOPNOTSUPP;
   1245			}
   1246		ext += size;
   1247		mask += size;
   1248	}
   1249
   1250	if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
   1251		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
   1252			struct nfp_flower_vlan *vlan_tags;
   1253			u16 vlan_tpid;
   1254			u16 vlan_tci;
   1255
   1256			vlan_tags = (struct nfp_flower_vlan *)ext;
   1257
   1258			vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
   1259			vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid);
   1260
   1261			vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
   1262			flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
   1263			flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid);
   1264			vlan = true;
   1265		} else {
   1266			flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
   1267			flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff);
   1268		}
   1269	}
   1270
   1271	/* Action must be a single egress or pop_vlan and egress. */
   1272	act_offset = 0;
   1273	act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
   1274	if (vlan) {
   1275		if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
   1276			NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
   1277			return -EOPNOTSUPP;
   1278		}
   1279
   1280		act_offset += act->len_lw << NFP_FL_LW_SIZ;
   1281		act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
   1282	}
   1283
   1284	if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
   1285		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
   1286		return -EOPNOTSUPP;
   1287	}
   1288
   1289	act_offset += act->len_lw << NFP_FL_LW_SIZ;
   1290
   1291	/* Ensure there are no more actions after egress. */
   1292	if (act_offset != flow->meta.act_len) {
   1293		NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
   1294		return -EOPNOTSUPP;
   1295	}
   1296
   1297	return 0;
   1298}
   1299
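/* Reject rules that match on conntrack state or sit on a non-zero chain;
 * these are not handled by the plain offload path.
 */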
   1300static bool offload_pre_check(struct flow_cls_offload *flow)
   1301{
   1302	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
   1303	struct flow_dissector *dissector = rule->match.dissector;
   1304
   1305	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
   1306		return false;
   1307
   1308	if (flow->common.chain_index)
   1309		return false;
   1310
   1311	return true;
   1312}
   1313
   1314/**
   1315 * nfp_flower_add_offload() - Adds a new flow to hardware.
   1316 * @app:	Pointer to the APP handle
   1317 * @netdev:	netdev structure.
   1318 * @flow:	TC flower classifier offload structure.
   1319 *
   1320 * Adds a new flow to the repeated hash structure and action payload.
   1321 *
   1322 * Return: negative value on error, 0 if configured successfully.
   1323 */
   1324static int
   1325nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
   1326		       struct flow_cls_offload *flow)
   1327{
   1328	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
   1329	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
   1330	struct nfp_flower_priv *priv = app->priv;
   1331	struct netlink_ext_ack *extack = NULL;
   1332	struct nfp_fl_payload *flow_pay;
   1333	struct nfp_fl_key_ls *key_layer;
   1334	struct nfp_port *port = NULL;
   1335	int err;
   1336
   1337	extack = flow->common.extack;
   1338	if (nfp_netdev_is_nfp_repr(netdev))
   1339		port = nfp_port_from_netdev(netdev);
   1340
   1341	if (is_pre_ct_flow(flow))
   1342		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
   1343
   1344	if (is_post_ct_flow(flow))
   1345		return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
   1346
   1347	if (!offload_pre_check(flow))
   1348		return -EOPNOTSUPP;
   1349
   1350	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
   1351	if (!key_layer)
   1352		return -ENOMEM;
   1353
   1354	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule,
   1355					      &tun_type, extack);
   1356	if (err)
   1357		goto err_free_key_ls;
   1358
   1359	flow_pay = nfp_flower_allocate_new(key_layer);
   1360	if (!flow_pay) {
   1361		err = -ENOMEM;
   1362		goto err_free_key_ls;
   1363	}
   1364
   1365	err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev,
   1366					    flow_pay, tun_type, extack);
   1367	if (err)
   1368		goto err_destroy_flow;
   1369
   1370	err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack);
   1371	if (err)
   1372		goto err_destroy_flow;
   1373
   1374	if (flow_pay->pre_tun_rule.dev) {
   1375		err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
   1376		if (err)
   1377			goto err_destroy_flow;
   1378	}
   1379
   1380	err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack);
   1381	if (err)
   1382		goto err_destroy_flow;
   1383
   1384	flow_pay->tc_flower_cookie = flow->cookie;
   1385	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
   1386				     nfp_flower_table_params);
   1387	if (err) {
   1388		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
   1389		goto err_release_metadata;
   1390	}
   1391
   1392	if (flow_pay->pre_tun_rule.dev) {
   1393		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
   1394			struct nfp_predt_entry *predt;
   1395
   1396			predt = kzalloc(sizeof(*predt), GFP_KERNEL);
   1397			if (!predt) {
   1398				err = -ENOMEM;
   1399				goto err_remove_rhash;
   1400			}
   1401			predt->flow_pay = flow_pay;
   1402			INIT_LIST_HEAD(&predt->nn_list);
   1403			spin_lock_bh(&priv->predt_lock);
   1404			list_add(&predt->list_head, &priv->predt_list);
   1405			flow_pay->pre_tun_rule.predt = predt;
   1406			nfp_tun_link_and_update_nn_entries(app, predt);
   1407			spin_unlock_bh(&priv->predt_lock);
   1408		} else {
   1409			err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
   1410		}
   1411	} else {
   1412		err = nfp_flower_xmit_flow(app, flow_pay,
   1413					   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
   1414	}
   1415
   1416	if (err)
   1417		goto err_remove_rhash;
   1418
   1419	if (port)
   1420		port->tc_offload_cnt++;
   1421
   1422	flow_pay->in_hw = true;
   1423
   1424	/* Deallocate flow payload when flower rule has been destroyed. */
   1425	kfree(key_layer);
   1426
   1427	return 0;
   1428
   1429err_remove_rhash:
   1430	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
   1431					    &flow_pay->fl_node,
   1432					    nfp_flower_table_params));
   1433err_release_metadata:
   1434	nfp_modify_flow_metadata(app, flow_pay);
   1435err_destroy_flow:
   1436	if (flow_pay->nfp_tun_ipv6)
   1437		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
   1438	kfree(flow_pay->action_data);
   1439	kfree(flow_pay->mask_data);
   1440	kfree(flow_pay->unmasked_data);
   1441	kfree(flow_pay);
   1442err_free_key_ls:
   1443	kfree(key_layer);
   1444	return err;
   1445}
   1446
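/* Remove a merge flow from the firmware, re-adding the rule it overrode if
 * that sub-flow still exists, and release all links and table entries.
 */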
   1447static void
   1448nfp_flower_remove_merge_flow(struct nfp_app *app,
   1449			     struct nfp_fl_payload *del_sub_flow,
   1450			     struct nfp_fl_payload *merge_flow)
   1451{
   1452	struct nfp_flower_priv *priv = app->priv;
   1453	struct nfp_fl_payload_link *link, *temp;
   1454	struct nfp_merge_info *merge_info;
   1455	struct nfp_fl_payload *origin;
   1456	u64 parent_ctx = 0;
   1457	bool mod = false;
   1458	int err;
   1459
   1460	link = list_first_entry(&merge_flow->linked_flows,
   1461				struct nfp_fl_payload_link, merge_flow.list);
   1462	origin = link->sub_flow.flow;
   1463
    1464	/* Re-add the rule the merge had overwritten if it has not been deleted. */
   1465	if (origin != del_sub_flow)
   1466		mod = true;
   1467
   1468	err = nfp_modify_flow_metadata(app, merge_flow);
   1469	if (err) {
   1470		nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
   1471		goto err_free_links;
   1472	}
   1473
   1474	if (!mod) {
   1475		err = nfp_flower_xmit_flow(app, merge_flow,
   1476					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
   1477		if (err) {
   1478			nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
   1479			goto err_free_links;
   1480		}
   1481	} else {
   1482		__nfp_modify_flow_metadata(priv, origin);
   1483		err = nfp_flower_xmit_flow(app, origin,
   1484					   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
   1485		if (err)
   1486			nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
   1487		origin->in_hw = true;
   1488	}
   1489
   1490err_free_links:
   1491	/* Clean any links connected with the merged flow. */
   1492	list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
   1493				 merge_flow.list) {
   1494		u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
   1495
   1496		parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
   1497		nfp_flower_unlink_flow(link);
   1498	}
   1499
   1500	merge_info = rhashtable_lookup_fast(&priv->merge_table,
   1501					    &parent_ctx,
   1502					    merge_table_params);
   1503	if (merge_info) {
   1504		WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
   1505						    &merge_info->ht_node,
   1506						    merge_table_params));
   1507		kfree(merge_info);
   1508	}
   1509
   1510	kfree(merge_flow->action_data);
   1511	kfree(merge_flow->mask_data);
   1512	kfree(merge_flow->unmasked_data);
   1513	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
   1514					    &merge_flow->fl_node,
   1515					    nfp_flower_table_params));
   1516	kfree_rcu(merge_flow, rcu);
   1517}
   1518
   1519void
   1520nfp_flower_del_linked_merge_flows(struct nfp_app *app,
   1521				  struct nfp_fl_payload *sub_flow)
   1522{
   1523	struct nfp_fl_payload_link *link, *temp;
   1524
   1525	/* Remove any merge flow formed from the deleted sub_flow. */
   1526	list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
   1527				 sub_flow.list)
   1528		nfp_flower_remove_merge_flow(app, sub_flow,
   1529					     link->merge_flow.flow);
   1530}
   1531
   1532/**
   1533 * nfp_flower_del_offload() - Removes a flow from hardware.
   1534 * @app:	Pointer to the APP handle
   1535 * @netdev:	netdev structure.
   1536 * @flow:	TC flower classifier offload structure
   1537 *
   1538 * Removes a flow from the repeated hash structure and clears the
   1539 * action payload. Any flows merged from this are also deleted.
   1540 *
   1541 * Return: negative value on error, 0 if removed successfully.
   1542 */
   1543static int
   1544nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
   1545		       struct flow_cls_offload *flow)
   1546{
   1547	struct nfp_flower_priv *priv = app->priv;
   1548	struct nfp_fl_ct_map_entry *ct_map_ent;
   1549	struct netlink_ext_ack *extack = NULL;
   1550	struct nfp_fl_payload *nfp_flow;
   1551	struct nfp_port *port = NULL;
   1552	int err;
   1553
   1554	extack = flow->common.extack;
   1555	if (nfp_netdev_is_nfp_repr(netdev))
   1556		port = nfp_port_from_netdev(netdev);
   1557
    1558	/* Conntrack flows live in ct_map_table; delete them via the CT code. */
   1559	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
   1560					    nfp_ct_map_params);
   1561	if (ct_map_ent) {
   1562		err = nfp_fl_ct_del_flow(ct_map_ent);
   1563		return err;
   1564	}
   1565
   1566	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
   1567	if (!nfp_flow) {
   1568		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
   1569		return -ENOENT;
   1570	}
   1571
   1572	err = nfp_modify_flow_metadata(app, nfp_flow);
   1573	if (err)
   1574		goto err_free_merge_flow;
   1575
   1576	if (nfp_flow->nfp_tun_ipv4_addr)
   1577		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
   1578
   1579	if (nfp_flow->nfp_tun_ipv6)
   1580		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);
   1581
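        	/* Flows that never made it into firmware need no delete message;
        	 * only the host-side state below has to be cleaned up.
        	 */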
   1582	if (!nfp_flow->in_hw) {
   1583		err = 0;
   1584		goto err_free_merge_flow;
   1585	}
   1586
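        	/* Pre-tunnel rules: with DECAP_V2 firmware the rule's predt entry
        	 * is unlinked from its neighbour entries and freed under
        	 * predt_lock; older firmware is told to remove the rule via a
        	 * dedicated pre-tunnel delete message.
        	 */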
   1587	if (nfp_flow->pre_tun_rule.dev) {
   1588		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
   1589			struct nfp_predt_entry *predt;
   1590
   1591			predt = nfp_flow->pre_tun_rule.predt;
   1592			if (predt) {
   1593				spin_lock_bh(&priv->predt_lock);
   1594				nfp_tun_unlink_and_update_nn_entries(app, predt);
   1595				list_del(&predt->list_head);
   1596				spin_unlock_bh(&priv->predt_lock);
   1597				kfree(predt);
   1598			}
   1599		} else {
   1600			err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
   1601		}
   1602	} else {
   1603		err = nfp_flower_xmit_flow(app, nfp_flow,
   1604					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
   1605	}
   1606	/* Fall through on error. */
   1607
   1608err_free_merge_flow:
   1609	nfp_flower_del_linked_merge_flows(app, nfp_flow);
   1610	if (port)
   1611		port->tc_offload_cnt--;
   1612	kfree(nfp_flow->action_data);
   1613	kfree(nfp_flow->mask_data);
   1614	kfree(nfp_flow->unmasked_data);
   1615	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
   1616					    &nfp_flow->fl_node,
   1617					    nfp_flower_table_params));
   1618	kfree_rcu(nfp_flow, rcu);
   1619	return err;
   1620}
   1621
   1622static void
   1623__nfp_flower_update_merge_stats(struct nfp_app *app,
   1624				struct nfp_fl_payload *merge_flow)
   1625{
   1626	struct nfp_flower_priv *priv = app->priv;
   1627	struct nfp_fl_payload_link *link;
   1628	struct nfp_fl_payload *sub_flow;
   1629	u64 pkts, bytes, used;
   1630	u32 ctx_id;
   1631
   1632	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
   1633	pkts = priv->stats[ctx_id].pkts;
   1634	/* Do not cycle subflows if no stats to distribute. */
   1635	if (!pkts)
   1636		return;
   1637	bytes = priv->stats[ctx_id].bytes;
   1638	used = priv->stats[ctx_id].used;
   1639
   1640	/* Reset stats for the merge flow. */
   1641	priv->stats[ctx_id].pkts = 0;
   1642	priv->stats[ctx_id].bytes = 0;
   1643
   1644	/* The merge flow has received stats updates from firmware.
   1645	 * Distribute these stats to all subflows that form the merge.
    1646	 * The stats will then be collected by TC via the subflows.
   1647	 */
   1648	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
   1649		sub_flow = link->sub_flow.flow;
   1650		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
   1651		priv->stats[ctx_id].pkts += pkts;
   1652		priv->stats[ctx_id].bytes += bytes;
   1653		priv->stats[ctx_id].used = max_t(u64, used,
   1654						 priv->stats[ctx_id].used);
   1655	}
   1656}
   1657
   1658void
   1659nfp_flower_update_merge_stats(struct nfp_app *app,
   1660			      struct nfp_fl_payload *sub_flow)
   1661{
   1662	struct nfp_fl_payload_link *link;
   1663
   1664	/* Get merge flows that the subflow forms to distribute their stats. */
   1665	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
   1666		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
   1667}
   1668
   1669/**
   1670 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
   1671 * @app:	Pointer to the APP handle
   1672 * @netdev:	Netdev structure.
   1673 * @flow:	TC flower classifier offload structure
   1674 *
    1675 * Populates a flow statistics structure which corresponds to a
   1676 * specific flow.
   1677 *
   1678 * Return: negative value on error, 0 if stats populated successfully.
   1679 */
   1680static int
   1681nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
   1682		     struct flow_cls_offload *flow)
   1683{
   1684	struct nfp_flower_priv *priv = app->priv;
   1685	struct nfp_fl_ct_map_entry *ct_map_ent;
   1686	struct netlink_ext_ack *extack = NULL;
   1687	struct nfp_fl_payload *nfp_flow;
   1688	u32 ctx_id;
   1689
    1690	/* Check ct_map_table first */
   1691	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
   1692					    nfp_ct_map_params);
   1693	if (ct_map_ent)
   1694		return nfp_fl_ct_stats(flow, ct_map_ent);
   1695
   1696	extack = flow->common.extack;
   1697	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
   1698	if (!nfp_flow) {
   1699		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
   1700		return -EINVAL;
   1701	}
   1702
   1703	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
   1704
   1705	spin_lock_bh(&priv->stats_lock);
   1706	/* If request is for a sub_flow, update stats from merged flows. */
   1707	if (!list_empty(&nfp_flow->linked_flows))
   1708		nfp_flower_update_merge_stats(app, nfp_flow);
   1709
   1710	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
   1711			  priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
   1712			  FLOW_ACTION_HW_STATS_DELAYED);
   1713
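        	/* Stats are reported as deltas; zero the counters so the next
        	 * dump only accounts for traffic seen since this query.
        	 */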
   1714	priv->stats[ctx_id].pkts = 0;
   1715	priv->stats[ctx_id].bytes = 0;
   1716	spin_unlock_bh(&priv->stats_lock);
   1717
   1718	return 0;
   1719}
   1720
   1721static int
   1722nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
   1723			struct flow_cls_offload *flower)
   1724{
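        	/* Reject protocol values that are not valid Ethernet protocol
        	 * numbers (e.g. 802.3 length fields below ETH_P_802_3_MIN).
        	 */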
   1725	if (!eth_proto_is_802_3(flower->common.protocol))
   1726		return -EOPNOTSUPP;
   1727
   1728	switch (flower->command) {
   1729	case FLOW_CLS_REPLACE:
   1730		return nfp_flower_add_offload(app, netdev, flower);
   1731	case FLOW_CLS_DESTROY:
   1732		return nfp_flower_del_offload(app, netdev, flower);
   1733	case FLOW_CLS_STATS:
   1734		return nfp_flower_get_stats(app, netdev, flower);
   1735	default:
   1736		return -EOPNOTSUPP;
   1737	}
   1738}
   1739
   1740static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
   1741					void *type_data, void *cb_priv)
   1742{
   1743	struct flow_cls_common_offload *common = type_data;
   1744	struct nfp_repr *repr = cb_priv;
   1745
   1746	if (!tc_can_offload_extack(repr->netdev, common->extack))
   1747		return -EOPNOTSUPP;
   1748
   1749	switch (type) {
   1750	case TC_SETUP_CLSFLOWER:
   1751		return nfp_flower_repr_offload(repr->app, repr->netdev,
   1752					       type_data);
   1753	case TC_SETUP_CLSMATCHALL:
   1754		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
   1755						    type_data);
   1756	default:
   1757		return -EOPNOTSUPP;
   1758	}
   1759}
   1760
   1761static LIST_HEAD(nfp_block_cb_list);
   1762
   1763static int nfp_flower_setup_tc_block(struct net_device *netdev,
   1764				     struct flow_block_offload *f)
   1765{
   1766	struct nfp_repr *repr = netdev_priv(netdev);
   1767	struct nfp_flower_repr_priv *repr_priv;
   1768	struct flow_block_cb *block_cb;
   1769
   1770	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
   1771		return -EOPNOTSUPP;
   1772
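        	/* Record whether this tc block is shared between devices; shared
        	 * blocks are refused elsewhere in the driver, e.g. by the
        	 * matchall QoS policer offload.
        	 */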
   1773	repr_priv = repr->app_priv;
   1774	repr_priv->block_shared = f->block_shared;
   1775	f->driver_block_list = &nfp_block_cb_list;
   1776
   1777	switch (f->command) {
   1778	case FLOW_BLOCK_BIND:
   1779		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
   1780					  &nfp_block_cb_list))
   1781			return -EBUSY;
   1782
   1783		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
   1784					       repr, repr, NULL);
   1785		if (IS_ERR(block_cb))
   1786			return PTR_ERR(block_cb);
   1787
   1788		flow_block_cb_add(block_cb, f);
   1789		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
   1790		return 0;
   1791	case FLOW_BLOCK_UNBIND:
   1792		block_cb = flow_block_cb_lookup(f->block,
   1793						nfp_flower_setup_tc_block_cb,
   1794						repr);
   1795		if (!block_cb)
   1796			return -ENOENT;
   1797
   1798		flow_block_cb_remove(block_cb, f);
   1799		list_del(&block_cb->driver_list);
   1800		return 0;
   1801	default:
   1802		return -EOPNOTSUPP;
   1803	}
   1804}
   1805
   1806int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
   1807			enum tc_setup_type type, void *type_data)
   1808{
   1809	switch (type) {
   1810	case TC_SETUP_BLOCK:
   1811		return nfp_flower_setup_tc_block(netdev, type_data);
   1812	default:
   1813		return -EOPNOTSUPP;
   1814	}
   1815}
   1816
   1817struct nfp_flower_indr_block_cb_priv {
   1818	struct net_device *netdev;
   1819	struct nfp_app *app;
   1820	struct list_head list;
   1821};
   1822
   1823static struct nfp_flower_indr_block_cb_priv *
   1824nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
   1825				     struct net_device *netdev)
   1826{
   1827	struct nfp_flower_indr_block_cb_priv *cb_priv;
   1828	struct nfp_flower_priv *priv = app->priv;
   1829
   1830	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
   1831		if (cb_priv->netdev == netdev)
   1832			return cb_priv;
   1833
   1834	return NULL;
   1835}
   1836
   1837static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
   1838					  void *type_data, void *cb_priv)
   1839{
   1840	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
   1841
   1842	switch (type) {
   1843	case TC_SETUP_CLSFLOWER:
   1844		return nfp_flower_repr_offload(priv->app, priv->netdev,
   1845					       type_data);
   1846	default:
   1847		return -EOPNOTSUPP;
   1848	}
   1849}
   1850
   1851void nfp_flower_setup_indr_tc_release(void *cb_priv)
   1852{
   1853	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
   1854
   1855	list_del(&priv->list);
   1856	kfree(priv);
   1857}
   1858
   1859static int
   1860nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
   1861			       struct flow_block_offload *f, void *data,
   1862			       void (*cleanup)(struct flow_block_cb *block_cb))
   1863{
   1864	struct nfp_flower_indr_block_cb_priv *cb_priv;
   1865	struct nfp_flower_priv *priv = app->priv;
   1866	struct flow_block_cb *block_cb;
   1867
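        	/* Indirect blocks are only offloaded as ingress blocks on
        	 * ordinary devices, or as egress blocks on devices that qualify
        	 * as internal ports (nfp_flower_internal_port_can_offload()).
        	 */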
   1868	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
   1869	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
   1870	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
   1871	     nfp_flower_internal_port_can_offload(app, netdev)))
   1872		return -EOPNOTSUPP;
   1873
   1874	switch (f->command) {
   1875	case FLOW_BLOCK_BIND:
   1876		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
   1877		if (cb_priv &&
   1878		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
   1879					  cb_priv,
   1880					  &nfp_block_cb_list))
   1881			return -EBUSY;
   1882
   1883		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
   1884		if (!cb_priv)
   1885			return -ENOMEM;
   1886
   1887		cb_priv->netdev = netdev;
   1888		cb_priv->app = app;
   1889		list_add(&cb_priv->list, &priv->indr_block_cb_priv);
   1890
   1891		block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
   1892						    cb_priv, cb_priv,
   1893						    nfp_flower_setup_indr_tc_release,
   1894						    f, netdev, sch, data, app, cleanup);
   1895		if (IS_ERR(block_cb)) {
   1896			list_del(&cb_priv->list);
   1897			kfree(cb_priv);
   1898			return PTR_ERR(block_cb);
   1899		}
   1900
   1901		flow_block_cb_add(block_cb, f);
   1902		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
   1903		return 0;
   1904	case FLOW_BLOCK_UNBIND:
   1905		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
   1906		if (!cb_priv)
   1907			return -ENOENT;
   1908
   1909		block_cb = flow_block_cb_lookup(f->block,
   1910						nfp_flower_setup_indr_block_cb,
   1911						cb_priv);
   1912		if (!block_cb)
   1913			return -ENOENT;
   1914
   1915		flow_indr_block_cb_remove(block_cb, f);
   1916		list_del(&block_cb->driver_list);
   1917		return 0;
   1918	default:
   1919		return -EOPNOTSUPP;
   1920	}
   1921	return 0;
   1922}
   1923
   1924static int
   1925nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
   1926{
   1927	if (!data)
   1928		return -EOPNOTSUPP;
   1929
   1930	switch (type) {
   1931	case TC_SETUP_ACT:
   1932		return nfp_setup_tc_act_offload(app, data);
   1933	default:
   1934		return -EOPNOTSUPP;
   1935	}
   1936}
   1937
   1938int
   1939nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
   1940			    enum tc_setup_type type, void *type_data,
   1941			    void *data,
   1942			    void (*cleanup)(struct flow_block_cb *block_cb))
   1943{
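        	/* A NULL netdev means the request is not tied to a device, e.g.
        	 * a tc action offload; hand it to nfp_setup_tc_no_dev().
        	 */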
   1944	if (!netdev)
   1945		return nfp_setup_tc_no_dev(cb_priv, type, data);
   1946
   1947	if (!nfp_fl_is_netdev_to_offload(netdev))
   1948		return -EOPNOTSUPP;
   1949
   1950	switch (type) {
   1951	case TC_SETUP_BLOCK:
   1952		return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
   1953						      type_data, data, cleanup);
   1954	default:
   1955		return -EOPNOTSUPP;
   1956	}
   1957}