cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ice_tc_lib.c (41697B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & ICE_TC_FLWR_FIELD_VLAN)
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}
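
/* Illustrative example (editor's note, not from the original source): a TC
 * flower match on dst_mac + an IPv4 dst_ip + a TCP dst_port sets the flags
 * ICE_TC_FLWR_FIELD_DST_MAC, ICE_TC_FLWR_FIELD_DEST_IPV4 and
 * ICE_TC_FLWR_FIELD_DEST_L4_PORT, so ice_tc_count_lkups() returns 3: one
 * lookup element each for the MAC, IPv4 and L4-port headers that
 * ice_tc_fill_rules() below then fills in.
 */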

static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
	int i = 0;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	return i;
}
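
/* Illustrative note (editor's note, not from the original source): the
 * VXLAN/GENEVE VNI is a 24-bit value carried in the upper three bytes of the
 * 32-bit VNI field, which is why the tenant ID is shifted left by 8 before
 * being programmed and why the match mask is "\xff\xff\xff\x00". For example,
 * VNI 0x123456 ends up in the header as the big-endian bytes 12 34 56 00.
 */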

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill the ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	int i = 0;

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & ICE_TC_FLWR_FIELD_VLAN) {
		list[i].type = ICE_VLAN_OFOS;
		list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
		list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects the appropriate tunnel_type if the specified
 * device is a tunnel device such as a VXLAN/Geneve netdev.
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}

static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe that gets created as a result of the new
	 * extraction sequence based on the input set.
	 * Priority 7 is the max value for a switch recipe; the higher the
	 * number, the earlier the switch rule is evaluated.
	 */
	rule_info.priority = 7;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
		rule_info.flags_info.act_valid = true;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to an error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_id = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the filter parameters, using advanced
 * recipes supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *ch_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* get the channel (aka ADQ VSI) */
	if (tc_fltr->dest_vsi)
		ch_vsi = tc_fltr->dest_vsi;
	else
		ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
		if (!ch_vsi) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
			ret = -EINVAL;
			goto exit;
		}

		rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		rule_info.sw_act.vsi_handle = ch_vsi->idx;
		rule_info.priority = 7;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.rx = true;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.rx = false;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to an error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	if (tc_fltr->action.tc_class > 0 && ch_vsi) {
		/* For PF ADQ, the VSI type is set to ICE_VSI_CHNL, but
		 * for a PF ADQ filter the destination is not yet set in
		 * tc_fltr, hence store the dest_vsi pointer in tc_fltr.
		 */
		if (ch_vsi->type == ICE_VSI_CHNL)
			tc_fltr->dest_vsi = ch_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI (channel VSI)
		 */
		ch_vsi->num_chnl_fltr++;
		/* in this case, dest_id is VSI handle (sw handle) */
		tc_fltr->dest_id = rule_added.vsi_handle;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
		lkups_cnt, flags,
		tc_fltr->action.tc_class, rule_added.rid,
		rule_added.rule_id, rule_added.vsi_handle);
exit:
	kfree(list);
	return ret;
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPv6 addresses should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* if both src and dest IPv6 masks are unspecified (any), error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on the destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * thereby making GTP-U the default choice (when the destination port number
 * is not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
			/* well-known GTP-U port; keep the TNL_GTPU default */
			break;
		case 2123:
			/* well-known GTP-C port */
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}

static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		headers->l3_key.tos = match.key->tos;
		headers->l3_key.ttl = match.key->ttl;
		headers->l3_mask.tos = match.mask->tos;
		headers->l3_mask.ttl = match.mask->ttl;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}

/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers; the
		 * outer headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		  (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		   BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		headers->vlan_hdr.vlan_id =
				cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
		if (match.mask->vlan_priority)
			headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}
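
/* Illustrative example (editor's note, not from the original source):
 * a command such as
 *
 *   tc filter add dev <ethX> ingress protocol ip flower \
 *       dst_ip 192.168.1.10 ip_proto tcp dst_port 80 hw_tc 1
 *
 * is parsed by ice_parse_cls_flower() into the DEST_IPV4 and DEST_L4_PORT
 * flags plus ETH_TYPE_ID for the IP ethertype, and the resulting flags and
 * headers are later turned into lookup elements by ice_tc_fill_rules().
 */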

/**
 * ice_add_switch_fltr - Add TC flower filters
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * Add filter in HW switch block
 */
static int
ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
		return -EOPNOTSUPP;

	if (ice_is_eswitch_mode_switchdev(vsi->back))
		return ice_eswitch_add_tc_fltr(vsi, fltr);

	return ice_add_tc_flower_adv_fltr(vsi, fltr);
}

/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct ice_vsi *main_vsi;

	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
		return -EINVAL;
	}
	if (!tc) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existent destination");
		return -EINVAL;
	}

	/* Redirect to a TC class or Queue Group */
	main_vsi = ice_get_main_vsi(vsi->back);
	if (!main_vsi || !main_vsi->netdev) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of invalid netdevice");
		return -EINVAL;
	}

	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include a dest MAC address; otherwise
	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
	 * VSIs as long as the remaining filter criteria are satisfied, such
	 * as dest IP address and dest/src L4 port. The following code handles:
	 * 1. For non-tunnel, if the user specifies MAC addresses, use them
	 * (meaning this code does nothing).
	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
	 * implicit dest MAC equal to the lower netdev's active unicast MAC
	 * address.
	 * 3. For tunnel, as of now the TC filter through the flower classifier
	 * has no provision for the user to specify the outer DMAC, hence the
	 * driver implicitly adds an outer dest MAC equal to the lower
	 * netdev's active unicast MAC address.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		memset(fltr->outer_headers.l2_mask.dst_mac, 0xff, ETH_ALEN);
	}

	/* validate the specified dest MAC address; make sure it belongs
	 * either to the lower netdev or to one of its MACVLANs. MACVLAN MAC
	 * addresses are added as unicast MAC filters destined to the main
	 * VSI.
	 */
	if (!ice_mac_fltr_exist(&main_vsi->back->hw,
				fltr->outer_headers.l2_key.dst_mac,
				main_vsi->idx)) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
		return -EINVAL;
	}

	/* Make sure VLAN is already added to main VSI, before allowing ADQ to
	 * add a VLAN based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
					 main_vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.tc_class = tc;

	return 0;
}
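
/* Illustrative example (editor's note, not from the original source): for a
 * filter added with "hw_tc 2", tc_classid_to_hwtc() yields tc == 2 above;
 * the action is forced to ICE_FWD_TO_VSI with tc_class == 2, which
 * ice_add_tc_flower_adv_fltr() later resolves to the ADQ channel VSI via
 * vsi->tc_map_vsi[2].
 */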

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i;

	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back)) {
			int err = ice_eswitch_tc_parse_action(fltr, act);

			if (err)
				return err;
			continue;
		}
		/* Allow only one rule per filter */

		/* Drop action */
		if (act->id == FLOW_ACTION_DROP) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
			return -EINVAL;
		}
		fltr->action.fltr_act = ICE_FWD_TO_VSI;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_id;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all ice
		 * devices get an instance of the rule from the higher-level
		 * device. Avoid triggering an explicit error in this case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one already exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because it could not be found");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}