cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ice_ethtool_fdir.c (53706B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018-2020, Intel Corporation. */

/* flow director ethtool support for ice */

#include "ice.h"
#include "ice_lib.h"
#include "ice_fdir.h"
#include "ice_flow.h"

static struct in6_addr full_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static struct in6_addr zero_ipv6_addr_mask = {
	.in6_u = {
		.u6_addr8 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		}
	}
};

/* calls to ice_flow_add_prof require the number of segments in the array
 * for segs_cnt. In this code that is one more than the index.
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
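/* Editorial illustration (assuming ICE_FD_HW_SEG_NON_TUN == 0 and
 * ICE_FD_HW_SEG_TUN == 1, as in the enum ice_fd_hw_seg used below):
 * TNL_SEG_CNT(ICE_FD_HW_SEG_NON_TUN) is 1, one segment for the outer headers
 * only, while TNL_SEG_CNT(ICE_FD_HW_SEG_TUN) is 2, outer plus inner
 * (tunneled) segment.
 */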

/**
 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
 * flow type values
 * @flow: filter type to be converted
 *
 * Returns the corresponding ethtool flow type.
 */
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		return TCP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		return UDP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		return SCTP_V4_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		return IPV4_USER_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		return TCP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		return UDP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		return SCTP_V6_FLOW;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		return IPV6_USER_FLOW;
	default:
		/* 0 is undefined ethtool flow */
		return 0;
	}
}

/**
 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
 * @eth: Ethtool flow type to be converted
 *
 * Returns flow enum
 */
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
	switch (eth) {
	case TCP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
	case UDP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	case SCTP_V4_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	case IPV4_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
	case TCP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
	case UDP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
	case SCTP_V6_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
	case IPV6_USER_FLOW:
		return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
	default:
		return ICE_FLTR_PTYPE_NONF_NONE;
	}
}

/**
 * ice_is_mask_valid - check mask field set
 * @mask: full mask to check
 * @field: field for which mask should be valid
 *
 * If the mask is fully set return true. If it is not valid for field return
 * false.
 */
static bool ice_is_mask_valid(u64 mask, u64 field)
{
	return (mask & field) == field;
}

/**
 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 * @hw: hardware structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Returns 0 on success and -EINVAL on failure
 */
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *rule;
	int ret = 0;
	u16 idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	mutex_lock(&hw->fdir_fltr_lock);

	rule = ice_fdir_find_fltr_by_idx(hw, fsp->location);

	if (!rule || fsp->location != rule->fltr_id) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = ice_fltr_to_ethtool_flow(rule->flow_type);

	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		break;
	case IPV6_USER_FLOW:
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
		       rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
		       rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
		break;
	default:
		break;
	}

	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->orig_q_index;

	idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
			rule->flow_type);
		ret = -EINVAL;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	return ret;
}

/**
 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @hw: hardware structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * Returns 0 as expected for success by ethtool
 */
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs)
{
	struct ice_fdir_fltr *f_rule;
	unsigned int cnt = 0;
	int val = 0;

	/* report total rule count */
	cmd->data = ice_get_fdir_cnt_all(hw);

	mutex_lock(&hw->fdir_fltr_lock);

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = f_rule->fltr_id;
		cnt++;
	}

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;
	return val;
}

/**
 * ice_fdir_remap_entries - update the FDir entries in profile
 * @prof: FDir structure pointer
 * @tun: tunneled or non-tunneled packet
 * @idx: FDir entry index
 */
static void
ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
{
	if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
		int i;

		for (i = idx; i < (prof->cnt - 1); i++) {
			u64 old_entry_h;

			old_entry_h = prof->entry_h[i + 1][tun];
			prof->entry_h[i][tun] = old_entry_h;
			prof->vsi_h[i] = prof->vsi_h[i + 1];
		}

		prof->entry_h[i][tun] = 0;
		prof->vsi_h[i] = 0;
	}
}

/**
 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 * @hw: hardware structure containing filter list
 * @vsi_idx: VSI handle
 */
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
{
	int status, flow;

	if (!hw->fdir_prof)
		return;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
		int tun, i;

		if (!prof || !prof->cnt)
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			u64 prof_id;

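			/* each segment type gets a disjoint profile ID range:
			 * tun == 0 maps flow into [0, ICE_FLTR_PTYPE_MAX) and
			 * each further segment type into the next
			 * ICE_FLTR_PTYPE_MAX-sized block
			 */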
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;

			for (i = 0; i < prof->cnt; i++) {
				if (prof->vsi_h[i] != vsi_idx)
					continue;

				prof->entry_h[i][tun] = 0;
				prof->vsi_h[i] = 0;
				break;
			}

			/* after clearing FDir entries update the remaining */
			ice_fdir_remap_entries(prof, tun, i);

			/* find flow profile corresponding to prof_id and clear
			 * vsi_idx from bitmap.
			 */
			status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
			if (status) {
				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
					status);
			}
		}
		prof->cnt--;
	}
}

/**
 * ice_fdir_get_hw_prof - return the ice_fd_hw_prof associated with a flow
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static struct ice_fd_hw_prof *
ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
{
	if (blk == ICE_BLK_FD && hw->fdir_prof)
		return hw->fdir_prof[flow];

	return NULL;
}

/**
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id;
		int j;

		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi_num, prof_id);
			ice_flow_rem_entry(hw, blk, prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}

/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(ice_hw_to_dev(hw), prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}

/**
 * ice_fdir_release_flows - release all flows in use for later replay
 * @hw: pointer to HW instance
 */
void ice_fdir_release_flows(struct ice_hw *hw)
{
	int flow;

	/* release Flow Director HW table entries */
	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
		ice_fdir_erase_flow_from_hw(hw, ICE_BLK_FD, flow);
}

/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			u64 prof_id;
			int j;

			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
					  prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, ICE_BLK_FD,
							 prof_id,
							 prof->vsi_h[0],
							 prof->vsi_h[j],
							 prio, prof->fdir_seg,
							 &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}

/**
 * ice_parse_rx_flow_user_data - deconstruct user-defined data
 * @fsp: pointer to ethtool Rx flow specification
 * @data: pointer to userdef data structure for storage
 *
 * Returns 0 on success, negative error value on failure
 */
static int
ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
			    struct ice_rx_flow_userdef *data)
{
	u64 value, mask;

	memset(data, 0, sizeof(*data));
	if (!(fsp->flow_type & FLOW_EXT))
		return 0;

	value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
	mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
	if (!mask)
		return 0;

#define ICE_USERDEF_FLEX_WORD_M	GENMASK_ULL(15, 0)
#define ICE_USERDEF_FLEX_OFFS_S	16
#define ICE_USERDEF_FLEX_OFFS_M	GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
#define ICE_USERDEF_FLEX_FLTR_M	GENMASK_ULL(31, 0)

	/* 0x1fe is the maximum value for offsets stored in the internal
	 * filtering tables.
	 */
#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe

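	/* Editorial note on the layout parsed below (illustrative, derived
	 * from the masks above): bits 15:0 hold the 2-byte flex word to
	 * match, bits 31:16 hold the byte offset of that word within the
	 * packet, and bits 63:32 must be zero. For example, a user-def value
	 * of 0x001200ab requests matching the flex word 0x00ab at packet
	 * offset 0x12.
	 */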
	if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
	    value > ICE_USERDEF_FLEX_FLTR_M)
		return -EINVAL;

	data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
	data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
			     ICE_USERDEF_FLEX_OFFS_S;
	if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
		return -EINVAL;

	data->flex_fltr = true;

	return 0;
}

/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are 2 filter pools: guaranteed and best effort (shared). Each VSI can
 * use filters from either pool. The guaranteed pool is divided between VSIs.
 * The best effort filter pool is common to all VSIs and is a device shared
 * resource pool. The number of filters available to this VSI is the sum of
 * the VSI's guaranteed filter pool and the global available best effort
 * filter pool.
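 *
 * Worked example (hypothetical numbers, for illustration only): with
 * num_gfltr == 64 and 10 guaranteed filters already programmed by this VSI,
 * fd_fltr_best_effort == 2048 and 100 best effort filters in use device-wide,
 * this returns (64 - 10) + (2048 - 100) == 2002.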
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* minus the guaranteed filters programmed by this VSI */
	num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
		     VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* minus the global best effort filters programmed */
	num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
		   GLQF_FD_CNT_FD_BCNT_S;

	return num_guar + num_be;
}

/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(ice_hw_to_dev(hw),
					     ICE_FLTR_PTYPE_MAX,
					     sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(ice_hw_to_dev(hw),
						   sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
 * @prof: pointer to flow director HW profile
 * @vsi_idx: vsi_idx to locate
 *
 * return the index of the vsi_idx. if vsi_idx is not found insert it
 * into the vsi_h table.
 */
static u16
ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
{
	u16 idx = 0;

	for (idx = 0; idx < prof->cnt; idx++)
		if (prof->vsi_h[idx] == vsi_idx)
			return idx;

	if (idx == prof->cnt)
		prof->vsi_h[prof->cnt++] = vsi_idx;
	return idx;
}

/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	bool del_last;
	u64 prof_id;
	int err;
	int idx;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Or, if it's different then it's an error.
		 */
		if (!memcmp(old_seg, seg, sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);
	}

	/* Adding a profile, but there is only one header supported. That is,
	 * the final parameters are 1 header (segment), no actions (NULL) and
	 * zero actions (0).
	 */
	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
	err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
				TNL_SEG_CNT(tun), &prof);
	if (err)
		return err;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				 main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry1_h);
	if (err)
		goto err_prof;
	err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, &entry2_h);
	if (err)
		goto err_entry;

	hw_prof->fdir_seg[tun] = seg;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
		u16 vsi_idx;
		u16 vsi_h;

		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
			continue;

		entry1_h = 0;
		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
		err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
					 main_vsi->idx, vsi_h,
					 ICE_FLOW_PRIO_NORMAL, seg,
					 &entry1_h);
		if (err) {
			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
				idx);
			goto err_unroll;
		}

		vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
						main_vsi->tc_map_vsi[idx]->idx);
		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
	}

	return 0;

err_unroll:
	entry1_h = 0;
	hw_prof->fdir_seg[tun] = NULL;

	/* The variable del_last will be used to determine when to clean up
	 * the VSI group data. The VSI data is not needed if there are no
	 * segments.
	 */
	del_last = true;
	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
		if (hw_prof->fdir_seg[idx]) {
			del_last = false;
			break;
		}

	for (idx = 0; idx < hw_prof->cnt; idx++) {
		u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);

		if (!hw_prof->entry_h[idx][tun])
			continue;
		ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
		ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
		hw_prof->entry_h[idx][tun] = 0;
		if (del_last)
			hw_prof->vsi_h[idx] = 0;
	}
	if (del_last)
		hw_prof->cnt = 0;
err_entry:
	ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			     ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	dev_err(dev, "Failed to add filter.  Flow director filters on each port must have the same input set.\n");

	return err;
}

/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
    779 */
    780static int
    781ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
    782		      enum ice_flow_seg_hdr l3_proto,
    783		      enum ice_flow_seg_hdr l4_proto)
    784{
    785	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;
    786
    787	if (!seg)
    788		return -EINVAL;
    789
    790	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
    791		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
    792		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
    793	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
    794		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
    795		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
    796	} else {
    797		return -EINVAL;
    798	}
    799
    800	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
    801		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
    802		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
    803	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
    804		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
    805		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
    806	} else {
    807		return -EINVAL;
    808	}
    809
    810	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);
    811
    812	/* IP source address */
    813	ice_flow_set_fld(seg, src_addr, ICE_FLOW_FLD_OFF_INVAL,
    814			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
    815
    816	/* IP destination address */
    817	ice_flow_set_fld(seg, dst_addr, ICE_FLOW_FLD_OFF_INVAL,
    818			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
    819
    820	/* Layer 4 source port */
    821	ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
    822			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
    823
    824	/* Layer 4 destination port */
    825	ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
    826			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false);
    827
    828	return 0;
    829}
    830
    831/**
    832 * ice_create_init_fdir_rule
    833 * @pf: PF structure
    834 * @flow: filter enum
    835 *
    836 * Return error value or 0 on success.
    837 */
    838static int
    839ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
    840{
    841	struct ice_flow_seg_info *seg, *tun_seg;
    842	struct device *dev = ice_pf_to_dev(pf);
    843	struct ice_hw *hw = &pf->hw;
    844	int ret;
    845
	/* if there is already a filter rule for this kind, return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV4,
					    ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, ICE_FLOW_SEG_HDR_IPV6,
					    ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
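	/* tun_seg[0], the outer/tunnel header segment, stays zeroed from the
	 * kcalloc above; the inner headers are described by the copy placed
	 * in tun_seg[1]
	 */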
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, flow, ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, tun_seg);

	set_bit(flow, hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EOPNOTSUPP;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);

	/* IP source address */
	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!tcp_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip4_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip4_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip4_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip4_usr_seg
 * @seg: flow segment for programming
 * @usr_ip4_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv4
 */
static int
ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip4_spec *usr_ip4_spec,
			 bool *perfect_fltr)
{
	/* first 4 bytes of Layer 4 header */
	if (usr_ip4_spec->l4_4_bytes)
		return -EINVAL;
	if (usr_ip4_spec->tos)
		return -EINVAL;
	if (usr_ip4_spec->ip_ver)
		return -EINVAL;
	/* Filtering on Layer 4 protocol not supported */
	if (usr_ip4_spec->proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);

	/* IP source address */
	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!usr_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_seg
 * @seg: flow segment for programming
 * @tcp_ip6_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv6
 */
static int
ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip6_spec *tcp_ip6_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
		return -EINVAL;

	/* filtering on TC not supported */
	if (tcp_ip6_spec->tclass)
		return -EOPNOTSUPP;

	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EINVAL;
	}

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);

	if (!memcmp(tcp_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(tcp_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(tcp_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip6_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip6_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 false);
	else if (!tcp_ip6_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_set_fdir_ip6_usr_seg
 * @seg: flow segment for programming
 * @usr_ip6_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv6
 */
static int
ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip6_spec *usr_ip6_spec,
			 bool *perfect_fltr)
{
	/* filtering on Layer 4 bytes not supported */
	if (usr_ip6_spec->l4_4_bytes)
		return -EOPNOTSUPP;
	/* filtering on TC not supported */
	if (usr_ip6_spec->tclass)
		return -EOPNOTSUPP;
	/* filtering on Layer 4 protocol not supported */
	if (usr_ip6_spec->l4_proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)) &&
	    !memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		return -EINVAL;

	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);

	if (!memcmp(usr_ip6_spec->ip6src, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6src, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	if (!memcmp(usr_ip6_spec->ip6dst, &full_ipv6_addr_mask,
		    sizeof(struct in6_addr)))
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV6_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	else if (!memcmp(usr_ip6_spec->ip6dst, &zero_ipv6_addr_mask,
			 sizeof(struct in6_addr)))
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}

/**
 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
 * @pf: PF structure
 * @fsp: pointer to ethtool Rx flow specification
 * @user: user defined data from flow specification
 *
 * Returns 0 on success.
 */
static int
ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
		       struct ice_rx_flow_userdef *user)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_fltr_ptype fltr_idx;
	struct ice_hw *hw = &pf->hw;
	bool perfect_filter;
	int ret;

	seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, seg);
		return -ENOMEM;
	}

	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V4_FLOW:
		ret = ice_set_fdir_ip4_seg(seg, &fsp->m_u.tcp_ip4_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV4_USER_FLOW:
		ret = ice_set_fdir_ip4_usr_seg(seg, &fsp->m_u.usr_ip4_spec,
					       &perfect_filter);
		break;
	case TCP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_TCP,
					   &perfect_filter);
		break;
	case UDP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_UDP,
					   &perfect_filter);
		break;
	case SCTP_V6_FLOW:
		ret = ice_set_fdir_ip6_seg(seg, &fsp->m_u.tcp_ip6_spec,
					   ICE_FLOW_SEG_HDR_SCTP,
					   &perfect_filter);
		break;
	case IPV6_USER_FLOW:
		ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
					       &perfect_filter);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_exit;

	/* tunnel segments are shifted up one. */
	memcpy(&tun_seg[1], seg, sizeof(*seg));

	if (user && user->flex_fltr) {
		perfect_filter = false;
		ice_flow_add_fld_raw(seg, user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
		ice_flow_add_fld_raw(&tun_seg[1], user->flex_offset,
				     ICE_FLTR_PRGM_FLEX_WORD_SIZE,
				     ICE_FLOW_FLD_OFF_INVAL,
				     ICE_FLOW_FLD_OFF_INVAL);
	}

	/* add filter for outer headers */
	fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT);
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx,
					ICE_FD_HW_SEG_NON_TUN);
	if (ret == -EEXIST)
		/* Rule already exists, free memory and continue */
		devm_kfree(dev, seg);
	else if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, tun_seg, fltr_idx,
					ICE_FD_HW_SEG_TUN);
	if (ret == -EEXIST) {
		/* Rule already exists, free memory and count as success */
		devm_kfree(dev, tun_seg);
		ret = 0;
	} else if (ret) {
		/* could not write tunnel filter, but outer filter exists */
		devm_kfree(dev, tun_seg);
	}

	if (perfect_filter)
		set_bit(fltr_idx, hw->fdir_perfect_fltr);
	else
		clear_bit(fltr_idx, hw->fdir_perfect_fltr);

	return ret;

err_exit:
	devm_kfree(dev, tun_seg);
	devm_kfree(dev, seg);

	return -EOPNOTSUPP;
}

/**
 * ice_update_per_q_fltr
 * @vsi: ptr to VSI
 * @q_index: queue index
 * @inc: true to increment or false to decrement per queue filter count
 *
 * This function is used to keep track of per queue sideband filters
 */
static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
{
	struct ice_rx_ring *rx_ring;

	if (!vsi->num_rxq || q_index >= vsi->num_rxq)
		return;

	rx_ring = vsi->rx_rings[q_index];
	if (!rx_ring || !rx_ring->ch)
		return;

	if (inc)
		atomic_inc(&rx_ring->ch->num_sb_fltr);
	else
		atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
}

/**
 * ice_fdir_write_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds filter and false removes filter
 * @is_tun: true adds inner filter on tunnel and false outer headers
 *
 * returns 0 on success and negative value on error
 */
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
		    bool is_tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct ice_fltr_desc desc;
	struct ice_vsi *ctrl_vsi;
	u8 *pkt, *frag_pkt;
	bool has_frag;
	int err;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;
	frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!frag_pkt) {
		err = -ENOMEM;
		goto err_free;
	}

	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	if (err)
		goto err_free_all;
	err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (err)
		goto err_free_all;

	/* repeat for fragment packet */
	has_frag = ice_fdir_has_frag(input->flow_type);
	if (has_frag) {
		/* does not return error */
		ice_fdir_get_prgm_desc(hw, input, &desc, add);
		err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
						is_tun);
		if (err)
			goto err_frag;
		err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
		if (err)
			goto err_frag;
	} else {
		devm_kfree(dev, frag_pkt);
	}

	return 0;

err_free_all:
	devm_kfree(dev, frag_pkt);
err_free:
	devm_kfree(dev, pkt);
	return err;

err_frag:
	devm_kfree(dev, frag_pkt);
	return err;
}

/**
 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
 * @pf: PF data structure
 * @input: filter structure
 * @add: true adds filter and false removes filter
 *
 * returns 0 on success and negative value on error
 */
static int
ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
			bool add)
{
	u16 port_num;
	int tun;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		bool is_tun = tun == ICE_FD_HW_SEG_TUN;
		int err;

		if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
			continue;
		err = ice_fdir_write_fltr(pf, input, add, is_tun);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_fdir_replay_fltrs - replay filters from the HW filter list
 * @pf: board private structure
 */
void ice_fdir_replay_fltrs(struct ice_pf *pf)
{
	struct ice_fdir_fltr *f_rule;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		int err = ice_fdir_write_all_fltr(pf, f_rule, true);

		if (err)
			dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
				err, f_rule->fltr_id);
	}
}

/**
 * ice_fdir_create_dflt_rules - create default perfect filters
 * @pf: PF data structure
 *
 * Returns 0 for success or error.
 */
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
	int err;

	/* Create perfect TCP and UDP rules in hardware. */
	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
	if (err)
		return err;

	err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);

	return err;
}

/**
 * ice_fdir_del_all_fltrs - Delete all flow director filters
 * @vsi: the VSI being changed
 *
 * This function needs to be called while holding hw->fdir_fltr_lock
 */
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
{
	struct ice_fdir_fltr *f_rule, *tmp;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
		ice_fdir_write_all_fltr(pf, f_rule, false);
		ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
		list_del(&f_rule->fltr_node);
		devm_kfree(ice_pf_to_dev(pf), f_rule);
	}
}

/**
 * ice_vsi_manage_fdir - turn on/off flow director
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_fltr_ptype flow;

	if (ena) {
		set_bit(ICE_FLAG_FD_ENA, pf->flags);
		ice_fdir_create_dflt_rules(pf);
		return;
	}

	mutex_lock(&hw->fdir_fltr_lock);
	if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
		goto release_lock;

	ice_fdir_del_all_fltrs(vsi);

	if (hw->fdir_prof)
		for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
		     flow++)
			if (hw->fdir_prof[flow])
				ice_fdir_rem_flow(hw, ICE_BLK_FD, flow);

release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
}

/**
 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
 * @pf: PF structure
 * @flow_type: FDir flow type to release
 */
static void
ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
{
	struct ice_hw *hw = &pf->hw;
	bool need_perfect = false;

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		need_perfect = true;

	if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
		return;

	ice_fdir_rem_flow(hw, ICE_BLK_FD, flow_type);
	if (need_perfect)
		ice_create_init_fdir_rule(pf, flow_type);
}

/**
 * ice_fdir_update_list_entry - add or delete a filter from the filter list
 * @pf: PF structure
 * @input: filter structure
 * @fltr_idx: ethtool index of filter to modify
 *
 * returns 0 on success and negative on errors
 */
static int
ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
			   int fltr_idx)
{
	struct ice_fdir_fltr *old_fltr;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	int err = -ENOENT;

	/* Do not update filters during reset */
	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
	if (old_fltr) {
		err = ice_fdir_write_all_fltr(pf, old_fltr, false);
		if (err)
			return err;
		ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
		/* update sb-filters count, specific to ring->channel */
		ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
		if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
			/* we just deleted the last filter of flow_type so we
			 * should also delete the HW filter info.
			 */
			ice_fdir_do_rem_flow(pf, old_fltr->flow_type);
		list_del(&old_fltr->fltr_node);
		devm_kfree(ice_hw_to_dev(hw), old_fltr);
	}
	if (!input)
		return err;
	ice_fdir_list_add_fltr(hw, input);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, true);
	ice_fdir_update_cntrs(hw, input->flow_type, true);
	return 0;
}

/**
 * ice_del_fdir_ethtool - delete Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add or delete Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int val;

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not delete filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
		return -EBUSY;

	mutex_lock(&hw->fdir_fltr_lock);
	val = ice_fdir_update_list_entry(pf, NULL, fsp->location);
	mutex_unlock(&hw->fdir_fltr_lock);

	return val;
}

/**
 * ice_update_ring_dest_vsi - update dest ring and dest VSI
 * @vsi: pointer to target VSI
 * @dest_vsi: ptr to dest VSI index
 * @ring: ptr to dest ring
 *
   1660 * This function updates destination VSI and queue if user specifies
   1661 * target queue which falls in channel's (aka ADQ) queue region
   1662 */
   1663static void
   1664ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
   1665{
   1666	struct ice_channel *ch;
   1667
   1668	list_for_each_entry(ch, &vsi->ch_list, list) {
   1669		if (!ch->ch_vsi)
   1670			continue;
   1671
   1672		/* make sure to locate corresponding channel based on "queue"
   1673		 * specified
   1674		 */
   1675		if ((*ring < ch->base_q) ||
   1676		    (*ring >= (ch->base_q + ch->num_rxq)))
   1677			continue;
   1678
   1679		/* update the dest_vsi based on channel */
   1680		*dest_vsi = ch->ch_vsi->idx;
   1681
   1682		/* update the "ring" to be correct based on channel */
   1683		*ring -= ch->base_q;
   1684	}
   1685}
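
/* Worked example (hypothetical numbers): with a channel owning
 * base_q = 32 and num_rxq = 16, a user-specified ring of 40 is
 * rewritten to dest_vsi = ch->ch_vsi->idx and ring = 40 - 32 = 8,
 * i.e. queue 8 relative to that channel's VSI; rings outside
 * [base_q, base_q + num_rxq) are left untouched.
 */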

/**
 * ice_set_fdir_input_set - Set the input set for Flow Director
 * @vsi: pointer to target VSI
 * @fsp: pointer to ethtool Rx flow specification
 * @input: filter structure
 *
 * Returns 0 on success and negative values for failure
 */
static int
ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
		       struct ice_fdir_fltr *input)
{
	u16 dest_vsi, q_index = 0;
	u16 orig_q_index = 0;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int flow_type;
	u8 dest_ctl;

	if (!vsi || !fsp || !input)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;

	dest_vsi = vsi->idx;
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (vf) {
			dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
			return -EINVAL;
		}

		if (ring >= vsi->num_rxq)
			return -EINVAL;

		orig_q_index = ring;
		ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
		dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		q_index = ring;
	}

	input->fltr_id = fsp->location;
	input->q_index = q_index;
	flow_type = fsp->flow_type & ~FLOW_EXT;

	/* Record the original queue index as specified by the user; with a
	 * channel configuration, 'q_index' becomes relative to the TC
	 * (channel).
	 */
	input->orig_q_index = orig_q_index;
	input->dest_vsi = dest_vsi;
	input->dest_ctl = dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
	input->flow_type = ice_ethtool_flow_to_fltr(flow_type);

	if (fsp->flow_type & FLOW_EXT) {
		memcpy(input->ext_data.usr_def, fsp->h_ext.data,
		       sizeof(input->ext_data.usr_def));
		input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
		input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
		memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
		       sizeof(input->ext_mask.usr_def));
		input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
		input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
	}

	switch (flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
		input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
		input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
		input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
		input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
		input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
		break;
	case IPV4_USER_FLOW:
		input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
		input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
		input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
		input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
		input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
		input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
		input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
		input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
		input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
		input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
		input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
		input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
		input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
		input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
		memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
		input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
		input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
		break;
	case IPV6_USER_FLOW:
		memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
		input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;

		/* if no protocol requested, use IPPROTO_NONE */
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			input->ip.v6.proto = IPPROTO_NONE;
		else
			input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;

		memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
		       sizeof(struct in6_addr));
		memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
		       sizeof(struct in6_addr));
		input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
		input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
		input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
		break;
	default:
		/* reject flow types we do not parse */
		return -EINVAL;
	}

	return 0;
}
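
/* Illustrative mapping (example values only): the command
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.2 dst-port 80 \
 *           action 2 loc 1
 * arrives above as fsp->flow_type == TCP_V4_FLOW with
 * h_u.tcp_ip4_spec.ip4dst and .pdst set in network byte order, the
 * corresponding m_u fields set to all-ones (full match),
 * ring_cookie == 2 (target Rx queue) and fsp->location == 1.
 */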

/**
 * ice_add_fdir_ethtool - Add Flow Director filter
 * @vsi: pointer to target VSI
 * @cmd: command to add Flow Director filter
 *
 * Returns 0 on success and negative values for failure
 */
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
{
	struct ice_rx_flow_userdef userdata;
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *input;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int fltrs_needed;
	u16 tunnel_port;
	int ret;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
		return -EOPNOTSUPP;

	/* Do not program filters during reset */
	if (ice_is_reset_in_progress(pf->state)) {
		dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
		return -EBUSY;
	}

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (ice_parse_rx_flow_user_data(fsp, &userdata))
		return -EINVAL;

	if (fsp->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	ret = ice_cfg_fdir_xtrct_seq(pf, fsp, &userdata);
	if (ret)
		return ret;

	if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
		dev_err(dev, "Failed to add filter. The requested filter location is beyond the maximum number of flow director filters.\n");
		return -ENOSPC;
	}

	/* return error if not an update and no available filters */
	fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
	if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
	    ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
		dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
		return -ENOSPC;
	}

	input = devm_kzalloc(dev, sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	ret = ice_set_fdir_input_set(vsi, fsp, input);
	if (ret)
		goto free_input;

	mutex_lock(&hw->fdir_fltr_lock);
	if (ice_fdir_is_dup_fltr(hw, input)) {
		ret = -EINVAL;
		goto release_lock;
	}

	if (userdata.flex_fltr) {
		input->flex_fltr = true;
		input->flex_word = cpu_to_be16(userdata.flex_word);
		input->flex_offset = userdata.flex_offset;
	}

	input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
	input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* input struct is added to the SW filter list by
	 * ice_fdir_update_list_entry()
	 */
	ice_fdir_update_list_entry(pf, input, fsp->location);

	ret = ice_fdir_write_all_fltr(pf, input, true);
	if (ret)
		goto remove_sw_rule;

	goto release_lock;

remove_sw_rule:
	ice_fdir_update_cntrs(hw, input->flow_type, false);
	/* update sb-filters count, specific to ring->channel */
	ice_update_per_q_fltr(vsi, input->orig_q_index, false);
	list_del(&input->fltr_node);
release_lock:
	mutex_unlock(&hw->fdir_fltr_lock);
free_input:
	if (ret)
		devm_kfree(dev, input);

	return ret;
}
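
/* Hedged userspace sketch of inserting the rule handled by
 * ice_add_fdir_ethtool(): ETHTOOL_SRXCLSRLINS with a populated
 * ethtool_rx_flow_spec. Guarded by #if 0 because it is illustrative
 * userspace code, not part of this driver; the interface name,
 * address, port, queue and location below are examples only.
 */
#if 0
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int add_tcp4_fdir_rule(const char *ifname)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ethtool_rx_flow_spec *fs = &nfc.fs;
	struct ifreq ifr = { 0 };
	int fd, err;

	fs->flow_type = TCP_V4_FLOW;
	fs->h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.0.2");
	fs->h_u.tcp_ip4_spec.pdst = htons(80);
	/* full per-field match masks; zeroed fields are left unmatched */
	fs->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
	fs->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
	fs->ring_cookie = 2;	/* deliver matches to Rx queue 2 */
	fs->location = 1;	/* ethtool rule index */

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return err;
}
#endif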