cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

otx2_flows.c (37414B)


// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <net/ipv6.h>
#include <linux/sort.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

static int otx2_mcam_entry_init(struct otx2_nic *pfvf);

struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	u8 rss_ctx_id;
#define DMAC_FILTER_RULE		BIT(0)
#define PFC_FLOWCTRL_RULE		BIT(1)
	u16 rule_type;
	int vf;
};

enum dmac_req {
	DMAC_ADDR_UPDATE,
	DMAC_ADDR_DEL
};

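/* Drop the ntuple MCAM entry array and reset the usable rule count */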
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
	devm_kfree(pfvf->dev, flow_cfg->flow_ent);
	flow_cfg->flow_ent = NULL;
	flow_cfg->max_flows = 0;
}

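/* Return all ntuple MCAM entries to the AF, one free request per entry */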
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	int ent, err;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	for (ent = 0; ent < flow_cfg->max_flows; ent++) {
		req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
		if (!req)
			break;

		req->entry = flow_cfg->flow_ent[ent];

		/* Send message to AF to free MCAM entries */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			break;
	}
	mutex_unlock(&pfvf->mbox.lock);
	otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
	return 0;
}

static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(u16 *)a - *(u16 *)b;
}

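/* Allocate 'count' non-contiguous MCAM entries for ntuple filters from
 * the AF. Returns the number of entries actually allocated, which may
 * be fewer than requested.
 */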
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int ent, allocated = 0;

	/* Free current ones and allocate new ones with requested count */
	otx2_free_ntuple_mcam_entries(pfvf);

	if (!count)
		return 0;

	flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
						sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->flow_ent) {
		netdev_err(pfvf->netdev,
			   "%s: Unable to allocate memory for flow entries\n",
			    __func__);
		return -ENOMEM;
	}

	mutex_lock(&pfvf->mbox.lock);

	/* At most NPC_MAX_NONCONTIG_ENTRIES MCAM entries can be
	 * allocated in a single request.
	 */
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
		if (!req)
			goto exit;

		req->contig = false;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		/* Allocate higher priority entries for PFs, so that VFs'
		 * entries will be on top of the PF's.
		 */
		if (!is_otx2_vf(pfvf->pcifunc)) {
			req->priority = NPC_MCAM_HIGHER_PRIO;
			req->ref_entry = flow_cfg->def_ent[0];
		}

		/* Send message to AF */
		if (otx2_sync_mbox_msg(&pfvf->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&pfvf->mbox.mbox, 0, &req->hdr);

		for (ent = 0; ent < rsp->count; ent++)
			flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		/* If this request is not fulfilled, no need to send
		 * further requests.
		 */
		if (rsp->count != req->count)
			break;
	}

	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in ascending order,
	 * otherwise the user-installed ntuple filter index and the MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&flow_cfg->flow_ent[0], allocated,
		     sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

exit:
	mutex_unlock(&pfvf->mbox.lock);

	flow_cfg->max_flows = allocated;

	if (allocated) {
		pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	}

	if (allocated != count)
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got only %d\n",
			    count, allocated);
	return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);

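/* Reserve the default MCAM entries needed for VF VLAN, unicast and
 * RX VLAN filters, then carve out a separate pool for ntuple filters.
 */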
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int ent, count;

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	count = OTX2_MAX_UNICAST_FLOWS +
			OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;

	flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
					       sizeof(u16), GFP_KERNEL);
	if (!flow_cfg->def_ent)
		return -ENOMEM;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->contig = false;
	req->count = count;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
		mutex_unlock(&pfvf->mbox.lock);
		devm_kfree(pfvf->dev, flow_cfg->def_ent);
		return 0;
	}

	for (ent = 0; ent < rsp->count; ent++)
		flow_cfg->def_ent[ent] = rsp->entry_list[ent];

	flow_cfg->vf_vlan_offset = 0;
	flow_cfg->unicast_offset = vf_vlan_max_flows;
	flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
					OTX2_MAX_UNICAST_FLOWS;
	pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
	pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
	pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	/* Allocate entries for Ntuple filters */
	count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
	if (count <= 0) {
		otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
		return 0;
	}

	pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;

	return 0;
}

int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg;

	pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
				      sizeof(struct otx2_flow_config),
				      GFP_KERNEL);
	if (!pfvf->flow_cfg)
		return -ENOMEM;

	flow_cfg = pfvf->flow_cfg;
	INIT_LIST_HEAD(&flow_cfg->flow_list);
	flow_cfg->max_flows = 0;

	return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);

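/* PF-side flow init: allocate flow_cfg and the default MCAM entries,
 * set up the unicast MAC table and probe CGX/RPM DMAC filter support.
 */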
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	/* Allocate bare minimum number of MCAM entries needed for
	 * unicast and ntuple filters.
	 */
	err = otx2_mcam_entry_init(pf);
	if (err)
		return err;

	/* Check if MCAM entries are allocated or not */
	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return 0;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	otx2_dmacflt_get_max_cnt(pf);

	/* DMAC filters are not allocated */
	if (!pf->flow_cfg->dmacflt_max_flows)
		return 0;

	pf->flow_cfg->bmap_to_dmacindex =
			devm_kzalloc(pf->dev, sizeof(u8) *
				     pf->flow_cfg->dmacflt_max_flows,
				     GFP_KERNEL);

	if (!pf->flow_cfg->bmap_to_dmacindex)
		return -ENOMEM;

	pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);

/*  On success adds an MCAM entry
 *  On failure enable promiscuous mode
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* don't have free MCAM entries or the UC list is larger than allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* unicast offset starts at 32; entries 0..31 are for ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->def_ent[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
			  pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(netdev,
			    "Add %pM to CGX/RPM DMAC filters list as well\n",
			    mac);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

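/* Insert the flow such that flow_list stays sorted by location */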
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

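/* Rule count exposed to ethtool; DMAC filter slots are included only
 * once the regular MCAM pool is exhausted or a DMAC filter is in use.
 */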
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
	if (!flow_cfg)
		return 0;

	if (flow_cfg->nr_flows == flow_cfg->max_flows ||
	    !bitmap_empty(&flow_cfg->dmacflt_bmap,
			  flow_cfg->dmacflt_max_flows))
		return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
	else
		return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= otx2_get_maxflows(pfvf->flow_cfg))
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			nfc->rss_context = iter->rss_ctx_id;
			return 0;
		}
	}

	return -ENOENT;
}

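/* Walk all rule locations and report those that hold a flow */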
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 rule_cnt = nfc->rule_cnt;
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
	while ((!err || err == -ENOENT) && idx < rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;

	return err;
}

static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
	struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_usr_mask->tos) {
			pkt->tos = ipv4_usr_hdr->tos;
			pmask->tos = ipv4_usr_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_usr_mask->proto) {
			switch (ipv4_usr_hdr->proto) {
			case IPPROTO_ICMP:
				req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
				break;
			case IPPROTO_TCP:
				req->features |= BIT_ULL(NPC_IPPROTO_TCP);
				break;
			case IPPROTO_UDP:
				req->features |= BIT_ULL(NPC_IPPROTO_UDP);
				break;
			case IPPROTO_SCTP:
				req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
				break;
			case IPPROTO_AH:
				req->features |= BIT_ULL(NPC_IPPROTO_AH);
				break;
			case IPPROTO_ESP:
				req->features |= BIT_ULL(NPC_IPPROTO_ESP);
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->tos) {
			pkt->tos = ipv4_l4_hdr->tos;
			pmask->tos = ipv4_l4_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IP);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (ah_esp_mask->ip4src) {
			memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ah_esp_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ah_esp_mask->tos) {
			pkt->tos = ah_esp_hdr->tos;
			pmask->tos = ah_esp_mask->tos;
			req->features |= BIT_ULL(NPC_TOS);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if (ah_esp_mask->spi & ah_esp_hdr->spi)
			return -EOPNOTSUPP;

		if (flow_type == AH_V4_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				  struct npc_install_flow_req *req,
				  u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
	struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		if (flow_type == UDP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (flow_type == TCP_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		break;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		pkt->etype = cpu_to_be16(ETH_P_IPV6);
		pmask->etype = cpu_to_be16(0xFFFF);
		req->features |= BIT_ULL(NPC_ETYPE);
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
			memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
			memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		/* NPC profile doesn't extract AH/ESP header fields */
		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
			return -EOPNOTSUPP;

		if (flow_type == AH_V6_FLOW)
			req->features |= BIT_ULL(NPC_IPPROTO_AH);
		else
			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
		break;
	default:
		break;
	}

	return 0;
}

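/* Translate an ethtool flow spec, including VLAN (FLOW_EXT) and
 * destination MAC (FLOW_MAC_EXT) extensions, into an NPC install
 * request understood by the AF.
 */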
static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;
	int ret;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_hdr->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		u16 vlan_etype;

		if (fsp->m_ext.vlan_etype) {
			/* Partial masks not supported */
			if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
				return -EINVAL;

			vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
			/* Only ETH_P_8021Q and ETH_P_8021AD types supported */
			if (vlan_etype != ETH_P_8021Q &&
			    vlan_etype != ETH_P_8021AD)
				return -EINVAL;

			memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
			       sizeof(pkt->vlan_etype));
			memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
			       sizeof(pmask->vlan_etype));

			if (vlan_etype == ETH_P_8021Q)
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
			else
				req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
		}

		if (fsp->m_ext.vlan_tci) {
			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Not Drop/Direct to queue but use action in default entry */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
					struct ethtool_rx_flow_spec *fsp)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	u64 ring_cookie = fsp->ring_cookie;
	u32 flow_type;

	if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
		return false;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

	/* CGX/RPM block DMAC filtering is configured for white listing;
	 * check for an action other than DROP
	 */
	if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
	    !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
		if (is_zero_ether_addr(eth_mask->h_dest) &&
		    is_valid_ether_addr(eth_hdr->h_dest))
			return true;
	}

	return false;
}

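/* Build and send the NPC install request for a flow: DROP, RSS or
 * unicast-queue action, optional VF redirection and, with DCB, PFC
 * rule bookkeeping.
 */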
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
#ifdef CONFIG_DCB
	int vlan_prio, qidx, pfc_rule = 0;
#endif
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* free the allocated msg above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* change to unicast only if action of default entry is not
		 * requested by user
		 */
		if (flow->flow_spec.flow_type & FLOW_RSS) {
			req->op = NIX_RX_ACTIONOP_RSS;
			req->index = flow->rss_ctx_id;
			req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
		} else {
			req->op = NIX_RX_ACTIONOP_UCAST;
			req->index = ethtool_get_flow_spec_ring(ring_cookie);
		}
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}

#ifdef CONFIG_DCB
		/* Identify PFC rule if PFC enabled and ntuple rule is vlan */
		if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
		    pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
			vlan_prio = ntohs(req->packet.vlan_tci) &
				    ntohs(req->mask.vlan_tci);

			/* Get the priority */
			vlan_prio >>= 13;
			flow->rule_type |= PFC_FLOWCTRL_RULE;
			/* Check if PFC enabled for this priority */
			if (pfvf->pfc_en & BIT(vlan_prio)) {
				pfc_rule = true;
				qidx = req->index;
			}
		}
#endif
	}

	/* ethtool ring_cookie has (VF + 1) for VF */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);

#ifdef CONFIG_DCB
	if (!err && pfc_rule)
		otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
#endif

	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
				    struct otx2_flow *flow)
{
	struct otx2_flow *pf_mac;
	struct ethhdr *eth_hdr;

	pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
	if (!pf_mac)
		return -ENOMEM;

	pf_mac->entry = 0;
	pf_mac->rule_type |= DMAC_FILTER_RULE;
	pf_mac->location = pfvf->flow_cfg->max_flows;
	memcpy(&pf_mac->flow_spec, &flow->flow_spec,
	       sizeof(struct ethtool_rx_flow_spec));
	pf_mac->flow_spec.location = pf_mac->location;

	/* Copy PF mac address */
	eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
	ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);

	/* Install DMAC filter with PF mac address */
	otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);

	otx2_add_flow_to_list(pfvf, pf_mac);
	pfvf->flow_cfg->nr_flows++;
	set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);

	return 0;
}

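/* ethtool entry point for installing or updating an ntuple rule.
 * Rules matching only a unicast DMAC are diverted to the CGX/RPM
 * DMAC filter table; everything else is installed into the MCAM.
 */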
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct otx2_flow *flow;
	struct ethhdr *eth_hdr;
	bool new = false;
	int err = 0;
	u32 ring;

	if (!flow_cfg->max_flows) {
		netdev_err(pfvf->netdev,
			   "Ntuple rule count is 0, allocate and retry\n");
		return -EINVAL;
	}

	ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->flow_ent[flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	if (fsp->flow_type & FLOW_RSS)
		flow->rss_ctx_id = nfc->rss_context;

	if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
		eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* Sync dmac filter table with updated fields */
		if (flow->rule_type & DMAC_FILTER_RULE)
			return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
						   flow->entry);

		if (bitmap_full(&flow_cfg->dmacflt_bmap,
				flow_cfg->dmacflt_max_flows)) {
			netdev_warn(pfvf->netdev,
				    "Can't insert the rule %d as max allowed dmac filters are %d\n",
				    flow->location +
				    flow_cfg->dmacflt_max_flows,
				    flow_cfg->dmacflt_max_flows);
			err = -EINVAL;
			if (new)
				kfree(flow);
			return err;
		}

		/* Install PF mac address to DMAC filter list */
		if (!test_bit(0, &flow_cfg->dmacflt_bmap))
			otx2_add_flow_with_pfmac(pfvf, flow);

		flow->rule_type |= DMAC_FILTER_RULE;
		flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
						  flow_cfg->dmacflt_max_flows);
		fsp->location = flow_cfg->max_flows + flow->entry;
		flow->flow_spec.location = fsp->location;
		flow->location = fsp->location;

		set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);

	} else {
		if (flow->location >= pfvf->flow_cfg->max_flows) {
			netdev_warn(pfvf->netdev,
				    "Can't insert non dmac ntuple rule at %d, allowed range 0-%d\n",
				    flow->location,
				    flow_cfg->max_flows - 1);
			err = -EINVAL;
		} else {
			err = otx2_add_flow_msg(pfvf, flow);
		}
	}

	if (err) {
		if (err == MBOX_MSG_INVALID)
			err = -EINVAL;
		if (new)
			kfree(flow);
		return err;
	}

	/* add the new flow installed to list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

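/* Update or remove the DMAC filter rule carrying the PF's own MAC
 * address, which always occupies filter entry 0.
 */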
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;
	bool found = false;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			if (req == DMAC_ADDR_DEL) {
				otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
						    0);
				clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
				found = true;
			} else {
				ether_addr_copy(eth_hdr->h_dest,
						pfvf->netdev->dev_addr);
				otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
			}
			break;
		}
	}

	if (found) {
		list_del(&iter->list);
		kfree(iter);
		pfvf->flow_cfg->nr_flows--;
	}
}

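/* Remove the rule at 'location', releasing either its DMAC filter
 * slot or its MCAM entry.
 */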
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= otx2_get_maxflows(flow_cfg))
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	if (flow->rule_type & DMAC_FILTER_RULE) {
		struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;

		/* user not allowed to remove dmac filter with interface mac */
		if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
			return -EPERM;

		err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
					  flow->entry);
		clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
		/* If all dmac filters are removed delete macfilter with
		 * interface mac address and configure CGX/RPM block in
		 * promiscuous mode
		 */
		if (bitmap_weight(&flow_cfg->dmacflt_bmap,
				  flow_cfg->dmacflt_max_flows) == 1)
			otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
	} else {
#ifdef CONFIG_DCB
		if (flow->rule_type & PFC_FLOWCTRL_RULE)
			otx2_update_bpid_in_rqctx(pfvf, 0,
						  flow->flow_spec.ring_cookie,
						  false);
#endif

		err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	}

	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_flow *flow, *tmp;
	int err;

	list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
		if (flow->rss_ctx_id != ctx_id)
			continue;
		err = otx2_remove_flow(pfvf, flow->location);
		if (err)
			netdev_warn(pfvf->netdev,
				    "Can't delete the rule %d associated with this rss group err:%d",
				    flow->location, err);
	}
}

int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	if (!flow_cfg->max_flows)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->flow_ent[0];
	req->end   = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

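/* Delete all installed flows and hand every MCAM entry back to the AF */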
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

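/* Toggle RX VLAN offload: install or delete the RX VLAN MCAM rule and
 * configure VTAG stripping/capture to match.
 */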
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* config strip, capture and size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}

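/* Reinstall every DMAC filter rule into the CGX/RPM filter table */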
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
	struct otx2_flow *iter;
	struct ethhdr *eth_hdr;

	list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
		if (iter->rule_type & DMAC_FILTER_RULE) {
			eth_hdr = &iter->flow_spec.h_u.ether_spec;
			otx2_dmacflt_add(pf, eth_hdr->h_dest,
					 iter->entry);
		}
	}
}

void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
	otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}