cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rvu_nix.c (149137B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Marvell RVU Admin Function driver
      3 *
      4 * Copyright (C) 2018 Marvell.
      5 *
      6 */
      7
      8#include <linux/module.h>
      9#include <linux/pci.h>
     10
     11#include "rvu_struct.h"
     12#include "rvu_reg.h"
     13#include "rvu.h"
     14#include "npc.h"
     15#include "cgx.h"
     16#include "lmac_common.h"
     17
     18static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
     19static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
     20			    int type, int chan_id);
     21static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
     22			       int type, bool add);
     23static int nix_setup_ipolicers(struct rvu *rvu,
     24			       struct nix_hw *nix_hw, int blkaddr);
     25static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
     26static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
     27			       struct nix_hw *nix_hw, u16 pcifunc);
     28static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
     29static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
     30				     u32 leaf_prof);
     31static const char *nix_get_ctx_name(int ctype);
     32
     33enum mc_tbl_sz {
     34	MC_TBL_SZ_256,
     35	MC_TBL_SZ_512,
     36	MC_TBL_SZ_1K,
     37	MC_TBL_SZ_2K,
     38	MC_TBL_SZ_4K,
     39	MC_TBL_SZ_8K,
     40	MC_TBL_SZ_16K,
     41	MC_TBL_SZ_32K,
     42	MC_TBL_SZ_64K,
     43};
     44
     45enum mc_buf_cnt {
     46	MC_BUF_CNT_8,
     47	MC_BUF_CNT_16,
     48	MC_BUF_CNT_32,
     49	MC_BUF_CNT_64,
     50	MC_BUF_CNT_128,
     51	MC_BUF_CNT_256,
     52	MC_BUF_CNT_512,
     53	MC_BUF_CNT_1024,
     54	MC_BUF_CNT_2048,
     55};
     56
     57enum nix_makr_fmt_indexes {
     58	NIX_MARK_CFG_IP_DSCP_RED,
     59	NIX_MARK_CFG_IP_DSCP_YELLOW,
     60	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
     61	NIX_MARK_CFG_IP_ECN_RED,
     62	NIX_MARK_CFG_IP_ECN_YELLOW,
     63	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
     64	NIX_MARK_CFG_VLAN_DEI_RED,
     65	NIX_MARK_CFG_VLAN_DEI_YELLOW,
     66	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
     67	NIX_MARK_CFG_MAX,
     68};
     69
     70/* For now considering MC resources needed for broadcast
      71 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
     72 */
     73#define MC_TBL_SIZE	MC_TBL_SZ_512
     74#define MC_BUF_CNT	MC_BUF_CNT_128
     75
     76struct mce {
     77	struct hlist_node	node;
     78	u16			pcifunc;
     79};
     80
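        /* Walk rvu->nix_blkaddr[], which lists NIX block addresses in
         * discovery order: blkaddr 0 returns the first NIX block address,
         * a valid NIX blkaddr returns the next one, and 0 is returned
         * once the list is exhausted.
         */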
     81int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
     82{
     83	int i = 0;
     84
      85	/* If blkaddr is 0, return the first NIX block address */
     86	if (blkaddr == 0)
     87		return rvu->nix_blkaddr[blkaddr];
     88
     89	while (i + 1 < MAX_NIX_BLKS) {
     90		if (rvu->nix_blkaddr[i] == blkaddr)
     91			return rvu->nix_blkaddr[i + 1];
     92		i++;
     93	}
     94
     95	return 0;
     96}
     97
     98bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
     99{
    100	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    101	int blkaddr;
    102
    103	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
    104	if (!pfvf->nixlf || blkaddr < 0)
    105		return false;
    106	return true;
    107}
    108
    109int rvu_get_nixlf_count(struct rvu *rvu)
    110{
    111	int blkaddr = 0, max = 0;
    112	struct rvu_block *block;
    113
    114	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
    115	while (blkaddr) {
    116		block = &rvu->hw->block[blkaddr];
    117		max += block->lf.max;
    118		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
    119	}
    120	return max;
    121}
    122
    123int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
    124{
    125	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    126	struct rvu_hwinfo *hw = rvu->hw;
    127	int blkaddr;
    128
    129	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
    130	if (!pfvf->nixlf || blkaddr < 0)
    131		return NIX_AF_ERR_AF_LF_INVALID;
    132
    133	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
    134	if (*nixlf < 0)
    135		return NIX_AF_ERR_AF_LF_INVALID;
    136
    137	if (nix_blkaddr)
    138		*nix_blkaddr = blkaddr;
    139
    140	return 0;
    141}
    142
    143int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
    144			struct nix_hw **nix_hw, int *blkaddr)
    145{
    146	struct rvu_pfvf *pfvf;
    147
    148	pfvf = rvu_get_pfvf(rvu, pcifunc);
    149	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
    150	if (!pfvf->nixlf || *blkaddr < 0)
    151		return NIX_AF_ERR_AF_LF_INVALID;
    152
    153	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
    154	if (!*nix_hw)
    155		return NIX_AF_ERR_INVALID_NIXBLK;
    156	return 0;
    157}
    158
    159static void nix_mce_list_init(struct nix_mce_list *list, int max)
    160{
    161	INIT_HLIST_HEAD(&list->head);
    162	list->count = 0;
    163	list->max = max;
    164}
    165
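        /* Reserve 'count' consecutive MCE entries with a simple bump
         * allocator and return the index of the first reserved entry.
         */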
    166static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
    167{
    168	int idx;
    169
    170	if (!mcast)
    171		return 0;
    172
    173	idx = mcast->next_free_mce;
    174	mcast->next_free_mce += count;
    175	return idx;
    176}
    177
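        /* Resolve a NIX block address to its nix_hw state; hw->nix[] is
         * indexed in the same order the NIX blocks are discovered.
         */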
    178struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
    179{
    180	int nix_blkaddr = 0, i = 0;
    181	struct rvu *rvu = hw->rvu;
    182
    183	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
    184	while (nix_blkaddr) {
    185		if (blkaddr == nix_blkaddr && hw->nix)
    186			return &hw->nix[i];
    187		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
    188		i++;
    189	}
    190	return NULL;
    191}
    192
    193u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
    194{
    195	dwrr_mtu &= 0x1FULL;
    196
     197	/* MTU used for DWRR calculation is a power of 2 up to 64K bytes.
    198	 * Value of 4 is reserved for MTU value of 9728 bytes.
    199	 * Value of 5 is reserved for MTU value of 10240 bytes.
    200	 */
    201	switch (dwrr_mtu) {
    202	case 4:
    203		return 9728;
    204	case 5:
    205		return 10240;
    206	default:
    207		return BIT_ULL(dwrr_mtu);
    208	}
    209
    210	return 0;
    211}
    212
    213u32 convert_bytes_to_dwrr_mtu(u32 bytes)
    214{
     215	/* MTU used for DWRR calculation is a power of 2 up to 64K bytes.
    216	 * Value of 4 is reserved for MTU value of 9728 bytes.
    217	 * Value of 5 is reserved for MTU value of 10240 bytes.
    218	 */
    219	if (bytes > BIT_ULL(16))
    220		return 0;
    221
    222	switch (bytes) {
    223	case 9728:
    224		return 4;
    225	case 10240:
    226		return 5;
    227	default:
    228		return ilog2(bytes);
    229	}
    230
    231	return 0;
    232}
    233
    234static void nix_rx_sync(struct rvu *rvu, int blkaddr)
    235{
    236	int err;
    237
    238	/* Sync all in flight RX packets to LLC/DRAM */
    239	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
    240	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
    241	if (err)
    242		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
    243
    244	/* SW_SYNC ensures all existing transactions are finished and pkts
     245	 * are written to LLC/DRAM; queues should be torn down after a
     246	 * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
    247	 * an existing transaction might end after SW_SYNC operation. To
    248	 * ensure operation is fully done, do the SW_SYNC twice.
    249	 */
    250	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
    251	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
    252	if (err)
    253		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
    254}
    255
    256static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
    257			    int lvl, u16 pcifunc, u16 schq)
    258{
    259	struct rvu_hwinfo *hw = rvu->hw;
    260	struct nix_txsch *txsch;
    261	struct nix_hw *nix_hw;
    262	u16 map_func;
    263
    264	nix_hw = get_nix_hw(rvu->hw, blkaddr);
    265	if (!nix_hw)
    266		return false;
    267
    268	txsch = &nix_hw->txsch[lvl];
    269	/* Check out of bounds */
    270	if (schq >= txsch->schq.max)
    271		return false;
    272
    273	mutex_lock(&rvu->rsrc_lock);
    274	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
    275	mutex_unlock(&rvu->rsrc_lock);
    276
     277	/* TLs aggregating traffic are shared across PF and VFs */
    278	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
    279		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
    280			return false;
    281		else
    282			return true;
    283	}
    284
    285	if (map_func != pcifunc)
    286		return false;
    287
    288	return true;
    289}
    290
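        /* Per interface type (CGX/LBK/SDP), set up the LF's RX/TX channel
         * base and count and install the default MCAM entries (unicast,
         * broadcast replication, and promisc for LBK/SDP).
         */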
    291static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
    292			      struct nix_lf_alloc_rsp *rsp, bool loop)
    293{
    294	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
    295	u16 req_chan_base, req_chan_end, req_chan_cnt;
    296	struct rvu_hwinfo *hw = rvu->hw;
    297	struct sdp_node_info *sdp_info;
    298	int pkind, pf, vf, lbkid, vfid;
    299	u8 cgx_id, lmac_id;
    300	bool from_vf;
    301	int err;
    302
    303	pf = rvu_get_pf(pcifunc);
    304	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
    305	    type != NIX_INTF_TYPE_SDP)
    306		return 0;
    307
    308	switch (type) {
    309	case NIX_INTF_TYPE_CGX:
    310		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
    311		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
    312
    313		pkind = rvu_npc_get_pkind(rvu, pf);
    314		if (pkind < 0) {
    315			dev_err(rvu->dev,
    316				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
    317			return -EINVAL;
    318		}
    319		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
    320		pfvf->tx_chan_base = pfvf->rx_chan_base;
    321		pfvf->rx_chan_cnt = 1;
    322		pfvf->tx_chan_cnt = 1;
    323		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
    324
    325		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
    326		rvu_npc_set_pkind(rvu, pkind, pfvf);
    327
    328		break;
    329	case NIX_INTF_TYPE_LBK:
    330		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
    331
    332		/* If NIX1 block is present on the silicon then NIXes are
     333		 * assigned alternately to lbk interfaces. NIX0 should
    334		 * send packets on lbk link 1 channels and NIX1 should send
    335		 * on lbk link 0 channels for the communication between
    336		 * NIX0 and NIX1.
    337		 */
    338		lbkid = 0;
    339		if (rvu->hw->lbk_links > 1)
    340			lbkid = vf & 0x1 ? 0 : 1;
    341
    342		/* By default NIX0 is configured to send packet on lbk link 1
    343		 * (which corresponds to LBK1), same packet will receive on
    344		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
    345		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
    346		 * link 1.
     347		 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
     348		 * transmits and receives on lbk link 0, which corresponds
    349		 * to LBK1 block, back to back connectivity between NIX and
    350		 * LBK can be achieved (which is similar to 96xx)
    351		 *
    352		 *			RX		TX
    353		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
    354		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
    355		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
    356		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
    357		 */
    358		if (loop)
    359			lbkid = !lbkid;
    360
    361		/* Note that AF's VFs work in pairs and talk over consecutive
     362		 * loopback channels. Therefore, if an odd number of AF VFs is
     363		 * enabled, the last VF remains without a pair.
    364		 */
    365		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
    366		pfvf->tx_chan_base = vf & 0x1 ?
    367					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
    368					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
    369		pfvf->rx_chan_cnt = 1;
    370		pfvf->tx_chan_cnt = 1;
    371		rsp->tx_link = hw->cgx_links + lbkid;
    372		pfvf->lbkid = lbkid;
    373		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
    374		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
    375					      pfvf->rx_chan_base,
    376					      pfvf->rx_chan_cnt);
    377
    378		break;
    379	case NIX_INTF_TYPE_SDP:
    380		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
    381		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
    382		sdp_info = parent_pf->sdp_info;
    383		if (!sdp_info) {
    384			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
    385			return -EINVAL;
    386		}
    387		if (from_vf) {
    388			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
    389				sdp_info->num_pf_rings;
    390			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
    391			for (vfid = 0; vfid < vf; vfid++)
    392				req_chan_base += sdp_info->vf_rings[vfid];
    393			req_chan_cnt = sdp_info->vf_rings[vf];
    394			req_chan_end = req_chan_base + req_chan_cnt - 1;
    395			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
    396			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
    397				dev_err(rvu->dev,
    398					"PF_Func 0x%x: Invalid channel base and count\n",
    399					pcifunc);
    400				return -EINVAL;
    401			}
    402		} else {
    403			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
    404			req_chan_cnt = sdp_info->num_pf_rings;
    405		}
    406
    407		pfvf->rx_chan_base = req_chan_base;
    408		pfvf->rx_chan_cnt = req_chan_cnt;
    409		pfvf->tx_chan_base = pfvf->rx_chan_base;
    410		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
    411
    412		rsp->tx_link = hw->cgx_links + hw->lbk_links;
    413		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
    414					      pfvf->rx_chan_base,
    415					      pfvf->rx_chan_cnt);
    416		break;
    417	}
    418
    419	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
    420	 * RVU PF/VF's MAC address.
    421	 */
    422	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
    423				    pfvf->rx_chan_base, pfvf->mac_addr);
    424
    425	/* Add this PF_FUNC to bcast pkt replication list */
    426	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
    427	if (err) {
    428		dev_err(rvu->dev,
    429			"Bcast list, failed to enable PF_FUNC 0x%x\n",
    430			pcifunc);
    431		return err;
    432	}
    433	/* Install MCAM rule matching Ethernet broadcast mac address */
    434	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
    435					  nixlf, pfvf->rx_chan_base);
    436
    437	pfvf->maxlen = NIC_HW_MIN_FRS;
    438	pfvf->minlen = NIC_HW_MIN_FRS;
    439
    440	return 0;
    441}
    442
    443static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
    444{
    445	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
    446	int err;
    447
    448	pfvf->maxlen = 0;
    449	pfvf->minlen = 0;
    450
    451	/* Remove this PF_FUNC from bcast pkt replication list */
    452	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
    453	if (err) {
    454		dev_err(rvu->dev,
    455			"Bcast list, failed to disable PF_FUNC 0x%x\n",
    456			pcifunc);
    457	}
    458
    459	/* Free and disable any MCAM entries used by this NIX LF */
    460	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
    461
    462	/* Disable DMAC filters used */
    463	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
    464}
    465
    466int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
    467				    struct nix_bp_cfg_req *req,
    468				    struct msg_rsp *rsp)
    469{
    470	u16 pcifunc = req->hdr.pcifunc;
    471	struct rvu_pfvf *pfvf;
    472	int blkaddr, pf, type;
    473	u16 chan_base, chan;
    474	u64 cfg;
    475
    476	pf = rvu_get_pf(pcifunc);
    477	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
    478	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
    479		return 0;
    480
    481	pfvf = rvu_get_pfvf(rvu, pcifunc);
    482	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
    483
    484	chan_base = pfvf->rx_chan_base + req->chan_base;
    485	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
    486		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
    487		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
    488			    cfg & ~BIT_ULL(16));
    489	}
    490	return 0;
    491}
    492
    493static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
    494			    int type, int chan_id)
    495{
    496	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
    497	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
    498	struct rvu_hwinfo *hw = rvu->hw;
    499	struct rvu_pfvf *pfvf;
    500	u8 cgx_id, lmac_id;
    501	u64 cfg;
    502
    503	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
    504	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
    505	lmac_chan_cnt = cfg & 0xFF;
    506
    507	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
    508	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
    509
    510	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
    511	sdp_chan_cnt = cfg & 0xFFF;
    512	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
    513
    514	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
    515
     516	/* Backpressure ID range division:
     517	 * CGX channels are mapped to (0 - 191) BPIDs
     518	 * LBK channels are mapped to (192 - 255) BPIDs
     519	 * SDP channels are mapped to (256 - 511) BPIDs
     520	 *
     521	 * LMAC channels and BPIDs are mapped as follows:
    522	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
    523	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
    524	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
    525	 */
    526	switch (type) {
    527	case NIX_INTF_TYPE_CGX:
    528		if ((req->chan_base + req->chan_cnt) > 16)
    529			return -EINVAL;
    530		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
    531		/* Assign bpid based on cgx, lmac and chan id */
    532		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
    533			(lmac_id * lmac_chan_cnt) + req->chan_base;
    534
    535		if (req->bpid_per_chan)
    536			bpid += chan_id;
    537		if (bpid > cgx_bpid_cnt)
    538			return -EINVAL;
    539		break;
    540
    541	case NIX_INTF_TYPE_LBK:
    542		if ((req->chan_base + req->chan_cnt) > 63)
    543			return -EINVAL;
    544		bpid = cgx_bpid_cnt + req->chan_base;
    545		if (req->bpid_per_chan)
    546			bpid += chan_id;
    547		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
    548			return -EINVAL;
    549		break;
    550	case NIX_INTF_TYPE_SDP:
    551		if ((req->chan_base + req->chan_cnt) > 255)
    552			return -EINVAL;
    553
    554		bpid = sdp_bpid_cnt + req->chan_base;
    555		if (req->bpid_per_chan)
    556			bpid += chan_id;
    557
    558		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
    559			return -EINVAL;
    560		break;
    561	default:
    562		return -EINVAL;
    563	}
    564	return bpid;
    565}
    566
    567int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
    568				   struct nix_bp_cfg_req *req,
    569				   struct nix_bp_cfg_rsp *rsp)
    570{
    571	int blkaddr, pf, type, chan_id = 0;
    572	u16 pcifunc = req->hdr.pcifunc;
    573	struct rvu_pfvf *pfvf;
    574	u16 chan_base, chan;
    575	s16 bpid, bpid_base;
    576	u64 cfg;
    577
    578	pf = rvu_get_pf(pcifunc);
    579	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
    580	if (is_sdp_pfvf(pcifunc))
    581		type = NIX_INTF_TYPE_SDP;
    582
    583	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
    584	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
    585	    type != NIX_INTF_TYPE_SDP)
    586		return 0;
    587
    588	pfvf = rvu_get_pfvf(rvu, pcifunc);
    589	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
    590
    591	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
    592	chan_base = pfvf->rx_chan_base + req->chan_base;
    593	bpid = bpid_base;
    594
    595	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
    596		if (bpid < 0) {
     597			dev_warn(rvu->dev, "Failed to enable backpressure\n");
    598			return -EINVAL;
    599		}
    600
    601		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
    602		cfg &= ~GENMASK_ULL(8, 0);
    603		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
    604			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
    605		chan_id++;
    606		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
    607	}
    608
    609	for (chan = 0; chan < req->chan_cnt; chan++) {
     610		/* Map the channel and the bpid assigned to it */
    611		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
    612					(bpid_base & 0x3FF);
    613		if (req->bpid_per_chan)
    614			bpid_base++;
    615	}
    616	rsp->chan_cnt = req->chan_cnt;
    617
    618	return 0;
    619}
    620
    621static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
    622				 u64 format, bool v4, u64 *fidx)
    623{
    624	struct nix_lso_format field = {0};
    625
    626	/* IP's Length field */
    627	field.layer = NIX_TXLAYER_OL3;
    628	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
    629	field.offset = v4 ? 2 : 4;
    630	field.sizem1 = 1; /* i.e 2 bytes */
    631	field.alg = NIX_LSOALG_ADD_PAYLEN;
    632	rvu_write64(rvu, blkaddr,
    633		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
    634		    *(u64 *)&field);
    635
    636	/* No ID field in IPv6 header */
    637	if (!v4)
    638		return;
    639
    640	/* IP's ID field */
    641	field.layer = NIX_TXLAYER_OL3;
    642	field.offset = 4;
    643	field.sizem1 = 1; /* i.e 2 bytes */
    644	field.alg = NIX_LSOALG_ADD_SEGNUM;
    645	rvu_write64(rvu, blkaddr,
    646		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
    647		    *(u64 *)&field);
    648}
    649
    650static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
    651				 u64 format, u64 *fidx)
    652{
    653	struct nix_lso_format field = {0};
    654
    655	/* TCP's sequence number field */
    656	field.layer = NIX_TXLAYER_OL4;
    657	field.offset = 4;
    658	field.sizem1 = 3; /* i.e 4 bytes */
    659	field.alg = NIX_LSOALG_ADD_OFFSET;
    660	rvu_write64(rvu, blkaddr,
    661		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
    662		    *(u64 *)&field);
    663
    664	/* TCP's flags field */
    665	field.layer = NIX_TXLAYER_OL4;
    666	field.offset = 12;
    667	field.sizem1 = 1; /* 2 bytes */
    668	field.alg = NIX_LSOALG_TCP_FLAGS;
    669	rvu_write64(rvu, blkaddr,
    670		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
    671		    *(u64 *)&field);
    672}
    673
    674static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
    675{
    676	u64 cfg, idx, fidx = 0;
    677
    678	/* Get max HW supported format indices */
    679	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
    680	nix_hw->lso.total = cfg;
    681
    682	/* Enable LSO */
    683	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
    684	/* For TSO, set first and middle segment flags to
    685	 * mask out PSH, RST & FIN flags in TCP packet
    686	 */
    687	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
    688	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
    689	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
    690
    691	/* Setup default static LSO formats
    692	 *
    693	 * Configure format fields for TCPv4 segmentation offload
    694	 */
    695	idx = NIX_LSO_FORMAT_IDX_TSOV4;
    696	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
    697	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
    698
    699	/* Set rest of the fields to NOP */
    700	for (; fidx < 8; fidx++) {
    701		rvu_write64(rvu, blkaddr,
    702			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
    703	}
    704	nix_hw->lso.in_use++;
    705
    706	/* Configure format fields for TCPv6 segmentation offload */
    707	idx = NIX_LSO_FORMAT_IDX_TSOV6;
    708	fidx = 0;
    709	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
    710	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
    711
    712	/* Set rest of the fields to NOP */
    713	for (; fidx < 8; fidx++) {
    714		rvu_write64(rvu, blkaddr,
    715			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
    716	}
    717	nix_hw->lso.in_use++;
    718}
    719
    720static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
    721{
    722	kfree(pfvf->rq_bmap);
    723	kfree(pfvf->sq_bmap);
    724	kfree(pfvf->cq_bmap);
    725	if (pfvf->rq_ctx)
    726		qmem_free(rvu->dev, pfvf->rq_ctx);
    727	if (pfvf->sq_ctx)
    728		qmem_free(rvu->dev, pfvf->sq_ctx);
    729	if (pfvf->cq_ctx)
    730		qmem_free(rvu->dev, pfvf->cq_ctx);
    731	if (pfvf->rss_ctx)
    732		qmem_free(rvu->dev, pfvf->rss_ctx);
    733	if (pfvf->nix_qints_ctx)
    734		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
    735	if (pfvf->cq_ints_ctx)
    736		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
    737
    738	pfvf->rq_bmap = NULL;
    739	pfvf->cq_bmap = NULL;
    740	pfvf->sq_bmap = NULL;
    741	pfvf->rq_ctx = NULL;
    742	pfvf->sq_ctx = NULL;
    743	pfvf->cq_ctx = NULL;
    744	pfvf->rss_ctx = NULL;
    745	pfvf->nix_qints_ctx = NULL;
    746	pfvf->cq_ints_ctx = NULL;
    747}
    748
    749static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
    750			      struct rvu_pfvf *pfvf, int nixlf,
    751			      int rss_sz, int rss_grps, int hwctx_size,
    752			      u64 way_mask, bool tag_lsb_as_adder)
    753{
    754	int err, grp, num_indices;
    755	u64 val;
    756
    757	/* RSS is not requested for this NIXLF */
    758	if (!rss_sz)
    759		return 0;
    760	num_indices = rss_sz * rss_grps;
    761
    762	/* Alloc NIX RSS HW context memory and config the base */
    763	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
    764	if (err)
    765		return err;
    766
    767	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
    768		    (u64)pfvf->rss_ctx->iova);
    769
    770	/* Config full RSS table size, enable RSS and caching */
    771	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
    772			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
    773
    774	if (tag_lsb_as_adder)
    775		val |= BIT_ULL(5);
    776
    777	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
    778	/* Config RSS group offset and sizes */
    779	for (grp = 0; grp < rss_grps; grp++)
    780		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
    781			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
    782	return 0;
    783}
    784
    785static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
    786			       struct nix_aq_inst_s *inst)
    787{
    788	struct admin_queue *aq = block->aq;
    789	struct nix_aq_res_s *result;
    790	int timeout = 1000;
    791	u64 reg, head;
    792
    793	result = (struct nix_aq_res_s *)aq->res->base;
    794
    795	/* Get current head pointer where to append this instruction */
    796	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
    797	head = (reg >> 4) & AQ_PTR_MASK;
    798
    799	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
    800	       (void *)inst, aq->inst->entry_sz);
    801	memset(result, 0, sizeof(*result));
    802	/* sync into memory */
    803	wmb();
    804
    805	/* Ring the doorbell and wait for result */
    806	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
    807	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
    808		cpu_relax();
    809		udelay(1);
    810		timeout--;
    811		if (!timeout)
    812			return -EBUSY;
    813	}
    814
    815	if (result->compcode != NIX_AQ_COMP_GOOD)
    816		/* TODO: Replace this with some error code */
    817		return -EBUSY;
    818
    819	return 0;
    820}
    821
    822static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
    823				   struct nix_aq_enq_req *req,
    824				   struct nix_aq_enq_rsp *rsp)
    825{
    826	struct rvu_hwinfo *hw = rvu->hw;
    827	u16 pcifunc = req->hdr.pcifunc;
    828	int nixlf, blkaddr, rc = 0;
    829	struct nix_aq_inst_s inst;
    830	struct rvu_block *block;
    831	struct admin_queue *aq;
    832	struct rvu_pfvf *pfvf;
    833	void *ctx, *mask;
    834	bool ena;
    835	u64 cfg;
    836
    837	blkaddr = nix_hw->blkaddr;
    838	block = &hw->block[blkaddr];
    839	aq = block->aq;
    840	if (!aq) {
    841		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
    842		return NIX_AF_ERR_AQ_ENQUEUE;
    843	}
    844
    845	pfvf = rvu_get_pfvf(rvu, pcifunc);
    846	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
    847
    848	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
    849	 * operations done by AF itself.
    850	 */
    851	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
    852	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
    853		if (!pfvf->nixlf || nixlf < 0)
    854			return NIX_AF_ERR_AF_LF_INVALID;
    855	}
    856
    857	switch (req->ctype) {
    858	case NIX_AQ_CTYPE_RQ:
    859		/* Check if index exceeds max no of queues */
    860		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
    861			rc = NIX_AF_ERR_AQ_ENQUEUE;
    862		break;
    863	case NIX_AQ_CTYPE_SQ:
    864		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
    865			rc = NIX_AF_ERR_AQ_ENQUEUE;
    866		break;
    867	case NIX_AQ_CTYPE_CQ:
    868		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
    869			rc = NIX_AF_ERR_AQ_ENQUEUE;
    870		break;
    871	case NIX_AQ_CTYPE_RSS:
    872		/* Check if RSS is enabled and qidx is within range */
    873		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
    874		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
    875		    (req->qidx >= (256UL << (cfg & 0xF))))
    876			rc = NIX_AF_ERR_AQ_ENQUEUE;
    877		break;
    878	case NIX_AQ_CTYPE_MCE:
    879		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
    880
    881		/* Check if index exceeds MCE list length */
    882		if (!nix_hw->mcast.mce_ctx ||
    883		    (req->qidx >= (256UL << (cfg & 0xF))))
    884			rc = NIX_AF_ERR_AQ_ENQUEUE;
    885
    886		/* Adding multicast lists for requests from PF/VFs is not
    887		 * yet supported, so ignore this.
    888		 */
    889		if (rsp)
    890			rc = NIX_AF_ERR_AQ_ENQUEUE;
    891		break;
    892	case NIX_AQ_CTYPE_BANDPROF:
    893		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
    894					nix_hw, pcifunc))
    895			rc = NIX_AF_ERR_INVALID_BANDPROF;
    896		break;
    897	default:
    898		rc = NIX_AF_ERR_AQ_ENQUEUE;
    899	}
    900
    901	if (rc)
    902		return rc;
    903
     904	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
    905	if (req->ctype == NIX_AQ_CTYPE_SQ &&
    906	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
    907	     (req->op == NIX_AQ_INSTOP_WRITE &&
    908	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
    909		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
    910				     pcifunc, req->sq.smq))
    911			return NIX_AF_ERR_AQ_ENQUEUE;
    912	}
    913
    914	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
    915	inst.lf = nixlf;
    916	inst.cindex = req->qidx;
    917	inst.ctype = req->ctype;
    918	inst.op = req->op;
    919	/* Currently we are not supporting enqueuing multiple instructions,
    920	 * so always choose first entry in result memory.
    921	 */
    922	inst.res_addr = (u64)aq->res->iova;
    923
    924	/* Hardware uses same aq->res->base for updating result of
    925	 * previous instruction hence wait here till it is done.
    926	 */
    927	spin_lock(&aq->lock);
    928
    929	/* Clean result + context memory */
    930	memset(aq->res->base, 0, aq->res->entry_sz);
    931	/* Context needs to be written at RES_ADDR + 128 */
    932	ctx = aq->res->base + 128;
    933	/* Mask needs to be written at RES_ADDR + 256 */
    934	mask = aq->res->base + 256;
    935
    936	switch (req->op) {
    937	case NIX_AQ_INSTOP_WRITE:
    938		if (req->ctype == NIX_AQ_CTYPE_RQ)
    939			memcpy(mask, &req->rq_mask,
    940			       sizeof(struct nix_rq_ctx_s));
    941		else if (req->ctype == NIX_AQ_CTYPE_SQ)
    942			memcpy(mask, &req->sq_mask,
    943			       sizeof(struct nix_sq_ctx_s));
    944		else if (req->ctype == NIX_AQ_CTYPE_CQ)
    945			memcpy(mask, &req->cq_mask,
    946			       sizeof(struct nix_cq_ctx_s));
    947		else if (req->ctype == NIX_AQ_CTYPE_RSS)
    948			memcpy(mask, &req->rss_mask,
    949			       sizeof(struct nix_rsse_s));
    950		else if (req->ctype == NIX_AQ_CTYPE_MCE)
    951			memcpy(mask, &req->mce_mask,
    952			       sizeof(struct nix_rx_mce_s));
    953		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
    954			memcpy(mask, &req->prof_mask,
    955			       sizeof(struct nix_bandprof_s));
    956		fallthrough;
    957	case NIX_AQ_INSTOP_INIT:
    958		if (req->ctype == NIX_AQ_CTYPE_RQ)
    959			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
    960		else if (req->ctype == NIX_AQ_CTYPE_SQ)
    961			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
    962		else if (req->ctype == NIX_AQ_CTYPE_CQ)
    963			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
    964		else if (req->ctype == NIX_AQ_CTYPE_RSS)
    965			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
    966		else if (req->ctype == NIX_AQ_CTYPE_MCE)
    967			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
    968		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
    969			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
    970		break;
    971	case NIX_AQ_INSTOP_NOP:
    972	case NIX_AQ_INSTOP_READ:
    973	case NIX_AQ_INSTOP_LOCK:
    974	case NIX_AQ_INSTOP_UNLOCK:
    975		break;
    976	default:
    977		rc = NIX_AF_ERR_AQ_ENQUEUE;
    978		spin_unlock(&aq->lock);
    979		return rc;
    980	}
    981
    982	/* Submit the instruction to AQ */
    983	rc = nix_aq_enqueue_wait(rvu, block, &inst);
    984	if (rc) {
    985		spin_unlock(&aq->lock);
    986		return rc;
    987	}
    988
    989	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
    990	if (req->op == NIX_AQ_INSTOP_INIT) {
    991		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
    992			__set_bit(req->qidx, pfvf->rq_bmap);
    993		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
    994			__set_bit(req->qidx, pfvf->sq_bmap);
    995		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
    996			__set_bit(req->qidx, pfvf->cq_bmap);
    997	}
    998
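        	/* For WRITE ops the 'ena' bit only changes where the caller's
        	 * mask selects it; otherwise keep the previously tracked state,
        	 * then update the local RQ/SQ/CQ bitmaps to match.
        	 */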
    999	if (req->op == NIX_AQ_INSTOP_WRITE) {
   1000		if (req->ctype == NIX_AQ_CTYPE_RQ) {
   1001			ena = (req->rq.ena & req->rq_mask.ena) |
   1002				(test_bit(req->qidx, pfvf->rq_bmap) &
   1003				~req->rq_mask.ena);
   1004			if (ena)
   1005				__set_bit(req->qidx, pfvf->rq_bmap);
   1006			else
   1007				__clear_bit(req->qidx, pfvf->rq_bmap);
   1008		}
   1009		if (req->ctype == NIX_AQ_CTYPE_SQ) {
   1010			ena = (req->rq.ena & req->sq_mask.ena) |
   1011				(test_bit(req->qidx, pfvf->sq_bmap) &
   1012				~req->sq_mask.ena);
   1013			if (ena)
   1014				__set_bit(req->qidx, pfvf->sq_bmap);
   1015			else
   1016				__clear_bit(req->qidx, pfvf->sq_bmap);
   1017		}
   1018		if (req->ctype == NIX_AQ_CTYPE_CQ) {
   1019			ena = (req->rq.ena & req->cq_mask.ena) |
   1020				(test_bit(req->qidx, pfvf->cq_bmap) &
   1021				~req->cq_mask.ena);
   1022			if (ena)
   1023				__set_bit(req->qidx, pfvf->cq_bmap);
   1024			else
   1025				__clear_bit(req->qidx, pfvf->cq_bmap);
   1026		}
   1027	}
   1028
   1029	if (rsp) {
   1030		/* Copy read context into mailbox */
   1031		if (req->op == NIX_AQ_INSTOP_READ) {
   1032			if (req->ctype == NIX_AQ_CTYPE_RQ)
   1033				memcpy(&rsp->rq, ctx,
   1034				       sizeof(struct nix_rq_ctx_s));
   1035			else if (req->ctype == NIX_AQ_CTYPE_SQ)
   1036				memcpy(&rsp->sq, ctx,
   1037				       sizeof(struct nix_sq_ctx_s));
   1038			else if (req->ctype == NIX_AQ_CTYPE_CQ)
   1039				memcpy(&rsp->cq, ctx,
   1040				       sizeof(struct nix_cq_ctx_s));
   1041			else if (req->ctype == NIX_AQ_CTYPE_RSS)
   1042				memcpy(&rsp->rss, ctx,
   1043				       sizeof(struct nix_rsse_s));
   1044			else if (req->ctype == NIX_AQ_CTYPE_MCE)
   1045				memcpy(&rsp->mce, ctx,
   1046				       sizeof(struct nix_rx_mce_s));
   1047			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
   1048				memcpy(&rsp->prof, ctx,
   1049				       sizeof(struct nix_bandprof_s));
   1050		}
   1051	}
   1052
   1053	spin_unlock(&aq->lock);
   1054	return 0;
   1055}
   1056
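        /* Read back the CQ context after an AQ WRITE and compare the fields
         * selected by the caller's mask against the requested values; returns
         * NIX_AF_ERR_AQ_CTX_RETRY_WRITE when the write did not take effect
         * (used by the CQ-write errata workaround in rvu_nix_aq_enq_inst()).
         */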
   1057static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
   1058				 struct nix_aq_enq_req *req, u8 ctype)
   1059{
   1060	struct nix_cn10k_aq_enq_req aq_req;
   1061	struct nix_cn10k_aq_enq_rsp aq_rsp;
   1062	int rc, word;
   1063
   1064	if (req->ctype != NIX_AQ_CTYPE_CQ)
   1065		return 0;
   1066
   1067	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
   1068				 req->hdr.pcifunc, ctype, req->qidx);
   1069	if (rc) {
   1070		dev_err(rvu->dev,
   1071			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
   1072			__func__, nix_get_ctx_name(ctype), req->qidx,
   1073			req->hdr.pcifunc);
   1074		return rc;
   1075	}
   1076
   1077	/* Make copy of original context & mask which are required
   1078	 * for resubmission
   1079	 */
   1080	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
   1081	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
   1082
   1083	/* exclude fields which HW can update */
   1084	aq_req.cq_mask.cq_err       = 0;
   1085	aq_req.cq_mask.wrptr        = 0;
   1086	aq_req.cq_mask.tail         = 0;
   1087	aq_req.cq_mask.head	    = 0;
   1088	aq_req.cq_mask.avg_level    = 0;
   1089	aq_req.cq_mask.update_time  = 0;
   1090	aq_req.cq_mask.substream    = 0;
   1091
   1092	/* Context mask (cq_mask) holds mask value of fields which
   1093	 * are changed in AQ WRITE operation.
   1094	 * for example cq.drop = 0xa;
   1095	 *	       cq_mask.drop = 0xff;
   1096	 * Below logic performs '&' between cq and cq_mask so that non
   1097	 * updated fields are masked out for request and response
   1098	 * comparison
   1099	 */
   1100	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
   1101	     word++) {
   1102		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
   1103			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
   1104		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
   1105			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
   1106	}
   1107
   1108	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
   1109		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
   1110
   1111	return 0;
   1112}
   1113
   1114static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
   1115			       struct nix_aq_enq_rsp *rsp)
   1116{
   1117	struct nix_hw *nix_hw;
   1118	int err, retries = 5;
   1119	int blkaddr;
   1120
   1121	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
   1122	if (blkaddr < 0)
   1123		return NIX_AF_ERR_AF_LF_INVALID;
   1124
   1125	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
   1126	if (!nix_hw)
   1127		return NIX_AF_ERR_INVALID_NIXBLK;
   1128
   1129retry:
   1130	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
   1131
   1132	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
    1133	 * As a workaround, perform a CQ context read after each AQ write. If the
    1134	 * read shows the AQ write was not applied, perform the AQ write again.
   1135	 */
   1136	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
   1137		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
   1138		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
   1139			if (retries--)
   1140				goto retry;
   1141			else
   1142				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
   1143		}
   1144	}
   1145
   1146	return err;
   1147}
   1148
   1149static const char *nix_get_ctx_name(int ctype)
   1150{
   1151	switch (ctype) {
   1152	case NIX_AQ_CTYPE_CQ:
   1153		return "CQ";
   1154	case NIX_AQ_CTYPE_SQ:
   1155		return "SQ";
   1156	case NIX_AQ_CTYPE_RQ:
   1157		return "RQ";
   1158	case NIX_AQ_CTYPE_RSS:
   1159		return "RSS";
   1160	}
   1161	return "";
   1162}
   1163
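        /* Disable every enabled context of the requested type for this LF:
         * for each queue set in the RQ/SQ/CQ bitmap, issue a masked AQ WRITE
         * clearing the 'ena' bit (and bp_ena for CQs).
         */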
   1164static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
   1165{
   1166	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
   1167	struct nix_aq_enq_req aq_req;
   1168	unsigned long *bmap;
   1169	int qidx, q_cnt = 0;
   1170	int err = 0, rc;
   1171
   1172	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
   1173		return NIX_AF_ERR_AQ_ENQUEUE;
   1174
   1175	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
   1176	aq_req.hdr.pcifunc = req->hdr.pcifunc;
   1177
   1178	if (req->ctype == NIX_AQ_CTYPE_CQ) {
   1179		aq_req.cq.ena = 0;
   1180		aq_req.cq_mask.ena = 1;
   1181		aq_req.cq.bp_ena = 0;
   1182		aq_req.cq_mask.bp_ena = 1;
   1183		q_cnt = pfvf->cq_ctx->qsize;
   1184		bmap = pfvf->cq_bmap;
   1185	}
   1186	if (req->ctype == NIX_AQ_CTYPE_SQ) {
   1187		aq_req.sq.ena = 0;
   1188		aq_req.sq_mask.ena = 1;
   1189		q_cnt = pfvf->sq_ctx->qsize;
   1190		bmap = pfvf->sq_bmap;
   1191	}
   1192	if (req->ctype == NIX_AQ_CTYPE_RQ) {
   1193		aq_req.rq.ena = 0;
   1194		aq_req.rq_mask.ena = 1;
   1195		q_cnt = pfvf->rq_ctx->qsize;
   1196		bmap = pfvf->rq_bmap;
   1197	}
   1198
   1199	aq_req.ctype = req->ctype;
   1200	aq_req.op = NIX_AQ_INSTOP_WRITE;
   1201
   1202	for (qidx = 0; qidx < q_cnt; qidx++) {
   1203		if (!test_bit(qidx, bmap))
   1204			continue;
   1205		aq_req.qidx = qidx;
   1206		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
   1207		if (rc) {
   1208			err = rc;
   1209			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
   1210				nix_get_ctx_name(req->ctype), qidx);
   1211		}
   1212	}
   1213
   1214	return err;
   1215}
   1216
   1217#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
   1218static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
   1219{
   1220	struct nix_aq_enq_req lock_ctx_req;
   1221	int err;
   1222
   1223	if (req->op != NIX_AQ_INSTOP_INIT)
   1224		return 0;
   1225
   1226	if (req->ctype == NIX_AQ_CTYPE_MCE ||
   1227	    req->ctype == NIX_AQ_CTYPE_DYNO)
   1228		return 0;
   1229
   1230	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
   1231	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
   1232	lock_ctx_req.ctype = req->ctype;
   1233	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
   1234	lock_ctx_req.qidx = req->qidx;
   1235	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
   1236	if (err)
   1237		dev_err(rvu->dev,
   1238			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
   1239			req->hdr.pcifunc,
   1240			nix_get_ctx_name(req->ctype), req->qidx);
   1241	return err;
   1242}
   1243
   1244int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
   1245				struct nix_aq_enq_req *req,
   1246				struct nix_aq_enq_rsp *rsp)
   1247{
   1248	int err;
   1249
   1250	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
   1251	if (!err)
   1252		err = nix_lf_hwctx_lockdown(rvu, req);
   1253	return err;
   1254}
   1255#else
   1256
   1257int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
   1258				struct nix_aq_enq_req *req,
   1259				struct nix_aq_enq_rsp *rsp)
   1260{
   1261	return rvu_nix_aq_enq_inst(rvu, req, rsp);
   1262}
   1263#endif
   1264/* CN10K mbox handler */
   1265int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
   1266				      struct nix_cn10k_aq_enq_req *req,
   1267				      struct nix_cn10k_aq_enq_rsp *rsp)
   1268{
   1269	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
   1270				  (struct nix_aq_enq_rsp *)rsp);
   1271}
   1272
   1273int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
   1274				       struct hwctx_disable_req *req,
   1275				       struct msg_rsp *rsp)
   1276{
   1277	return nix_lf_hwctx_disable(rvu, req);
   1278}
   1279
   1280int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
   1281				  struct nix_lf_alloc_req *req,
   1282				  struct nix_lf_alloc_rsp *rsp)
   1283{
   1284	int nixlf, qints, hwctx_size, intf, err, rc = 0;
   1285	struct rvu_hwinfo *hw = rvu->hw;
   1286	u16 pcifunc = req->hdr.pcifunc;
   1287	struct rvu_block *block;
   1288	struct rvu_pfvf *pfvf;
   1289	u64 cfg, ctx_cfg;
   1290	int blkaddr;
   1291
   1292	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
   1293		return NIX_AF_ERR_PARAM;
   1294
   1295	if (req->way_mask)
   1296		req->way_mask &= 0xFFFF;
   1297
   1298	pfvf = rvu_get_pfvf(rvu, pcifunc);
   1299	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   1300	if (!pfvf->nixlf || blkaddr < 0)
   1301		return NIX_AF_ERR_AF_LF_INVALID;
   1302
   1303	block = &hw->block[blkaddr];
   1304	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
   1305	if (nixlf < 0)
   1306		return NIX_AF_ERR_AF_LF_INVALID;
   1307
   1308	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
   1309	if (req->npa_func) {
   1310		/* If default, use 'this' NIXLF's PFFUNC */
   1311		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
   1312			req->npa_func = pcifunc;
   1313		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
   1314			return NIX_AF_INVAL_NPA_PF_FUNC;
   1315	}
   1316
   1317	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
   1318	if (req->sso_func) {
   1319		/* If default, use 'this' NIXLF's PFFUNC */
   1320		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
   1321			req->sso_func = pcifunc;
   1322		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
   1323			return NIX_AF_INVAL_SSO_PF_FUNC;
   1324	}
   1325
   1326	/* If RSS is being enabled, check if requested config is valid.
    1327	 * RSS table size should be a power of two, otherwise
   1328	 * RSS_GRP::OFFSET + adder might go beyond that group or
   1329	 * won't be able to use entire table.
   1330	 */
   1331	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
   1332			    !is_power_of_2(req->rss_sz)))
   1333		return NIX_AF_ERR_RSS_SIZE_INVALID;
   1334
   1335	if (req->rss_sz &&
   1336	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
   1337		return NIX_AF_ERR_RSS_GRPS_INVALID;
   1338
   1339	/* Reset this NIX LF */
   1340	err = rvu_lf_reset(rvu, block, nixlf);
   1341	if (err) {
   1342		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
   1343			block->addr - BLKADDR_NIX0, nixlf);
   1344		return NIX_AF_ERR_LF_RESET;
   1345	}
   1346
   1347	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
   1348
   1349	/* Alloc NIX RQ HW context memory and config the base */
   1350	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
   1351	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
   1352	if (err)
   1353		goto free_mem;
   1354
   1355	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
   1356	if (!pfvf->rq_bmap)
   1357		goto free_mem;
   1358
   1359	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
   1360		    (u64)pfvf->rq_ctx->iova);
   1361
   1362	/* Set caching and queue count in HW */
   1363	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
   1364	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
   1365
   1366	/* Alloc NIX SQ HW context memory and config the base */
   1367	hwctx_size = 1UL << (ctx_cfg & 0xF);
   1368	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
   1369	if (err)
   1370		goto free_mem;
   1371
   1372	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
   1373	if (!pfvf->sq_bmap)
   1374		goto free_mem;
   1375
   1376	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
   1377		    (u64)pfvf->sq_ctx->iova);
   1378
   1379	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
   1380	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
   1381
   1382	/* Alloc NIX CQ HW context memory and config the base */
   1383	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
   1384	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
   1385	if (err)
   1386		goto free_mem;
   1387
   1388	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
   1389	if (!pfvf->cq_bmap)
   1390		goto free_mem;
   1391
   1392	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
   1393		    (u64)pfvf->cq_ctx->iova);
   1394
   1395	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
   1396	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
   1397
   1398	/* Initialize receive side scaling (RSS) */
   1399	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
   1400	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
   1401				 req->rss_grps, hwctx_size, req->way_mask,
   1402				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
   1403	if (err)
   1404		goto free_mem;
   1405
   1406	/* Alloc memory for CQINT's HW contexts */
   1407	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
   1408	qints = (cfg >> 24) & 0xFFF;
   1409	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
   1410	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
   1411	if (err)
   1412		goto free_mem;
   1413
   1414	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
   1415		    (u64)pfvf->cq_ints_ctx->iova);
   1416
   1417	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
   1418		    BIT_ULL(36) | req->way_mask << 20);
   1419
   1420	/* Alloc memory for QINT's HW contexts */
   1421	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
   1422	qints = (cfg >> 12) & 0xFFF;
   1423	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
   1424	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
   1425	if (err)
   1426		goto free_mem;
   1427
   1428	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
   1429		    (u64)pfvf->nix_qints_ctx->iova);
   1430	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
   1431		    BIT_ULL(36) | req->way_mask << 20);
   1432
    1433	/* Set up VLANX TPIDs.
   1434	 * Use VLAN1 for 802.1Q
   1435	 * and VLAN0 for 802.1AD.
   1436	 */
   1437	cfg = (0x8100ULL << 16) | 0x88A8ULL;
   1438	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
   1439
   1440	/* Enable LMTST for this NIX LF */
   1441	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
   1442
   1443	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
   1444	if (req->npa_func)
   1445		cfg = req->npa_func;
   1446	if (req->sso_func)
   1447		cfg |= (u64)req->sso_func << 16;
   1448
   1449	cfg |= (u64)req->xqe_sz << 33;
   1450	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
   1451
    1452	/* Config Rx pkt length, csum checks and apad enable/disable */
   1453	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
   1454
   1455	/* Configure pkind for TX parse config */
   1456	cfg = NPC_TX_DEF_PKIND;
   1457	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
   1458
   1459	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
   1460	if (is_sdp_pfvf(pcifunc))
   1461		intf = NIX_INTF_TYPE_SDP;
   1462
   1463	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
   1464				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
   1465	if (err)
   1466		goto free_mem;
   1467
   1468	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
   1469	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
   1470
   1471	/* Configure RX VTAG Type 7 (strip) for vf vlan */
   1472	rvu_write64(rvu, blkaddr,
   1473		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
   1474		    VTAGSIZE_T4 | VTAG_STRIP);
   1475
   1476	goto exit;
   1477
   1478free_mem:
   1479	nix_ctx_free(rvu, pfvf);
   1480	rc = -ENOMEM;
   1481
   1482exit:
   1483	/* Set macaddr of this PF/VF */
   1484	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
   1485
   1486	/* set SQB size info */
   1487	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
   1488	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
   1489	rsp->rx_chan_base = pfvf->rx_chan_base;
   1490	rsp->tx_chan_base = pfvf->tx_chan_base;
   1491	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
   1492	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
   1493	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
   1494	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
   1495	/* Get HW supported stat count */
   1496	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
   1497	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
   1498	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
   1499	/* Get count of CQ IRQs and error IRQs supported per LF */
   1500	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
   1501	rsp->qints = ((cfg >> 12) & 0xFFF);
   1502	rsp->cints = ((cfg >> 24) & 0xFFF);
   1503	rsp->cgx_links = hw->cgx_links;
   1504	rsp->lbk_links = hw->lbk_links;
   1505	rsp->sdp_links = hw->sdp_links;
   1506
   1507	return rc;
   1508}
   1509
   1510int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
   1511				 struct msg_rsp *rsp)
   1512{
   1513	struct rvu_hwinfo *hw = rvu->hw;
   1514	u16 pcifunc = req->hdr.pcifunc;
   1515	struct rvu_block *block;
   1516	int blkaddr, nixlf, err;
   1517	struct rvu_pfvf *pfvf;
   1518
   1519	pfvf = rvu_get_pfvf(rvu, pcifunc);
   1520	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   1521	if (!pfvf->nixlf || blkaddr < 0)
   1522		return NIX_AF_ERR_AF_LF_INVALID;
   1523
   1524	block = &hw->block[blkaddr];
   1525	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
   1526	if (nixlf < 0)
   1527		return NIX_AF_ERR_AF_LF_INVALID;
   1528
   1529	if (req->flags & NIX_LF_DISABLE_FLOWS)
   1530		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
   1531	else
   1532		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
   1533
   1534	/* Free any tx vtag def entries used by this NIX LF */
   1535	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
   1536		nix_free_tx_vtag_entries(rvu, pcifunc);
   1537
   1538	nix_interface_deinit(rvu, pcifunc, nixlf);
   1539
   1540	/* Reset this NIX LF */
   1541	err = rvu_lf_reset(rvu, block, nixlf);
   1542	if (err) {
   1543		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
   1544			block->addr - BLKADDR_NIX0, nixlf);
   1545		return NIX_AF_ERR_LF_RESET;
   1546	}
   1547
   1548	nix_ctx_free(rvu, pfvf);
   1549
   1550	return 0;
   1551}
   1552
   1553int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
   1554					 struct nix_mark_format_cfg  *req,
   1555					 struct nix_mark_format_cfg_rsp *rsp)
   1556{
   1557	u16 pcifunc = req->hdr.pcifunc;
   1558	struct nix_hw *nix_hw;
   1559	struct rvu_pfvf *pfvf;
   1560	int blkaddr, rc;
   1561	u32 cfg;
   1562
   1563	pfvf = rvu_get_pfvf(rvu, pcifunc);
   1564	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   1565	if (!pfvf->nixlf || blkaddr < 0)
   1566		return NIX_AF_ERR_AF_LF_INVALID;
   1567
   1568	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   1569	if (!nix_hw)
   1570		return NIX_AF_ERR_INVALID_NIXBLK;
   1571
   1572	cfg = (((u32)req->offset & 0x7) << 16) |
   1573	      (((u32)req->y_mask & 0xF) << 12) |
   1574	      (((u32)req->y_val & 0xF) << 8) |
   1575	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
   1576
   1577	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
   1578	if (rc < 0) {
   1579		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
   1580			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
   1581		return NIX_AF_ERR_MARK_CFG_FAIL;
   1582	}
   1583
   1584	rsp->mark_format_idx = rc;
   1585	return 0;
   1586}
   1587
    1588	/* Handle shaper update specially for a few HW revisions */
   1589static bool
   1590handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
   1591			    int lvl, u64 reg, u64 regval)
   1592{
   1593	u64 regbase, oldval, sw_xoff = 0;
   1594	u64 dbgval, md_debug0 = 0;
   1595	unsigned long poll_tmo;
   1596	bool rate_reg = 0;
   1597	u32 schq;
   1598
   1599	regbase = reg & 0xFFFF;
   1600	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
   1601
   1602	/* Check for rate register */
   1603	switch (lvl) {
   1604	case NIX_TXSCH_LVL_TL1:
   1605		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
   1606		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
   1607
   1608		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
   1609		break;
   1610	case NIX_TXSCH_LVL_TL2:
   1611		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
   1612		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
   1613
   1614		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
   1615			    regbase == NIX_AF_TL2X_PIR(0));
   1616		break;
   1617	case NIX_TXSCH_LVL_TL3:
   1618		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
   1619		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
   1620
   1621		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
   1622			    regbase == NIX_AF_TL3X_PIR(0));
   1623		break;
   1624	case NIX_TXSCH_LVL_TL4:
   1625		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
   1626		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
   1627
   1628		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
   1629			    regbase == NIX_AF_TL4X_PIR(0));
   1630		break;
   1631	case NIX_TXSCH_LVL_MDQ:
   1632		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
   1633		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
   1634			    regbase == NIX_AF_MDQX_PIR(0));
   1635		break;
   1636	}
   1637
   1638	if (!rate_reg)
   1639		return false;
   1640
   1641	/* Nothing special to do when state is not toggled */
   1642	oldval = rvu_read64(rvu, blkaddr, reg);
   1643	if ((oldval & 0x1) == (regval & 0x1)) {
   1644		rvu_write64(rvu, blkaddr, reg, regval);
   1645		return true;
   1646	}
   1647
   1648	/* PIR/CIR disable */
   1649	if (!(regval & 0x1)) {
   1650		rvu_write64(rvu, blkaddr, sw_xoff, 1);
   1651		rvu_write64(rvu, blkaddr, reg, 0);
   1652		udelay(4);
   1653		rvu_write64(rvu, blkaddr, sw_xoff, 0);
   1654		return true;
   1655	}
   1656
   1657	/* PIR/CIR enable */
   1658	rvu_write64(rvu, blkaddr, sw_xoff, 1);
   1659	if (md_debug0) {
   1660		poll_tmo = jiffies + usecs_to_jiffies(10000);
   1661		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
   1662		do {
   1663			if (time_after(jiffies, poll_tmo)) {
   1664				dev_err(rvu->dev,
   1665					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
   1666					nixlf, schq, lvl);
   1667				goto exit;
   1668			}
   1669			usleep_range(1, 5);
   1670			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
   1671		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
   1672	}
   1673	rvu_write64(rvu, blkaddr, reg, regval);
   1674exit:
   1675	rvu_write64(rvu, blkaddr, sw_xoff, 0);
   1676	return true;
   1677}
   1678
   1679/* Disable shaping of pkts by a scheduler queue
   1680 * at a given scheduler level.
   1681 */
   1682static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
   1683				 int nixlf, int lvl, int schq)
   1684{
   1685	struct rvu_hwinfo *hw = rvu->hw;
   1686	u64  cir_reg = 0, pir_reg = 0;
   1687	u64  cfg;
   1688
   1689	switch (lvl) {
   1690	case NIX_TXSCH_LVL_TL1:
   1691		cir_reg = NIX_AF_TL1X_CIR(schq);
   1692		pir_reg = 0; /* PIR not available at TL1 */
   1693		break;
   1694	case NIX_TXSCH_LVL_TL2:
   1695		cir_reg = NIX_AF_TL2X_CIR(schq);
   1696		pir_reg = NIX_AF_TL2X_PIR(schq);
   1697		break;
   1698	case NIX_TXSCH_LVL_TL3:
   1699		cir_reg = NIX_AF_TL3X_CIR(schq);
   1700		pir_reg = NIX_AF_TL3X_PIR(schq);
   1701		break;
   1702	case NIX_TXSCH_LVL_TL4:
   1703		cir_reg = NIX_AF_TL4X_CIR(schq);
   1704		pir_reg = NIX_AF_TL4X_PIR(schq);
   1705		break;
   1706	case NIX_TXSCH_LVL_MDQ:
   1707		cir_reg = NIX_AF_MDQX_CIR(schq);
   1708		pir_reg = NIX_AF_MDQX_PIR(schq);
   1709		break;
   1710	}
   1711
   1712	/* Shaper state toggle needs wait/poll */
   1713	if (hw->cap.nix_shaper_toggle_wait) {
   1714		if (cir_reg)
   1715			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
   1716						    lvl, cir_reg, 0);
   1717		if (pir_reg)
   1718			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
   1719						    lvl, pir_reg, 0);
   1720		return;
   1721	}
   1722
   1723	if (!cir_reg)
   1724		return;
   1725	cfg = rvu_read64(rvu, blkaddr, cir_reg);
   1726	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
   1727
   1728	if (!pir_reg)
   1729		return;
   1730	cfg = rvu_read64(rvu, blkaddr, pir_reg);
   1731	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
   1732}
   1733
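        /* Clear a scheduler queue's link mappings: the TL4 to SDP link config
         * and, at the link level (TL3 or TL2 depending on
         * NIX_AF_PSE_CHANNEL_LEVEL), the per CGX/LBK link configs.
         */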
   1734static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
   1735				 int lvl, int schq)
   1736{
   1737	struct rvu_hwinfo *hw = rvu->hw;
   1738	int link_level;
   1739	int link;
   1740
   1741	if (lvl >= hw->cap.nix_tx_aggr_lvl)
   1742		return;
   1743
   1744	/* Reset TL4's SDP link config */
   1745	if (lvl == NIX_TXSCH_LVL_TL4)
   1746		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
   1747
   1748	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
   1749			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
   1750	if (lvl != link_level)
   1751		return;
   1752
   1753	/* Reset TL2's CGX or LBK link config */
   1754	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
   1755		rvu_write64(rvu, blkaddr,
   1756			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
   1757}
   1758
   1759static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
   1760			      int lvl, int schq)
   1761{
   1762	struct rvu_hwinfo *hw = rvu->hw;
   1763	u64 reg;
   1764
   1765	/* Skip this if shaping is not supported */
   1766	if (!hw->cap.nix_shaping)
   1767		return;
   1768
   1769	/* Clear level specific SW_XOFF */
   1770	switch (lvl) {
   1771	case NIX_TXSCH_LVL_TL1:
   1772		reg = NIX_AF_TL1X_SW_XOFF(schq);
   1773		break;
   1774	case NIX_TXSCH_LVL_TL2:
   1775		reg = NIX_AF_TL2X_SW_XOFF(schq);
   1776		break;
   1777	case NIX_TXSCH_LVL_TL3:
   1778		reg = NIX_AF_TL3X_SW_XOFF(schq);
   1779		break;
   1780	case NIX_TXSCH_LVL_TL4:
   1781		reg = NIX_AF_TL4X_SW_XOFF(schq);
   1782		break;
   1783	case NIX_TXSCH_LVL_MDQ:
   1784		reg = NIX_AF_MDQX_SW_XOFF(schq);
   1785		break;
   1786	default:
   1787		return;
   1788	}
   1789
   1790	rvu_write64(rvu, blkaddr, reg, 0x0);
   1791}
   1792
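        /* Return the transmit link index used by this PF/VF: AF's VFs use the
         * first LBK link, CGX mapped PFs use their CGX LMAC link and all
         * others fall back to the SDP link.
         */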
   1793static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
   1794{
   1795	struct rvu_hwinfo *hw = rvu->hw;
   1796	int pf = rvu_get_pf(pcifunc);
   1797	u8 cgx_id = 0, lmac_id = 0;
   1798
    1799	if (is_afvf(pcifunc)) { /* LBK links */
   1800		return hw->cgx_links;
   1801	} else if (is_pf_cgxmapped(rvu, pf)) {
   1802		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
   1803		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
   1804	}
   1805
   1806	/* SDP link */
   1807	return hw->cgx_links + hw->lbk_links;
   1808}
   1809
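        /* With fixed TXSCHQ mapping, each transmit link owns a contiguous
         * range of scheduler queues; return that [start, end) range for the
         * given link (LBK, CGX or SDP).
         */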
   1810static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
   1811				 int link, int *start, int *end)
   1812{
   1813	struct rvu_hwinfo *hw = rvu->hw;
   1814	int pf = rvu_get_pf(pcifunc);
   1815
   1816	if (is_afvf(pcifunc)) { /* LBK links */
   1817		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
   1818		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
   1819	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
   1820		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
   1821		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
   1822	} else { /* SDP link */
   1823		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
   1824			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
   1825		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
   1826	}
   1827}
   1828
   1829static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
   1830				      struct nix_hw *nix_hw,
   1831				      struct nix_txsch_alloc_req *req)
   1832{
   1833	struct rvu_hwinfo *hw = rvu->hw;
   1834	int schq, req_schq, free_cnt;
   1835	struct nix_txsch *txsch;
   1836	int link, start, end;
   1837
   1838	txsch = &nix_hw->txsch[lvl];
   1839	req_schq = req->schq_contig[lvl] + req->schq[lvl];
   1840
   1841	if (!req_schq)
   1842		return 0;
   1843
   1844	link = nix_get_tx_link(rvu, pcifunc);
   1845
   1846	/* For traffic aggregating scheduler level, one queue is enough */
   1847	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
   1848		if (req_schq != 1)
   1849			return NIX_AF_ERR_TLX_ALLOC_FAIL;
   1850		return 0;
   1851	}
   1852
    1853	/* Get free SCHQ count and check if request can be accommodated */
   1854	if (hw->cap.nix_fixed_txschq_mapping) {
   1855		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
   1856		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
   1857		if (end <= txsch->schq.max && schq < end &&
   1858		    !test_bit(schq, txsch->schq.bmap))
   1859			free_cnt = 1;
   1860		else
   1861			free_cnt = 0;
   1862	} else {
   1863		free_cnt = rvu_rsrc_free_count(&txsch->schq);
   1864	}
   1865
   1866	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
   1867		return NIX_AF_ERR_TLX_ALLOC_FAIL;
   1868
   1869	/* If contiguous queues are needed, check for availability */
   1870	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
   1871	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
   1872		return NIX_AF_ERR_TLX_ALLOC_FAIL;
   1873
   1874	return 0;
   1875}
   1876
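        /* Allocate scheduler queues from this level's bitmap and fill the
         * response. At or above the aggregation level only a single queue
         * (the link index) is handed out; with fixed mapping the slot is
         * derived from the PF_FUNC number; otherwise contiguous requests use
         * bitmap_find_next_zero_area() and the rest are picked by scanning
         * for free bits.
         */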
   1877static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
   1878			    struct nix_txsch_alloc_rsp *rsp,
   1879			    int lvl, int start, int end)
   1880{
   1881	struct rvu_hwinfo *hw = rvu->hw;
   1882	u16 pcifunc = rsp->hdr.pcifunc;
   1883	int idx, schq;
   1884
    1885	/* For traffic aggregating levels, queue alloc is based
    1886	 * on the transmit link to which the PF_FUNC is mapped.
    1887	 */
   1888	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
   1889		/* A single TL queue is allocated */
   1890		if (rsp->schq_contig[lvl]) {
   1891			rsp->schq_contig[lvl] = 1;
   1892			rsp->schq_contig_list[lvl][0] = start;
   1893		}
   1894
    1895		/* Both contig and non-contig reqs don't make sense here */
   1896		if (rsp->schq_contig[lvl])
   1897			rsp->schq[lvl] = 0;
   1898
   1899		if (rsp->schq[lvl]) {
   1900			rsp->schq[lvl] = 1;
   1901			rsp->schq_list[lvl][0] = start;
   1902		}
   1903		return;
   1904	}
   1905
   1906	/* Adjust the queue request count if HW supports
   1907	 * only one queue per level configuration.
   1908	 */
   1909	if (hw->cap.nix_fixed_txschq_mapping) {
   1910		idx = pcifunc & RVU_PFVF_FUNC_MASK;
   1911		schq = start + idx;
   1912		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
   1913			rsp->schq_contig[lvl] = 0;
   1914			rsp->schq[lvl] = 0;
   1915			return;
   1916		}
   1917
   1918		if (rsp->schq_contig[lvl]) {
   1919			rsp->schq_contig[lvl] = 1;
   1920			set_bit(schq, txsch->schq.bmap);
   1921			rsp->schq_contig_list[lvl][0] = schq;
   1922			rsp->schq[lvl] = 0;
   1923		} else if (rsp->schq[lvl]) {
   1924			rsp->schq[lvl] = 1;
   1925			set_bit(schq, txsch->schq.bmap);
   1926			rsp->schq_list[lvl][0] = schq;
   1927		}
   1928		return;
   1929	}
   1930
    1931	/* Allocate the requested contiguous queue indices first */
   1932	if (rsp->schq_contig[lvl]) {
   1933		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
   1934						  txsch->schq.max, start,
   1935						  rsp->schq_contig[lvl], 0);
   1936		if (schq >= end)
   1937			rsp->schq_contig[lvl] = 0;
   1938		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
   1939			set_bit(schq, txsch->schq.bmap);
   1940			rsp->schq_contig_list[lvl][idx] = schq;
   1941			schq++;
   1942		}
   1943	}
   1944
   1945	/* Allocate non-contiguous queue indices */
   1946	if (rsp->schq[lvl]) {
   1947		idx = 0;
   1948		for (schq = start; schq < end; schq++) {
   1949			if (!test_bit(schq, txsch->schq.bmap)) {
   1950				set_bit(schq, txsch->schq.bmap);
   1951				rsp->schq_list[lvl][idx++] = schq;
   1952			}
   1953			if (idx == rsp->schq[lvl])
   1954				break;
   1955		}
   1956		/* Update how many were allocated */
   1957		rsp->schq[lvl] = idx;
   1958	}
   1959}
   1960
   1961int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
   1962				     struct nix_txsch_alloc_req *req,
   1963				     struct nix_txsch_alloc_rsp *rsp)
   1964{
   1965	struct rvu_hwinfo *hw = rvu->hw;
   1966	u16 pcifunc = req->hdr.pcifunc;
   1967	int link, blkaddr, rc = 0;
   1968	int lvl, idx, start, end;
   1969	struct nix_txsch *txsch;
   1970	struct nix_hw *nix_hw;
   1971	u32 *pfvf_map;
   1972	int nixlf;
   1973	u16 schq;
   1974
   1975	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
   1976	if (rc)
   1977		return rc;
   1978
   1979	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   1980	if (!nix_hw)
   1981		return NIX_AF_ERR_INVALID_NIXBLK;
   1982
   1983	mutex_lock(&rvu->rsrc_lock);
   1984
   1985	/* Check if request is valid as per HW capabilities
    1986	 * and can be accommodated.
   1987	 */
   1988	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   1989		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
   1990		if (rc)
   1991			goto err;
   1992	}
   1993
   1994	/* Allocate requested Tx scheduler queues */
   1995	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   1996		txsch = &nix_hw->txsch[lvl];
   1997		pfvf_map = txsch->pfvf_map;
   1998
   1999		if (!req->schq[lvl] && !req->schq_contig[lvl])
   2000			continue;
   2001
   2002		rsp->schq[lvl] = req->schq[lvl];
   2003		rsp->schq_contig[lvl] = req->schq_contig[lvl];
   2004
   2005		link = nix_get_tx_link(rvu, pcifunc);
   2006
   2007		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
   2008			start = link;
   2009			end = link;
   2010		} else if (hw->cap.nix_fixed_txschq_mapping) {
   2011			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
   2012		} else {
   2013			start = 0;
   2014			end = txsch->schq.max;
   2015		}
   2016
   2017		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
   2018
   2019		/* Reset queue config */
   2020		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
   2021			schq = rsp->schq_contig_list[lvl][idx];
   2022			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
   2023			    NIX_TXSCHQ_CFG_DONE))
   2024				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
   2025			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
   2026			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
   2027		}
   2028
   2029		for (idx = 0; idx < req->schq[lvl]; idx++) {
   2030			schq = rsp->schq_list[lvl][idx];
   2031			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
   2032			    NIX_TXSCHQ_CFG_DONE))
   2033				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
   2034			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
   2035			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
   2036		}
   2037	}
   2038
   2039	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
   2040	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
   2041	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
   2042				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
   2043				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
   2044	goto exit;
   2045err:
   2046	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
   2047exit:
   2048	mutex_unlock(&rvu->rsrc_lock);
   2049	return rc;
   2050}
   2051
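        /* Flush an SMQ: temporarily enable CGX transmit if needed, set the
         * flush and enqueue XOFF bits in NIX_AF_SMQX_CFG, disable link RX
         * backpressure so the flush cannot stall, poll for completion and
         * then restore the backpressure and CGX transmit state.
         */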
   2052static int nix_smq_flush(struct rvu *rvu, int blkaddr,
   2053			 int smq, u16 pcifunc, int nixlf)
   2054{
   2055	int pf = rvu_get_pf(pcifunc);
   2056	u8 cgx_id = 0, lmac_id = 0;
   2057	int err, restore_tx_en = 0;
   2058	u64 cfg;
   2059
   2060	/* enable cgx tx if disabled */
   2061	if (is_pf_cgxmapped(rvu, pf)) {
   2062		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
   2063		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
   2064						   lmac_id, true);
   2065	}
   2066
   2067	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
   2068	/* Do SMQ flush and set enqueue xoff */
   2069	cfg |= BIT_ULL(50) | BIT_ULL(49);
   2070	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
   2071
   2072	/* Disable backpressure from physical link,
   2073	 * otherwise SMQ flush may stall.
   2074	 */
   2075	rvu_cgx_enadis_rx_bp(rvu, pf, false);
   2076
   2077	/* Wait for flush to complete */
   2078	err = rvu_poll_reg(rvu, blkaddr,
   2079			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
   2080	if (err)
   2081		dev_err(rvu->dev,
   2082			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
   2083
   2084	rvu_cgx_enadis_rx_bp(rvu, pf, true);
   2085	/* restore cgx tx state */
   2086	if (restore_tx_en)
   2087		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
   2088	return err;
   2089}
   2090
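        /* Free all Tx scheduler queues owned by 'pcifunc': disable their link
         * configs and clear SW_XOFFs, flush the owned SMQs, return every
         * queue below the aggregation level to the free pool and finally
         * sync this LF's cached NDC-TX state to LLC/DRAM.
         */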
   2091static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
   2092{
   2093	int blkaddr, nixlf, lvl, schq, err;
   2094	struct rvu_hwinfo *hw = rvu->hw;
   2095	struct nix_txsch *txsch;
   2096	struct nix_hw *nix_hw;
   2097	u16 map_func;
   2098
   2099	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   2100	if (blkaddr < 0)
   2101		return NIX_AF_ERR_AF_LF_INVALID;
   2102
   2103	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2104	if (!nix_hw)
   2105		return NIX_AF_ERR_INVALID_NIXBLK;
   2106
   2107	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
   2108	if (nixlf < 0)
   2109		return NIX_AF_ERR_AF_LF_INVALID;
   2110
    2111	/* Disable TL2/3 queue links and all XOFFs before SMQ flush */
   2112	mutex_lock(&rvu->rsrc_lock);
   2113	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   2114		txsch = &nix_hw->txsch[lvl];
   2115
   2116		if (lvl >= hw->cap.nix_tx_aggr_lvl)
   2117			continue;
   2118
   2119		for (schq = 0; schq < txsch->schq.max; schq++) {
   2120			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
   2121				continue;
   2122			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
   2123			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
   2124		}
   2125	}
   2126	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
   2127			  nix_get_tx_link(rvu, pcifunc));
   2128
   2129	/* On PF cleanup, clear cfg done flag as
   2130	 * PF would have changed default config.
   2131	 */
   2132	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
   2133		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
   2134		schq = nix_get_tx_link(rvu, pcifunc);
   2135		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
   2136		 * VF might be using this TL1 queue
   2137		 */
   2138		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
   2139		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
   2140	}
   2141
   2142	/* Flush SMQs */
   2143	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
   2144	for (schq = 0; schq < txsch->schq.max; schq++) {
   2145		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
   2146			continue;
   2147		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
   2148	}
   2149
   2150	/* Now free scheduler queues to free pool */
   2151	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   2152		 /* TLs above aggregation level are shared across all PF
    2153		  * and its VFs, hence skip freeing them.
   2154		  */
   2155		if (lvl >= hw->cap.nix_tx_aggr_lvl)
   2156			continue;
   2157
   2158		txsch = &nix_hw->txsch[lvl];
   2159		for (schq = 0; schq < txsch->schq.max; schq++) {
   2160			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
   2161				continue;
   2162			rvu_free_rsrc(&txsch->schq, schq);
   2163			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
   2164		}
   2165	}
   2166	mutex_unlock(&rvu->rsrc_lock);
   2167
   2168	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
   2169	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
   2170	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
   2171	if (err)
   2172		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
   2173
   2174	return 0;
   2175}
   2176
   2177static int nix_txschq_free_one(struct rvu *rvu,
   2178			       struct nix_txsch_free_req *req)
   2179{
   2180	struct rvu_hwinfo *hw = rvu->hw;
   2181	u16 pcifunc = req->hdr.pcifunc;
   2182	int lvl, schq, nixlf, blkaddr;
   2183	struct nix_txsch *txsch;
   2184	struct nix_hw *nix_hw;
   2185	u32 *pfvf_map;
   2186	int rc;
   2187
   2188	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   2189	if (blkaddr < 0)
   2190		return NIX_AF_ERR_AF_LF_INVALID;
   2191
   2192	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2193	if (!nix_hw)
   2194		return NIX_AF_ERR_INVALID_NIXBLK;
   2195
   2196	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
   2197	if (nixlf < 0)
   2198		return NIX_AF_ERR_AF_LF_INVALID;
   2199
   2200	lvl = req->schq_lvl;
   2201	schq = req->schq;
   2202	txsch = &nix_hw->txsch[lvl];
   2203
   2204	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
   2205		return 0;
   2206
   2207	pfvf_map = txsch->pfvf_map;
   2208	mutex_lock(&rvu->rsrc_lock);
   2209
   2210	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
   2211		rc = NIX_AF_ERR_TLX_INVALID;
   2212		goto err;
   2213	}
   2214
    2215	/* Clear SW_XOFF of this resource only.
    2216	 * For the SMQ level, all XOFFs along the
    2217	 * path need to be cleared by the user.
    2218	 */
   2219	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
   2220
    2221	/* Flush if it is an SMQ. The onus of disabling
    2222	 * TL2/3 queue links before the SMQ flush is on the user.
    2223	 */
   2224	if (lvl == NIX_TXSCH_LVL_SMQ &&
   2225	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
   2226		rc = NIX_AF_SMQ_FLUSH_FAILED;
   2227		goto err;
   2228	}
   2229
   2230	/* Free the resource */
   2231	rvu_free_rsrc(&txsch->schq, schq);
   2232	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
   2233	mutex_unlock(&rvu->rsrc_lock);
   2234	return 0;
   2235err:
   2236	mutex_unlock(&rvu->rsrc_lock);
   2237	return rc;
   2238}
   2239
   2240int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
   2241				    struct nix_txsch_free_req *req,
   2242				    struct msg_rsp *rsp)
   2243{
   2244	if (req->flags & TXSCHQ_FREE_ALL)
   2245		return nix_txschq_free(rvu, req->hdr.pcifunc);
   2246	else
   2247		return nix_txschq_free_one(rvu, req);
   2248}
   2249
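        /* Validate a TXSCHQ config write: the register must be valid for this
         * level, the queue must belong to the requesting PF/VF and, for the
         * *_PARENT registers, the parent queue encoded in the value must be
         * owned by the caller as well.
         */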
   2250static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
   2251				      int lvl, u64 reg, u64 regval)
   2252{
   2253	u64 regbase = reg & 0xFFFF;
   2254	u16 schq, parent;
   2255
   2256	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
   2257		return false;
   2258
   2259	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
   2260	/* Check if this schq belongs to this PF/VF or not */
   2261	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
   2262		return false;
   2263
   2264	parent = (regval >> 16) & 0x1FF;
   2265	/* Validate MDQ's TL4 parent */
   2266	if (regbase == NIX_AF_MDQX_PARENT(0) &&
   2267	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
   2268		return false;
   2269
   2270	/* Validate TL4's TL3 parent */
   2271	if (regbase == NIX_AF_TL4X_PARENT(0) &&
   2272	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
   2273		return false;
   2274
   2275	/* Validate TL3's TL2 parent */
   2276	if (regbase == NIX_AF_TL3X_PARENT(0) &&
   2277	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
   2278		return false;
   2279
   2280	/* Validate TL2's TL1 parent */
   2281	if (regbase == NIX_AF_TL2X_PARENT(0) &&
   2282	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
   2283		return false;
   2284
   2285	return true;
   2286}
   2287
   2288static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
   2289{
   2290	u64 regbase;
   2291
   2292	if (hw->cap.nix_shaping)
   2293		return true;
   2294
    2295	/* If shaping and coloring are not supported, then
   2296	 * *_CIR and *_PIR registers should not be configured.
   2297	 */
   2298	regbase = reg & 0xFFFF;
   2299
   2300	switch (lvl) {
   2301	case NIX_TXSCH_LVL_TL1:
   2302		if (regbase == NIX_AF_TL1X_CIR(0))
   2303			return false;
   2304		break;
   2305	case NIX_TXSCH_LVL_TL2:
   2306		if (regbase == NIX_AF_TL2X_CIR(0) ||
   2307		    regbase == NIX_AF_TL2X_PIR(0))
   2308			return false;
   2309		break;
   2310	case NIX_TXSCH_LVL_TL3:
   2311		if (regbase == NIX_AF_TL3X_CIR(0) ||
   2312		    regbase == NIX_AF_TL3X_PIR(0))
   2313			return false;
   2314		break;
   2315	case NIX_TXSCH_LVL_TL4:
   2316		if (regbase == NIX_AF_TL4X_CIR(0) ||
   2317		    regbase == NIX_AF_TL4X_PIR(0))
   2318			return false;
   2319		break;
   2320	case NIX_TXSCH_LVL_MDQ:
   2321		if (regbase == NIX_AF_MDQX_CIR(0) ||
   2322		    regbase == NIX_AF_MDQX_PIR(0))
   2323			return false;
   2324		break;
   2325	}
   2326	return true;
   2327}
   2328
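        /* Apply the default TL1 config for this PF_FUNC's transmit link
         * (RR priority in TOPOLOGY, DWRR quantum or weight in SCHEDULE and
         * CIR disabled), unless the PF has already marked the queue as
         * configured.
         */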
   2329static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
   2330				u16 pcifunc, int blkaddr)
   2331{
   2332	u32 *pfvf_map;
   2333	int schq;
   2334
   2335	schq = nix_get_tx_link(rvu, pcifunc);
   2336	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
   2337	/* Skip if PF has already done the config */
   2338	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
   2339		return;
   2340	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
   2341		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
   2342
    2343	/* On OcteonTx2 the config was in bytes; on newer silicons
    2344	 * it's changed to a weight.
    2345	 */
   2346	if (!rvu->hw->cap.nix_common_dwrr_mtu)
   2347		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
   2348			    TXSCH_TL1_DFLT_RR_QTM);
   2349	else
   2350		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
   2351			    CN10K_MAX_DWRR_WEIGHT);
   2352
   2353	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
   2354	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
   2355}
   2356
   2357/* Register offset - [15:0]
   2358 * Scheduler Queue number - [25:16]
   2359 */
   2360#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
   2361
   2362static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
   2363			       int blkaddr, struct nix_txschq_config *req,
   2364			       struct nix_txschq_config *rsp)
   2365{
   2366	u16 pcifunc = req->hdr.pcifunc;
   2367	int idx, schq;
   2368	u64 reg;
   2369
   2370	for (idx = 0; idx < req->num_regs; idx++) {
   2371		reg = req->reg[idx];
   2372		reg &= NIX_TX_SCHQ_MASK;
   2373		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
   2374		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
   2375		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
   2376			return NIX_AF_INVAL_TXSCHQ_CFG;
   2377		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
   2378	}
   2379	rsp->lvl = req->lvl;
   2380	rsp->num_regs = req->num_regs;
   2381	return 0;
   2382}
   2383
   2384static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
   2385			       u16 pcifunc, struct nix_txsch *txsch)
   2386{
   2387	struct rvu_hwinfo *hw = rvu->hw;
   2388	int lbk_link_start, lbk_links;
   2389	u8 pf = rvu_get_pf(pcifunc);
   2390	int schq;
   2391
   2392	if (!is_pf_cgxmapped(rvu, pf))
   2393		return;
   2394
   2395	lbk_link_start = hw->cgx_links;
   2396
   2397	for (schq = 0; schq < txsch->schq.max; schq++) {
   2398		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
   2399			continue;
   2400		/* Enable all LBK links with channel 63 by default so that
    2401		 * packets can be sent to LBK with an NPC TX MCAM rule
   2402		 */
   2403		lbk_links = hw->lbk_links;
   2404		while (lbk_links--)
   2405			rvu_write64(rvu, blkaddr,
   2406				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
   2407							      lbk_link_start +
   2408							      lbk_links),
   2409				    BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
   2410	}
   2411}
   2412
   2413int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
   2414				    struct nix_txschq_config *req,
   2415				    struct nix_txschq_config *rsp)
   2416{
   2417	u64 reg, val, regval, schq_regbase, val_mask;
   2418	struct rvu_hwinfo *hw = rvu->hw;
   2419	u16 pcifunc = req->hdr.pcifunc;
   2420	struct nix_txsch *txsch;
   2421	struct nix_hw *nix_hw;
   2422	int blkaddr, idx, err;
   2423	int nixlf, schq;
   2424	u32 *pfvf_map;
   2425
   2426	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
   2427	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
   2428		return NIX_AF_INVAL_TXSCHQ_CFG;
   2429
   2430	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
   2431	if (err)
   2432		return err;
   2433
   2434	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2435	if (!nix_hw)
   2436		return NIX_AF_ERR_INVALID_NIXBLK;
   2437
   2438	if (req->read)
   2439		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
   2440
   2441	txsch = &nix_hw->txsch[req->lvl];
   2442	pfvf_map = txsch->pfvf_map;
   2443
   2444	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
   2445	    pcifunc & RVU_PFVF_FUNC_MASK) {
   2446		mutex_lock(&rvu->rsrc_lock);
   2447		if (req->lvl == NIX_TXSCH_LVL_TL1)
   2448			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
   2449		mutex_unlock(&rvu->rsrc_lock);
   2450		return 0;
   2451	}
   2452
   2453	for (idx = 0; idx < req->num_regs; idx++) {
   2454		reg = req->reg[idx];
   2455		reg &= NIX_TX_SCHQ_MASK;
   2456		regval = req->regval[idx];
   2457		schq_regbase = reg & 0xFFFF;
   2458		val_mask = req->regval_mask[idx];
   2459
   2460		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
   2461					       txsch->lvl, reg, regval))
   2462			return NIX_AF_INVAL_TXSCHQ_CFG;
   2463
    2464		/* Check if shaping and coloring are supported */
   2465		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
   2466			continue;
   2467
   2468		val = rvu_read64(rvu, blkaddr, reg);
   2469		regval = (val & val_mask) | (regval & ~val_mask);
   2470
   2471		/* Handle shaping state toggle specially */
   2472		if (hw->cap.nix_shaper_toggle_wait &&
   2473		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
   2474						req->lvl, reg, regval))
   2475			continue;
   2476
   2477		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
   2478		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
   2479			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
   2480					   pcifunc, 0);
   2481			regval &= ~(0x7FULL << 24);
   2482			regval |= ((u64)nixlf << 24);
   2483		}
   2484
   2485		/* Clear 'BP_ENA' config, if it's not allowed */
   2486		if (!hw->cap.nix_tx_link_bp) {
   2487			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
   2488			    (schq_regbase & 0xFF00) ==
   2489			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
   2490				regval &= ~BIT_ULL(13);
   2491		}
   2492
   2493		/* Mark config as done for TL1 by PF */
   2494		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
   2495		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
   2496			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
   2497			mutex_lock(&rvu->rsrc_lock);
   2498			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
   2499							NIX_TXSCHQ_CFG_DONE);
   2500			mutex_unlock(&rvu->rsrc_lock);
   2501		}
   2502
    2503		/* SMQ flush is special, hence split the register write:
    2504		 * do the flush first and write the rest of the bits later.
    2505		 */
   2506		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
   2507		    (regval & BIT_ULL(49))) {
   2508			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
   2509			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
   2510			regval &= ~BIT_ULL(49);
   2511		}
   2512		rvu_write64(rvu, blkaddr, reg, regval);
   2513	}
   2514
   2515	rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
   2516			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
   2517	return 0;
   2518}
   2519
   2520static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
   2521			   struct nix_vtag_config *req)
   2522{
   2523	u64 regval = req->vtag_size;
   2524
   2525	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
   2526	    req->vtag_size > VTAGSIZE_T8)
   2527		return -EINVAL;
   2528
   2529	/* RX VTAG Type 7 reserved for vf vlan */
   2530	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
   2531		return NIX_AF_ERR_RX_VTAG_INUSE;
   2532
   2533	if (req->rx.capture_vtag)
   2534		regval |= BIT_ULL(5);
   2535	if (req->rx.strip_vtag)
   2536		regval |= BIT_ULL(4);
   2537
   2538	rvu_write64(rvu, blkaddr,
   2539		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
   2540	return 0;
   2541}
   2542
   2543static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
   2544			    u16 pcifunc, int index)
   2545{
   2546	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2547	struct nix_txvlan *vlan;
   2548
   2549	if (!nix_hw)
   2550		return NIX_AF_ERR_INVALID_NIXBLK;
   2551
   2552	vlan = &nix_hw->txvlan;
   2553	if (vlan->entry2pfvf_map[index] != pcifunc)
   2554		return NIX_AF_ERR_PARAM;
   2555
   2556	rvu_write64(rvu, blkaddr,
   2557		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
   2558	rvu_write64(rvu, blkaddr,
   2559		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
   2560
   2561	vlan->entry2pfvf_map[index] = 0;
   2562	rvu_free_rsrc(&vlan->rsrc, index);
   2563
   2564	return 0;
   2565}
   2566
   2567static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
   2568{
   2569	struct nix_txvlan *vlan;
   2570	struct nix_hw *nix_hw;
   2571	int index, blkaddr;
   2572
   2573	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   2574	if (blkaddr < 0)
   2575		return;
   2576
   2577	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2578	if (!nix_hw)
   2579		return;
   2580
   2581	vlan = &nix_hw->txvlan;
   2582
   2583	mutex_lock(&vlan->rsrc_lock);
   2584	/* Scan all the entries and free the ones mapped to 'pcifunc' */
   2585	for (index = 0; index < vlan->rsrc.max; index++) {
   2586		if (vlan->entry2pfvf_map[index] == pcifunc)
   2587			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
   2588	}
   2589	mutex_unlock(&vlan->rsrc_lock);
   2590}
   2591
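        /* Reserve a TX VTAG definition entry and program its DATA/CTL
         * registers; when 'size' is zero the tag value is placed in the
         * upper 32 bits of DATA. Returns the entry index or a negative
         * error code.
         */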
   2592static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
   2593			     u64 vtag, u8 size)
   2594{
   2595	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2596	struct nix_txvlan *vlan;
   2597	u64 regval;
   2598	int index;
   2599
   2600	if (!nix_hw)
   2601		return NIX_AF_ERR_INVALID_NIXBLK;
   2602
   2603	vlan = &nix_hw->txvlan;
   2604
   2605	mutex_lock(&vlan->rsrc_lock);
   2606
   2607	index = rvu_alloc_rsrc(&vlan->rsrc);
   2608	if (index < 0) {
   2609		mutex_unlock(&vlan->rsrc_lock);
   2610		return index;
   2611	}
   2612
   2613	mutex_unlock(&vlan->rsrc_lock);
   2614
   2615	regval = size ? vtag : vtag << 32;
   2616
   2617	rvu_write64(rvu, blkaddr,
   2618		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
   2619	rvu_write64(rvu, blkaddr,
   2620		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
   2621
   2622	return index;
   2623}
   2624
   2625static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
   2626			     struct nix_vtag_config *req)
   2627{
   2628	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2629	u16 pcifunc = req->hdr.pcifunc;
   2630	int idx0 = req->tx.vtag0_idx;
   2631	int idx1 = req->tx.vtag1_idx;
   2632	struct nix_txvlan *vlan;
   2633	int err = 0;
   2634
   2635	if (!nix_hw)
   2636		return NIX_AF_ERR_INVALID_NIXBLK;
   2637
   2638	vlan = &nix_hw->txvlan;
   2639	if (req->tx.free_vtag0 && req->tx.free_vtag1)
   2640		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
   2641		    vlan->entry2pfvf_map[idx1] != pcifunc)
   2642			return NIX_AF_ERR_PARAM;
   2643
   2644	mutex_lock(&vlan->rsrc_lock);
   2645
   2646	if (req->tx.free_vtag0) {
   2647		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
   2648		if (err)
   2649			goto exit;
   2650	}
   2651
   2652	if (req->tx.free_vtag1)
   2653		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
   2654
   2655exit:
   2656	mutex_unlock(&vlan->rsrc_lock);
   2657	return err;
   2658}
   2659
   2660static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
   2661			   struct nix_vtag_config *req,
   2662			   struct nix_vtag_config_rsp *rsp)
   2663{
   2664	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
   2665	struct nix_txvlan *vlan;
   2666	u16 pcifunc = req->hdr.pcifunc;
   2667
   2668	if (!nix_hw)
   2669		return NIX_AF_ERR_INVALID_NIXBLK;
   2670
   2671	vlan = &nix_hw->txvlan;
   2672	if (req->tx.cfg_vtag0) {
   2673		rsp->vtag0_idx =
   2674			nix_tx_vtag_alloc(rvu, blkaddr,
   2675					  req->tx.vtag0, req->vtag_size);
   2676
   2677		if (rsp->vtag0_idx < 0)
   2678			return NIX_AF_ERR_TX_VTAG_NOSPC;
   2679
   2680		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
   2681	}
   2682
   2683	if (req->tx.cfg_vtag1) {
   2684		rsp->vtag1_idx =
   2685			nix_tx_vtag_alloc(rvu, blkaddr,
   2686					  req->tx.vtag1, req->vtag_size);
   2687
   2688		if (rsp->vtag1_idx < 0)
   2689			goto err_free;
   2690
   2691		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
   2692	}
   2693
   2694	return 0;
   2695
   2696err_free:
   2697	if (req->tx.cfg_vtag0)
   2698		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
   2699
   2700	return NIX_AF_ERR_TX_VTAG_NOSPC;
   2701}
   2702
   2703int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
   2704				  struct nix_vtag_config *req,
   2705				  struct nix_vtag_config_rsp *rsp)
   2706{
   2707	u16 pcifunc = req->hdr.pcifunc;
   2708	int blkaddr, nixlf, err;
   2709
   2710	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
   2711	if (err)
   2712		return err;
   2713
   2714	if (req->cfg_type) {
   2715		/* rx vtag configuration */
   2716		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
   2717		if (err)
   2718			return NIX_AF_ERR_PARAM;
   2719	} else {
   2720		/* tx vtag configuration */
   2721		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
   2722		    (req->tx.free_vtag0 || req->tx.free_vtag1))
   2723			return NIX_AF_ERR_PARAM;
   2724
   2725		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
   2726			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
   2727
   2728		if (req->tx.free_vtag0 || req->tx.free_vtag1)
   2729			return nix_tx_vtag_decfg(rvu, blkaddr, req);
   2730	}
   2731
   2732	return 0;
   2733}
   2734
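        /* Build a NIX AQ INIT/WRITE request for a single multicast/mirror
         * (MCE) entry pointing at 'pcifunc', using RSS group 0, and submit
         * it to the admin queue.
         */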
   2735static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
   2736			     int mce, u8 op, u16 pcifunc, int next, bool eol)
   2737{
   2738	struct nix_aq_enq_req aq_req;
   2739	int err;
   2740
   2741	aq_req.hdr.pcifunc = 0;
   2742	aq_req.ctype = NIX_AQ_CTYPE_MCE;
   2743	aq_req.op = op;
   2744	aq_req.qidx = mce;
   2745
   2746	/* Use RSS with RSS index 0 */
   2747	aq_req.mce.op = 1;
   2748	aq_req.mce.index = 0;
   2749	aq_req.mce.eol = eol;
   2750	aq_req.mce.pf_func = pcifunc;
   2751	aq_req.mce.next = next;
   2752
   2753	/* All fields valid */
   2754	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
   2755
   2756	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
   2757	if (err) {
   2758		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
   2759			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
   2760		return err;
   2761	}
   2762	return 0;
   2763}
   2764
   2765static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
   2766				     u16 pcifunc, bool add)
   2767{
   2768	struct mce *mce, *tail = NULL;
   2769	bool delete = false;
   2770
   2771	/* Scan through the current list */
   2772	hlist_for_each_entry(mce, &mce_list->head, node) {
   2773		/* If already exists, then delete */
   2774		if (mce->pcifunc == pcifunc && !add) {
   2775			delete = true;
   2776			break;
   2777		} else if (mce->pcifunc == pcifunc && add) {
   2778			/* entry already exists */
   2779			return 0;
   2780		}
   2781		tail = mce;
   2782	}
   2783
   2784	if (delete) {
   2785		hlist_del(&mce->node);
   2786		kfree(mce);
   2787		mce_list->count--;
   2788		return 0;
   2789	}
   2790
   2791	if (!add)
   2792		return 0;
   2793
   2794	/* Add a new one to the list, at the tail */
   2795	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
   2796	if (!mce)
   2797		return -ENOMEM;
   2798	mce->pcifunc = pcifunc;
   2799	if (!tail)
   2800		hlist_add_head(&mce->node, &mce_list->head);
   2801	else
   2802		hlist_add_behind(&mce->node, &tail->node);
   2803	mce_list->count++;
   2804	return 0;
   2805}
   2806
   2807int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
   2808			struct nix_mce_list *mce_list,
   2809			int mce_idx, int mcam_index, bool add)
   2810{
   2811	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
   2812	struct npc_mcam *mcam = &rvu->hw->mcam;
   2813	struct nix_mcast *mcast;
   2814	struct nix_hw *nix_hw;
   2815	struct mce *mce;
   2816
   2817	if (!mce_list)
   2818		return -EINVAL;
   2819
   2820	/* Get this PF/VF func's MCE index */
   2821	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
   2822
   2823	if (idx > (mce_idx + mce_list->max)) {
   2824		dev_err(rvu->dev,
   2825			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
   2826			__func__, idx, mce_list->max,
   2827			pcifunc >> RVU_PFVF_PF_SHIFT);
   2828		return -EINVAL;
   2829	}
   2830
   2831	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
   2832	if (err)
   2833		return err;
   2834
   2835	mcast = &nix_hw->mcast;
   2836	mutex_lock(&mcast->mce_lock);
   2837
   2838	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
   2839	if (err)
   2840		goto end;
   2841
   2842	/* Disable MCAM entry in NPC */
   2843	if (!mce_list->count) {
   2844		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
   2845		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
   2846		goto end;
   2847	}
   2848
   2849	/* Dump the updated list to HW */
   2850	idx = mce_idx;
   2851	last_idx = idx + mce_list->count - 1;
   2852	hlist_for_each_entry(mce, &mce_list->head, node) {
   2853		if (idx > last_idx)
   2854			break;
   2855
   2856		next_idx = idx + 1;
   2857		/* EOL should be set in last MCE */
   2858		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
   2859					mce->pcifunc, next_idx,
   2860					(next_idx > last_idx) ? true : false);
   2861		if (err)
   2862			goto end;
   2863		idx++;
   2864	}
   2865
   2866end:
   2867	mutex_unlock(&mcast->mce_lock);
   2868	return err;
   2869}
   2870
   2871void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
   2872		      struct nix_mce_list **mce_list, int *mce_idx)
   2873{
   2874	struct rvu_hwinfo *hw = rvu->hw;
   2875	struct rvu_pfvf *pfvf;
   2876
   2877	if (!hw->cap.nix_rx_multicast ||
   2878	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
   2879		*mce_list = NULL;
   2880		*mce_idx = 0;
   2881		return;
   2882	}
   2883
   2884	/* Get this PF/VF func's MCE index */
   2885	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
   2886
   2887	if (type == NIXLF_BCAST_ENTRY) {
   2888		*mce_list = &pfvf->bcast_mce_list;
   2889		*mce_idx = pfvf->bcast_mce_idx;
   2890	} else if (type == NIXLF_ALLMULTI_ENTRY) {
   2891		*mce_list = &pfvf->mcast_mce_list;
   2892		*mce_idx = pfvf->mcast_mce_idx;
   2893	} else if (type == NIXLF_PROMISC_ENTRY) {
   2894		*mce_list = &pfvf->promisc_mce_list;
   2895		*mce_idx = pfvf->promisc_mce_idx;
   2896	}  else {
   2897		*mce_list = NULL;
   2898		*mce_idx = 0;
   2899	}
   2900}
   2901
   2902static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
   2903			       int type, bool add)
   2904{
   2905	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
   2906	struct npc_mcam *mcam = &rvu->hw->mcam;
   2907	struct rvu_hwinfo *hw = rvu->hw;
   2908	struct nix_mce_list *mce_list;
   2909	int pf;
   2910
   2911	/* skip multicast pkt replication for AF's VFs & SDP links */
   2912	if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
   2913		return 0;
   2914
   2915	if (!hw->cap.nix_rx_multicast)
   2916		return 0;
   2917
   2918	pf = rvu_get_pf(pcifunc);
   2919	if (!is_pf_cgxmapped(rvu, pf))
   2920		return 0;
   2921
   2922	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   2923	if (blkaddr < 0)
   2924		return -EINVAL;
   2925
   2926	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
   2927	if (nixlf < 0)
   2928		return -EINVAL;
   2929
   2930	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
   2931
   2932	mcam_index = npc_get_nixlf_mcam_index(mcam,
   2933					      pcifunc & ~RVU_PFVF_FUNC_MASK,
   2934					      nixlf, type);
   2935	err = nix_update_mce_list(rvu, pcifunc, mce_list,
   2936				  mce_idx, mcam_index, add);
   2937	return err;
   2938}
   2939
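        /* For every CGX mapped PF attached to this NIX block, reserve
         * broadcast, multicast and promisc MCE lists sized for the PF plus
         * all of its VFs and pre-populate them with dummy entries so later
         * updates can always use AQ WRITE.
         */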
   2940static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
   2941{
   2942	struct nix_mcast *mcast = &nix_hw->mcast;
   2943	int err, pf, numvfs, idx;
   2944	struct rvu_pfvf *pfvf;
   2945	u16 pcifunc;
   2946	u64 cfg;
   2947
    2948	/* Skip PF0 (i.e. AF) */
   2949	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
   2950		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
   2951		/* If PF is not enabled, nothing to do */
   2952		if (!((cfg >> 20) & 0x01))
   2953			continue;
   2954		/* Get numVFs attached to this PF */
   2955		numvfs = (cfg >> 12) & 0xFF;
   2956
   2957		pfvf = &rvu->pf[pf];
   2958
    2959		/* Is this NIX0/1 block mapped to this PF? */
   2960		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
   2961			continue;
   2962
   2963		/* save start idx of broadcast mce list */
   2964		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
   2965		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
   2966
   2967		/* save start idx of multicast mce list */
   2968		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
   2969		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
   2970
   2971		/* save the start idx of promisc mce list */
   2972		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
   2973		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
   2974
   2975		for (idx = 0; idx < (numvfs + 1); idx++) {
   2976			/* idx-0 is for PF, followed by VFs */
   2977			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
   2978			pcifunc |= idx;
    2979		/* Add dummy entries now, so that we don't have to check
    2980		 * whether AQ_OP should be INIT/WRITE later on.
    2981		 * Will be updated when a NIXLF is attached to or
    2982		 * detached from these PF/VFs.
   2983			 */
   2984			err = nix_blk_setup_mce(rvu, nix_hw,
   2985						pfvf->bcast_mce_idx + idx,
   2986						NIX_AQ_INSTOP_INIT,
   2987						pcifunc, 0, true);
   2988			if (err)
   2989				return err;
   2990
   2991			/* add dummy entries to multicast mce list */
   2992			err = nix_blk_setup_mce(rvu, nix_hw,
   2993						pfvf->mcast_mce_idx + idx,
   2994						NIX_AQ_INSTOP_INIT,
   2995						pcifunc, 0, true);
   2996			if (err)
   2997				return err;
   2998
   2999			/* add dummy entries to promisc mce list */
   3000			err = nix_blk_setup_mce(rvu, nix_hw,
   3001						pfvf->promisc_mce_idx + idx,
   3002						NIX_AQ_INSTOP_INIT,
   3003						pcifunc, 0, true);
   3004			if (err)
   3005				return err;
   3006		}
   3007	}
   3008	return 0;
   3009}
   3010
   3011static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
   3012{
   3013	struct nix_mcast *mcast = &nix_hw->mcast;
   3014	struct rvu_hwinfo *hw = rvu->hw;
   3015	int err, size;
   3016
   3017	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
   3018	size = (1ULL << size);
   3019
   3020	/* Alloc memory for multicast/mirror replication entries */
   3021	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
   3022			 (256UL << MC_TBL_SIZE), size);
   3023	if (err)
   3024		return -ENOMEM;
   3025
   3026	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
   3027		    (u64)mcast->mce_ctx->iova);
   3028
    3029	/* Set max list length equal to the max number of VFs per PF + the PF itself */
   3030	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
   3031		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
   3032
   3033	/* Alloc memory for multicast replication buffers */
   3034	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
   3035	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
   3036			 (8UL << MC_BUF_CNT), size);
   3037	if (err)
   3038		return -ENOMEM;
   3039
   3040	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
   3041		    (u64)mcast->mcast_buf->iova);
   3042
   3043	/* Alloc pkind for NIX internal RX multicast/mirror replay */
   3044	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
   3045
   3046	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
   3047		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
   3048		    BIT_ULL(20) | MC_BUF_CNT);
   3049
   3050	mutex_init(&mcast->mce_lock);
   3051
   3052	return nix_setup_mce_tables(rvu, nix_hw);
   3053}
   3054
   3055static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
   3056{
   3057	struct nix_txvlan *vlan = &nix_hw->txvlan;
   3058	int err;
   3059
    3060	/* Allocate resource bitmap for tx vtag def registers */
   3061	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
   3062	err = rvu_alloc_bitmap(&vlan->rsrc);
   3063	if (err)
   3064		return -ENOMEM;
   3065
   3066	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
   3067	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
   3068					    sizeof(u16), GFP_KERNEL);
   3069	if (!vlan->entry2pfvf_map)
   3070		goto free_mem;
   3071
   3072	mutex_init(&vlan->rsrc_lock);
   3073	return 0;
   3074
   3075free_mem:
   3076	kfree(vlan->rsrc.bmap);
   3077	return -ENOMEM;
   3078}
   3079
   3080static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
   3081{
   3082	struct nix_txsch *txsch;
   3083	int err, lvl, schq;
   3084	u64 cfg, reg;
   3085
   3086	/* Get scheduler queue count of each type and alloc
   3087	 * bitmap for each for alloc/free/attach operations.
   3088	 */
   3089	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   3090		txsch = &nix_hw->txsch[lvl];
   3091		txsch->lvl = lvl;
   3092		switch (lvl) {
   3093		case NIX_TXSCH_LVL_SMQ:
   3094			reg = NIX_AF_MDQ_CONST;
   3095			break;
   3096		case NIX_TXSCH_LVL_TL4:
   3097			reg = NIX_AF_TL4_CONST;
   3098			break;
   3099		case NIX_TXSCH_LVL_TL3:
   3100			reg = NIX_AF_TL3_CONST;
   3101			break;
   3102		case NIX_TXSCH_LVL_TL2:
   3103			reg = NIX_AF_TL2_CONST;
   3104			break;
   3105		case NIX_TXSCH_LVL_TL1:
   3106			reg = NIX_AF_TL1_CONST;
   3107			break;
   3108		}
   3109		cfg = rvu_read64(rvu, blkaddr, reg);
   3110		txsch->schq.max = cfg & 0xFFFF;
   3111		err = rvu_alloc_bitmap(&txsch->schq);
   3112		if (err)
   3113			return err;
   3114
   3115		/* Allocate memory for scheduler queues to
   3116		 * PF/VF pcifunc mapping info.
   3117		 */
   3118		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
   3119					       sizeof(u32), GFP_KERNEL);
   3120		if (!txsch->pfvf_map)
   3121			return -ENOMEM;
   3122		for (schq = 0; schq < txsch->schq.max; schq++)
   3123			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
   3124	}
   3125
   3126	/* Setup a default value of 8192 as DWRR MTU */
   3127	if (rvu->hw->cap.nix_common_dwrr_mtu) {
   3128		rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU,
   3129			    convert_bytes_to_dwrr_mtu(8192));
   3130		rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU,
   3131			    convert_bytes_to_dwrr_mtu(8192));
   3132	}
   3133
   3134	return 0;
   3135}
   3136
   3137int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
   3138				int blkaddr, u32 cfg)
   3139{
   3140	int fmt_idx;
   3141
   3142	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
   3143		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
   3144			return fmt_idx;
   3145	}
   3146	if (fmt_idx >= nix_hw->mark_format.total)
   3147		return -ERANGE;
   3148
   3149	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
   3150	nix_hw->mark_format.cfg[fmt_idx] = cfg;
   3151	nix_hw->mark_format.in_use++;
   3152	return fmt_idx;
   3153}
   3154
   3155static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
   3156				    int blkaddr)
   3157{
   3158	u64 cfgs[] = {
   3159		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
   3160		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
   3161		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
   3162		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
   3163		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
   3164		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
   3165		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
   3166		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
   3167		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
   3168	};
   3169	int i, rc;
   3170	u64 total;
   3171
   3172	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
   3173	nix_hw->mark_format.total = (u8)total;
   3174	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
   3175					       GFP_KERNEL);
   3176	if (!nix_hw->mark_format.cfg)
   3177		return -ENOMEM;
   3178	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
   3179		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
   3180		if (rc < 0)
   3181			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
   3182				i, rc);
   3183	}
   3184
   3185	return 0;
   3186}
   3187
   3188static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
   3189{
   3190	/* CN10K supports LBK FIFO size 72 KB */
   3191	if (rvu->hw->lbk_bufsize == 0x12000)
   3192		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
   3193	else
   3194		*max_mtu = NIC_HW_MAX_FRS;
   3195}
   3196
   3197static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
   3198{
   3199	/* RPM supports FIFO len 128 KB */
   3200	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
   3201		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
   3202	else
   3203		*max_mtu = NIC_HW_MAX_FRS;
   3204}
   3205
   3206int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
   3207				     struct nix_hw_info *rsp)
   3208{
   3209	u16 pcifunc = req->hdr.pcifunc;
   3210	u64 dwrr_mtu;
   3211	int blkaddr;
   3212
   3213	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   3214	if (blkaddr < 0)
   3215		return NIX_AF_ERR_AF_LF_INVALID;
   3216
   3217	if (is_afvf(pcifunc))
   3218		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
   3219	else
   3220		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
   3221
   3222	rsp->min_mtu = NIC_HW_MIN_FRS;
   3223
   3224	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
   3225		/* Return '1' on OTx2 */
   3226		rsp->rpm_dwrr_mtu = 1;
   3227		rsp->sdp_dwrr_mtu = 1;
   3228		return 0;
   3229	}
   3230
   3231	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU);
   3232	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
   3233
   3234	dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU);
   3235	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
   3236
   3237	return 0;
   3238}
   3239
   3240int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
   3241				   struct msg_rsp *rsp)
   3242{
   3243	u16 pcifunc = req->hdr.pcifunc;
   3244	int i, nixlf, blkaddr, err;
   3245	u64 stats;
   3246
   3247	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
   3248	if (err)
   3249		return err;
   3250
   3251	/* Get stats count supported by HW */
   3252	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
   3253
   3254	/* Reset tx stats */
   3255	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
   3256		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
   3257
   3258	/* Reset rx stats */
   3259	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
   3260		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
   3261
   3262	return 0;
   3263}
   3264
   3265/* Returns the ALG index to be set into NPC_RX_ACTION */
   3266static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
   3267{
   3268	int i;
   3269
    3270	/* Scan over existing algo entries to find a match */
   3271	for (i = 0; i < nix_hw->flowkey.in_use; i++)
   3272		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
   3273			return i;
   3274
   3275	return -ERANGE;
   3276}
   3277
   3278static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
   3279{
   3280	int idx, nr_field, key_off, field_marker, keyoff_marker;
   3281	int max_key_off, max_bit_pos, group_member;
   3282	struct nix_rx_flowkey_alg *field;
   3283	struct nix_rx_flowkey_alg tmp;
   3284	u32 key_type, valid_key;
   3285	int l4_key_offset = 0;
   3286
   3287	if (!alg)
   3288		return -EINVAL;
   3289
   3290#define FIELDS_PER_ALG  5
   3291#define MAX_KEY_OFF	40
   3292	/* Clear all fields */
   3293	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
   3294
    3295	/* Each of the 32 possible flow key algorithm definitions should
    3296	 * fall into the above incremental config (except ALG0). Otherwise a
    3297	 * single NPC MCAM entry is not sufficient for supporting RSS.
    3298	 *
    3299	 * If a different definition or combination is needed then the NPC MCAM
    3300	 * has to be programmed to filter such pkts and its action should
    3301	 * point to this definition to calculate the flowtag or hash.
    3302	 *
    3303	 * The `for loop` goes over _all_ protocol fields and the following
    3304	 * variables depict the state machine's forward progress logic.
    3305	 *
    3306	 * keyoff_marker - Enabled when the hash byte length needs to be
    3307	 * accounted for in the field->key_offset update.
   3308	 * field_marker - Enabled when a new field needs to be selected.
   3309	 * group_member - Enabled when protocol is part of a group.
   3310	 */
   3311
   3312	keyoff_marker = 0; max_key_off = 0; group_member = 0;
   3313	nr_field = 0; key_off = 0; field_marker = 1;
   3314	field = &tmp; max_bit_pos = fls(flow_cfg);
   3315	for (idx = 0;
   3316	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
   3317	     key_off < MAX_KEY_OFF; idx++) {
   3318		key_type = BIT(idx);
   3319		valid_key = flow_cfg & key_type;
   3320		/* Found a field marker, reset the field values */
   3321		if (field_marker)
   3322			memset(&tmp, 0, sizeof(tmp));
   3323
   3324		field_marker = true;
   3325		keyoff_marker = true;
   3326		switch (key_type) {
   3327		case NIX_FLOW_KEY_TYPE_PORT:
   3328			field->sel_chan = true;
   3329			/* This should be set to 1, when SEL_CHAN is set */
   3330			field->bytesm1 = 1;
   3331			break;
   3332		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
   3333			field->lid = NPC_LID_LC;
    3334			field->hdr_offset = 9; /* IPv4 protocol field offset */
   3335			field->bytesm1 = 0; /* 1 byte */
   3336			field->ltype_match = NPC_LT_LC_IP;
   3337			field->ltype_mask = 0xF;
   3338			break;
   3339		case NIX_FLOW_KEY_TYPE_IPV4:
   3340		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
   3341			field->lid = NPC_LID_LC;
   3342			field->ltype_match = NPC_LT_LC_IP;
   3343			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
   3344				field->lid = NPC_LID_LG;
   3345				field->ltype_match = NPC_LT_LG_TU_IP;
   3346			}
   3347			field->hdr_offset = 12; /* SIP offset */
   3348			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
   3349			field->ltype_mask = 0xF; /* Match only IPv4 */
   3350			keyoff_marker = false;
   3351			break;
   3352		case NIX_FLOW_KEY_TYPE_IPV6:
   3353		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
   3354			field->lid = NPC_LID_LC;
   3355			field->ltype_match = NPC_LT_LC_IP6;
   3356			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
   3357				field->lid = NPC_LID_LG;
   3358				field->ltype_match = NPC_LT_LG_TU_IP6;
   3359			}
   3360			field->hdr_offset = 8; /* SIP offset */
   3361			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
   3362			field->ltype_mask = 0xF; /* Match only IPv6 */
   3363			break;
   3364		case NIX_FLOW_KEY_TYPE_TCP:
   3365		case NIX_FLOW_KEY_TYPE_UDP:
   3366		case NIX_FLOW_KEY_TYPE_SCTP:
   3367		case NIX_FLOW_KEY_TYPE_INNR_TCP:
   3368		case NIX_FLOW_KEY_TYPE_INNR_UDP:
   3369		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
   3370			field->lid = NPC_LID_LD;
   3371			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
   3372			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
   3373			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
   3374				field->lid = NPC_LID_LH;
   3375			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
   3376
    3377			/* NPC_LT_* enum values for NPC_LID_LD and NPC_LID_LH are the same,
   3378			 * so no need to change the ltype_match, just change
   3379			 * the lid for inner protocols
   3380			 */
   3381			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
   3382				     (int)NPC_LT_LH_TU_TCP);
   3383			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
   3384				     (int)NPC_LT_LH_TU_UDP);
   3385			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
   3386				     (int)NPC_LT_LH_TU_SCTP);
   3387
   3388			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
   3389			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
   3390			    valid_key) {
   3391				field->ltype_match |= NPC_LT_LD_TCP;
   3392				group_member = true;
   3393			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
   3394				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
   3395				   valid_key) {
   3396				field->ltype_match |= NPC_LT_LD_UDP;
   3397				group_member = true;
   3398			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
   3399				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
   3400				   valid_key) {
   3401				field->ltype_match |= NPC_LT_LD_SCTP;
   3402				group_member = true;
   3403			}
   3404			field->ltype_mask = ~field->ltype_match;
   3405			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
   3406			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
    3407				/* Handle the case where any of the group items
    3408				 * is enabled in the group but not the final one
   3409				 */
   3410				if (group_member) {
   3411					valid_key = true;
   3412					group_member = false;
   3413				}
   3414			} else {
   3415				field_marker = false;
   3416				keyoff_marker = false;
   3417			}
   3418
    3419			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
    3420			 * remember the TCP key offset within the 40 byte hash key.
   3421			 */
   3422			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
   3423				l4_key_offset = key_off;
   3424			break;
   3425		case NIX_FLOW_KEY_TYPE_NVGRE:
   3426			field->lid = NPC_LID_LD;
   3427			field->hdr_offset = 4; /* VSID offset */
   3428			field->bytesm1 = 2;
   3429			field->ltype_match = NPC_LT_LD_NVGRE;
   3430			field->ltype_mask = 0xF;
   3431			break;
   3432		case NIX_FLOW_KEY_TYPE_VXLAN:
   3433		case NIX_FLOW_KEY_TYPE_GENEVE:
   3434			field->lid = NPC_LID_LE;
   3435			field->bytesm1 = 2;
   3436			field->hdr_offset = 4;
   3437			field->ltype_mask = 0xF;
   3438			field_marker = false;
   3439			keyoff_marker = false;
   3440
   3441			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
   3442				field->ltype_match |= NPC_LT_LE_VXLAN;
   3443				group_member = true;
   3444			}
   3445
   3446			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
   3447				field->ltype_match |= NPC_LT_LE_GENEVE;
   3448				group_member = true;
   3449			}
   3450
   3451			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
   3452				if (group_member) {
   3453					field->ltype_mask = ~field->ltype_match;
   3454					field_marker = true;
   3455					keyoff_marker = true;
   3456					valid_key = true;
   3457					group_member = false;
   3458				}
   3459			}
   3460			break;
   3461		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
   3462		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
   3463			field->lid = NPC_LID_LA;
   3464			field->ltype_match = NPC_LT_LA_ETHER;
   3465			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
   3466				field->lid = NPC_LID_LF;
   3467				field->ltype_match = NPC_LT_LF_TU_ETHER;
   3468			}
   3469			field->hdr_offset = 0;
   3470			field->bytesm1 = 5; /* DMAC 6 Byte */
   3471			field->ltype_mask = 0xF;
   3472			break;
   3473		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
   3474			field->lid = NPC_LID_LC;
   3475			field->hdr_offset = 40; /* IPV6 hdr */
   3476			field->bytesm1 = 0; /* 1 Byte ext hdr*/
   3477			field->ltype_match = NPC_LT_LC_IP6_EXT;
   3478			field->ltype_mask = 0xF;
   3479			break;
   3480		case NIX_FLOW_KEY_TYPE_GTPU:
   3481			field->lid = NPC_LID_LE;
   3482			field->hdr_offset = 4;
   3483			field->bytesm1 = 3; /* 4 bytes TID*/
   3484			field->ltype_match = NPC_LT_LE_GTPU;
   3485			field->ltype_mask = 0xF;
   3486			break;
   3487		case NIX_FLOW_KEY_TYPE_VLAN:
   3488			field->lid = NPC_LID_LB;
   3489			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
   3490			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
   3491			field->ltype_match = NPC_LT_LB_CTAG;
   3492			field->ltype_mask = 0xF;
   3493			field->fn_mask = 1; /* Mask out the first nibble */
   3494			break;
   3495		case NIX_FLOW_KEY_TYPE_AH:
   3496		case NIX_FLOW_KEY_TYPE_ESP:
   3497			field->hdr_offset = 0;
   3498			field->bytesm1 = 7; /* SPI + sequence number */
   3499			field->ltype_mask = 0xF;
   3500			field->lid = NPC_LID_LE;
   3501			field->ltype_match = NPC_LT_LE_ESP;
   3502			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
   3503				field->lid = NPC_LID_LD;
   3504				field->ltype_match = NPC_LT_LD_AH;
   3505				field->hdr_offset = 4;
   3506				keyoff_marker = false;
   3507			}
   3508			break;
   3509		}
   3510		field->ena = 1;
   3511
   3512		/* Found a valid flow key type */
   3513		if (valid_key) {
   3514			/* Use the key offset of TCP/UDP/SCTP fields
   3515			 * for ESP/AH fields.
   3516			 */
   3517			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
   3518			    key_type == NIX_FLOW_KEY_TYPE_AH)
   3519				key_off = l4_key_offset;
   3520			field->key_offset = key_off;
   3521			memcpy(&alg[nr_field], field, sizeof(*field));
   3522			max_key_off = max(max_key_off, field->bytesm1 + 1);
   3523
   3524			/* Found a field marker, get the next field */
   3525			if (field_marker)
   3526				nr_field++;
   3527		}
   3528
   3529		/* Found a keyoff marker, update the new key_off */
   3530		if (keyoff_marker) {
   3531			key_off += max_key_off;
   3532			max_key_off = 0;
   3533		}
   3534	}
   3535	/* Processed all the flow key types */
   3536	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
   3537		return 0;
   3538	else
   3539		return NIX_AF_ERR_RSS_NOSPC_FIELD;
   3540}
   3541
   3542static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
   3543{
   3544	u64 field[FIELDS_PER_ALG];
   3545	struct nix_hw *hw;
   3546	int fid, rc;
   3547
   3548	hw = get_nix_hw(rvu->hw, blkaddr);
   3549	if (!hw)
   3550		return NIX_AF_ERR_INVALID_NIXBLK;
   3551
    3552	/* No room to add a new flow hash algorithm */
   3553	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
   3554		return NIX_AF_ERR_RSS_NOSPC_ALGO;
   3555
   3556	/* Generate algo fields for the given flow_cfg */
   3557	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
   3558	if (rc)
   3559		return rc;
   3560
   3561	/* Update ALGX_FIELDX register with generated fields */
   3562	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
   3563		rvu_write64(rvu, blkaddr,
   3564			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
   3565							   fid), field[fid]);
   3566
    3567	/* Store the flow_cfg for further lookups */
   3568	rc = hw->flowkey.in_use;
   3569	hw->flowkey.flowkey[rc] = flow_cfg;
   3570	hw->flowkey.in_use++;
   3571
   3572	return rc;
   3573}
   3574
   3575int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
   3576					 struct nix_rss_flowkey_cfg *req,
   3577					 struct nix_rss_flowkey_cfg_rsp *rsp)
   3578{
   3579	u16 pcifunc = req->hdr.pcifunc;
   3580	int alg_idx, nixlf, blkaddr;
   3581	struct nix_hw *nix_hw;
   3582	int err;
   3583
   3584	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
   3585	if (err)
   3586		return err;
   3587
   3588	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   3589	if (!nix_hw)
   3590		return NIX_AF_ERR_INVALID_NIXBLK;
   3591
   3592	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
    3593	/* Failed to get algo index from the existing list, reserve a new one */
   3594	if (alg_idx < 0) {
   3595		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
   3596						  req->flowkey_cfg);
   3597		if (alg_idx < 0)
   3598			return alg_idx;
   3599	}
   3600	rsp->alg_idx = alg_idx;
   3601	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
   3602				       alg_idx, req->mcam_index);
   3603	return 0;
   3604}
   3605
   3606static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
   3607{
   3608	u32 flowkey_cfg, minkey_cfg;
   3609	int alg, fid, rc;
   3610
   3611	/* Disable all flow key algx fieldx */
   3612	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
   3613		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
   3614			rvu_write64(rvu, blkaddr,
   3615				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
   3616				    0);
   3617	}
   3618
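        	/* Reserve a default set of flow key algorithms below; any other
        	 * combination requested by a PF/VF is added on demand via
        	 * rvu_mbox_handler_nix_rss_flowkey_cfg().
        	 */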
   3619	/* IPv4/IPv6 SIP/DIPs */
   3620	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
   3621	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3622	if (rc < 0)
   3623		return rc;
   3624
   3625	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
   3626	minkey_cfg = flowkey_cfg;
   3627	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
   3628	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3629	if (rc < 0)
   3630		return rc;
   3631
   3632	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
   3633	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
   3634	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3635	if (rc < 0)
   3636		return rc;
   3637
   3638	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
   3639	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
   3640	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3641	if (rc < 0)
   3642		return rc;
   3643
   3644	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
   3645	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
   3646			NIX_FLOW_KEY_TYPE_UDP;
   3647	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3648	if (rc < 0)
   3649		return rc;
   3650
   3651	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
   3652	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
   3653			NIX_FLOW_KEY_TYPE_SCTP;
   3654	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3655	if (rc < 0)
   3656		return rc;
   3657
   3658	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
   3659	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
   3660			NIX_FLOW_KEY_TYPE_SCTP;
   3661	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3662	if (rc < 0)
   3663		return rc;
   3664
   3665	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
   3666	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
   3667		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
   3668	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
   3669	if (rc < 0)
   3670		return rc;
   3671
   3672	return 0;
   3673}
   3674
   3675int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
   3676				      struct nix_set_mac_addr *req,
   3677				      struct msg_rsp *rsp)
   3678{
   3679	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
   3680	u16 pcifunc = req->hdr.pcifunc;
   3681	int blkaddr, nixlf, err;
   3682	struct rvu_pfvf *pfvf;
   3683
   3684	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
   3685	if (err)
   3686		return err;
   3687
   3688	pfvf = rvu_get_pfvf(rvu, pcifunc);
   3689
   3690	/* untrusted VF can't overwrite admin(PF) changes */
   3691	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
   3692	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
   3693		dev_warn(rvu->dev,
   3694			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
   3695		return -EPERM;
   3696	}
   3697
   3698	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
   3699
   3700	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
   3701				    pfvf->rx_chan_base, req->mac_addr);
   3702
   3703	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
   3704		ether_addr_copy(pfvf->default_mac, req->mac_addr);
   3705
   3706	rvu_switch_update_rules(rvu, pcifunc);
   3707
   3708	return 0;
   3709}
   3710
   3711int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
   3712				      struct msg_req *req,
   3713				      struct nix_get_mac_addr_rsp *rsp)
   3714{
   3715	u16 pcifunc = req->hdr.pcifunc;
   3716	struct rvu_pfvf *pfvf;
   3717
   3718	if (!is_nixlf_attached(rvu, pcifunc))
   3719		return NIX_AF_ERR_AF_LF_INVALID;
   3720
   3721	pfvf = rvu_get_pfvf(rvu, pcifunc);
   3722
   3723	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
   3724
   3725	return 0;
   3726}
   3727
   3728int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
   3729				     struct msg_rsp *rsp)
   3730{
   3731	bool allmulti, promisc, nix_rx_multicast;
   3732	u16 pcifunc = req->hdr.pcifunc;
   3733	struct rvu_pfvf *pfvf;
   3734	int nixlf, err;
   3735
   3736	pfvf = rvu_get_pfvf(rvu, pcifunc);
   3737	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
   3738	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
   3739	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
   3740
   3741	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
   3742
   3743	if (is_vf(pcifunc) && !nix_rx_multicast &&
   3744	    (promisc || allmulti)) {
   3745		dev_warn_ratelimited(rvu->dev,
   3746				     "VF promisc/multicast not supported\n");
   3747		return 0;
   3748	}
   3749
   3750	/* untrusted VF can't configure promisc/allmulti */
   3751	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
   3752	    (promisc || allmulti))
   3753		return 0;
   3754
   3755	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
   3756	if (err)
   3757		return err;
   3758
   3759	if (nix_rx_multicast) {
   3760		/* add/del this PF_FUNC to/from mcast pkt replication list */
   3761		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
   3762					  allmulti);
   3763		if (err) {
   3764			dev_err(rvu->dev,
   3765				"Failed to update pcifunc 0x%x to multicast list\n",
   3766				pcifunc);
   3767			return err;
   3768		}
   3769
   3770		/* add/del this PF_FUNC to/from promisc pkt replication list */
   3771		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
   3772					  promisc);
   3773		if (err) {
   3774			dev_err(rvu->dev,
   3775				"Failed to update pcifunc 0x%x to promisc list\n",
   3776				pcifunc);
   3777			return err;
   3778		}
   3779	}
   3780
   3781	/* install/uninstall allmulti entry */
   3782	if (allmulti) {
   3783		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
   3784					       pfvf->rx_chan_base);
   3785	} else {
   3786		if (!nix_rx_multicast)
   3787			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
   3788	}
   3789
   3790	/* install/uninstall promisc entry */
   3791	if (promisc) {
   3792		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
   3793					      pfvf->rx_chan_base,
   3794					      pfvf->rx_chan_cnt);
   3795	} else {
   3796		if (!nix_rx_multicast)
   3797			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
   3798	}
   3799
   3800	return 0;
   3801}
   3802
   3803static void nix_find_link_frs(struct rvu *rvu,
   3804			      struct nix_frs_cfg *req, u16 pcifunc)
   3805{
   3806	int pf = rvu_get_pf(pcifunc);
   3807	struct rvu_pfvf *pfvf;
   3808	int maxlen, minlen;
   3809	int numvfs, hwvf;
   3810	int vf;
   3811
   3812	/* Update with requester's min/max lengths */
   3813	pfvf = rvu_get_pfvf(rvu, pcifunc);
   3814	pfvf->maxlen = req->maxlen;
   3815	if (req->update_minlen)
   3816		pfvf->minlen = req->minlen;
   3817
   3818	maxlen = req->maxlen;
   3819	minlen = req->update_minlen ? req->minlen : 0;
   3820
   3821	/* Get this PF's numVFs and starting hwvf */
   3822	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
   3823
   3824	/* For each VF, compare requested max/minlen */
   3825	for (vf = 0; vf < numvfs; vf++) {
   3826		pfvf =  &rvu->hwvf[hwvf + vf];
   3827		if (pfvf->maxlen > maxlen)
   3828			maxlen = pfvf->maxlen;
   3829		if (req->update_minlen &&
   3830		    pfvf->minlen && pfvf->minlen < minlen)
   3831			minlen = pfvf->minlen;
   3832	}
   3833
   3834	/* Compare requested max/minlen with PF's max/minlen */
   3835	pfvf = &rvu->pf[pf];
   3836	if (pfvf->maxlen > maxlen)
   3837		maxlen = pfvf->maxlen;
   3838	if (req->update_minlen &&
   3839	    pfvf->minlen && pfvf->minlen < minlen)
   3840		minlen = pfvf->minlen;
   3841
    3842	/* Update the request with the max/min across the PF and its VFs */
   3843	req->maxlen = maxlen;
   3844	if (req->update_minlen)
   3845		req->minlen = minlen;
   3846}
   3847
   3848static int
   3849nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
   3850			u16 pcifunc, u64 tx_credits)
   3851{
   3852	struct rvu_hwinfo *hw = rvu->hw;
   3853	int pf = rvu_get_pf(pcifunc);
   3854	u8 cgx_id = 0, lmac_id = 0;
   3855	unsigned long poll_tmo;
   3856	bool restore_tx_en = 0;
   3857	struct nix_hw *nix_hw;
   3858	u64 cfg, sw_xoff = 0;
   3859	u32 schq = 0;
   3860	u32 credits;
   3861	int rc;
   3862
   3863	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   3864	if (!nix_hw)
   3865		return NIX_AF_ERR_INVALID_NIXBLK;
   3866
   3867	if (tx_credits == nix_hw->tx_credits[link])
   3868		return 0;
   3869
    3870	/* Enable CGX Tx if it is disabled, so that credits can return */
   3871	if (is_pf_cgxmapped(rvu, pf)) {
   3872		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
   3873		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
   3874						    lmac_id, true);
   3875	}
   3876
   3877	mutex_lock(&rvu->rsrc_lock);
   3878	/* Disable new traffic to link */
   3879	if (hw->cap.nix_shaping) {
   3880		schq = nix_get_tx_link(rvu, pcifunc);
   3881		sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
   3882		rvu_write64(rvu, blkaddr,
   3883			    NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
   3884	}
   3885
   3886	rc = NIX_AF_ERR_LINK_CREDITS;
   3887	poll_tmo = jiffies + usecs_to_jiffies(200000);
   3888	/* Wait for credits to return */
   3889	do {
   3890		if (time_after(jiffies, poll_tmo))
   3891			goto exit;
   3892		usleep_range(100, 200);
   3893
   3894		cfg = rvu_read64(rvu, blkaddr,
   3895				 NIX_AF_TX_LINKX_NORM_CREDIT(link));
   3896		credits = (cfg >> 12) & 0xFFFFFULL;
   3897	} while (credits != nix_hw->tx_credits[link]);
   3898
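        	/* Program the new credit count; per the masks used here it
        	 * occupies bits [31:12] of NIX_AF_TX_LINKX_NORM_CREDIT.
        	 */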
   3899	cfg &= ~(0xFFFFFULL << 12);
   3900	cfg |= (tx_credits << 12);
   3901	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
   3902	rc = 0;
   3903
   3904	nix_hw->tx_credits[link] = tx_credits;
   3905
   3906exit:
   3907	/* Enable traffic back */
   3908	if (hw->cap.nix_shaping && !sw_xoff)
   3909		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
   3910
   3911	/* Restore state of cgx tx */
   3912	if (restore_tx_en)
   3913		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
   3914
   3915	mutex_unlock(&rvu->rsrc_lock);
   3916	return rc;
   3917}
   3918
   3919int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
   3920				    struct msg_rsp *rsp)
   3921{
   3922	struct rvu_hwinfo *hw = rvu->hw;
   3923	u16 pcifunc = req->hdr.pcifunc;
   3924	int pf = rvu_get_pf(pcifunc);
   3925	int blkaddr, schq, link = -1;
   3926	struct nix_txsch *txsch;
   3927	u64 cfg, lmac_fifo_len;
   3928	struct nix_hw *nix_hw;
   3929	struct rvu_pfvf *pfvf;
   3930	u8 cgx = 0, lmac = 0;
   3931	u16 max_mtu;
   3932
   3933	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   3934	if (blkaddr < 0)
   3935		return NIX_AF_ERR_AF_LF_INVALID;
   3936
   3937	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   3938	if (!nix_hw)
   3939		return NIX_AF_ERR_INVALID_NIXBLK;
   3940
   3941	if (is_afvf(pcifunc))
   3942		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
   3943	else
   3944		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
   3945
   3946	if (!req->sdp_link && req->maxlen > max_mtu)
   3947		return NIX_AF_ERR_FRS_INVALID;
   3948
   3949	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
   3950		return NIX_AF_ERR_FRS_INVALID;
   3951
    3952	/* Check if requester wants to update SMQs */
   3953	if (!req->update_smq)
   3954		goto rx_frscfg;
   3955
    3956	/* Update min/maxlen in each of the SMQs attached to this PF/VF */
   3957	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
   3958	mutex_lock(&rvu->rsrc_lock);
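        	/* Per the masks used below, NIX_AF_SMQX_CFG carries maxlen in
        	 * bits [23:8] and minlen in bits [6:0].
        	 */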
   3959	for (schq = 0; schq < txsch->schq.max; schq++) {
   3960		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
   3961			continue;
   3962		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
   3963		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
   3964		if (req->update_minlen)
   3965			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
   3966		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
   3967	}
   3968	mutex_unlock(&rvu->rsrc_lock);
   3969
   3970rx_frscfg:
   3971	/* Check if config is for SDP link */
   3972	if (req->sdp_link) {
   3973		if (!hw->sdp_links)
   3974			return NIX_AF_ERR_RX_LINK_INVALID;
   3975		link = hw->cgx_links + hw->lbk_links;
   3976		goto linkcfg;
   3977	}
   3978
   3979	/* Check if the request is from CGX mapped RVU PF */
   3980	if (is_pf_cgxmapped(rvu, pf)) {
   3981		/* Get CGX and LMAC to which this PF is mapped and find link */
   3982		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
   3983		link = (cgx * hw->lmac_per_cgx) + lmac;
   3984	} else if (pf == 0) {
   3985		/* For VFs of PF0 ingress is LBK port, so config LBK link */
   3986		pfvf = rvu_get_pfvf(rvu, pcifunc);
   3987		link = hw->cgx_links + pfvf->lbkid;
   3988	}
   3989
   3990	if (link < 0)
   3991		return NIX_AF_ERR_RX_LINK_INVALID;
   3992
   3993	nix_find_link_frs(rvu, req, pcifunc);
   3994
   3995linkcfg:
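        	/* Per the masks used below, NIX_AF_RX_LINKX_CFG carries maxlen in
        	 * bits [31:16] and minlen in bits [15:0].
        	 */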
   3996	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
   3997	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
   3998	if (req->update_minlen)
   3999		cfg = (cfg & ~0xFFFFULL) | req->minlen;
   4000	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
   4001
   4002	if (req->sdp_link || pf == 0)
   4003		return 0;
   4004
   4005	/* Update transmit credits for CGX links */
   4006	lmac_fifo_len =
   4007		rvu_cgx_get_fifolen(rvu) /
   4008		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
   4009	return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
   4010				       (lmac_fifo_len - req->maxlen) / 16);
   4011}
   4012
   4013int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
   4014				    struct msg_rsp *rsp)
   4015{
   4016	int nixlf, blkaddr, err;
   4017	u64 cfg;
   4018
   4019	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
   4020	if (err)
   4021		return err;
   4022
   4023	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
   4024	/* Set the interface configuration */
   4025	if (req->len_verify & BIT(0))
   4026		cfg |= BIT_ULL(41);
   4027	else
   4028		cfg &= ~BIT_ULL(41);
   4029
   4030	if (req->len_verify & BIT(1))
   4031		cfg |= BIT_ULL(40);
   4032	else
   4033		cfg &= ~BIT_ULL(40);
   4034
   4035	if (req->csum_verify & BIT(0))
   4036		cfg |= BIT_ULL(37);
   4037	else
   4038		cfg &= ~BIT_ULL(37);
   4039
   4040	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
   4041
   4042	return 0;
   4043}
   4044
   4045static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
   4046{
   4047	/* CN10k supports 72KB FIFO size and max packet size of 64k */
   4048	if (rvu->hw->lbk_bufsize == 0x12000)
   4049		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
   4050
   4051	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
   4052}
   4053
   4054static void nix_link_config(struct rvu *rvu, int blkaddr,
   4055			    struct nix_hw *nix_hw)
   4056{
   4057	struct rvu_hwinfo *hw = rvu->hw;
   4058	int cgx, lmac_cnt, slink, link;
   4059	u16 lbk_max_frs, lmac_max_frs;
   4060	u64 tx_credits, cfg;
   4061
   4062	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
   4063	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
   4064
   4065	/* Set default min/max packet lengths allowed on NIX Rx links.
   4066	 *
    4067	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
    4068	 * as undersized and report them to SW as error pkts, hence
    4069	 * setting it to 40 bytes.
   4070	 */
   4071	for (link = 0; link < hw->cgx_links; link++) {
   4072		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
   4073				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
   4074	}
   4075
    4076	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
   4077		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
   4078			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
   4079	}
   4080	if (hw->sdp_links) {
   4081		link = hw->cgx_links + hw->lbk_links;
   4082		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
   4083			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
   4084	}
   4085
   4086	/* Set credits for Tx links assuming max packet length allowed.
   4087	 * This will be reconfigured based on MTU set for PF/VF.
   4088	 */
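        	/* Tx credits are programmed in units of 16 bytes, hence the
        	 * divide-by-16 below.
        	 */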
   4089	for (cgx = 0; cgx < hw->cgx; cgx++) {
   4090		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
   4091		/* Skip when cgx is not available or lmac cnt is zero */
   4092		if (lmac_cnt <= 0)
   4093			continue;
   4094		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
   4095			       lmac_max_frs) / 16;
   4096		/* Enable credits and set credit pkt count to max allowed */
   4097		cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
   4098		slink = cgx * hw->lmac_per_cgx;
   4099		for (link = slink; link < (slink + lmac_cnt); link++) {
   4100			nix_hw->tx_credits[link] = tx_credits;
   4101			rvu_write64(rvu, blkaddr,
   4102				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
   4103		}
   4104	}
   4105
   4106	/* Set Tx credits for LBK link */
   4107	slink = hw->cgx_links;
   4108	for (link = slink; link < (slink + hw->lbk_links); link++) {
   4109		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
   4110		nix_hw->tx_credits[link] = tx_credits;
   4111		/* Enable credits and set credit pkt count to max allowed */
   4112		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
   4113		rvu_write64(rvu, blkaddr,
   4114			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
   4115	}
   4116}
   4117
   4118static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
   4119{
   4120	int idx, err;
   4121	u64 status;
   4122
   4123	/* Start X2P bus calibration */
   4124	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
   4125		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
   4126	/* Wait for calibration to complete */
   4127	err = rvu_poll_reg(rvu, blkaddr,
   4128			   NIX_AF_STATUS, BIT_ULL(10), false);
   4129	if (err) {
   4130		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
   4131		return err;
   4132	}
   4133
   4134	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
   4135	/* Check if CGX devices are ready */
   4136	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
   4137		/* Skip when cgx port is not available */
   4138		if (!rvu_cgx_pdata(idx, rvu) ||
   4139		    (status & (BIT_ULL(16 + idx))))
   4140			continue;
   4141		dev_err(rvu->dev,
   4142			"CGX%d didn't respond to NIX X2P calibration\n", idx);
   4143		err = -EBUSY;
   4144	}
   4145
   4146	/* Check if LBK is ready */
   4147	if (!(status & BIT_ULL(19))) {
   4148		dev_err(rvu->dev,
   4149			"LBK didn't respond to NIX X2P calibration\n");
   4150		err = -EBUSY;
   4151	}
   4152
   4153	/* Clear 'calibrate_x2p' bit */
   4154	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
   4155		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
   4156	if (err || (status & 0x3FFULL))
   4157		dev_err(rvu->dev,
   4158			"NIX X2P calibration failed, status 0x%llx\n", status);
   4159	if (err)
   4160		return err;
   4161	return 0;
   4162}
   4163
   4164static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
   4165{
   4166	u64 cfg;
   4167	int err;
   4168
   4169	/* Set admin queue endianness */
   4170	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
   4171#ifdef __BIG_ENDIAN
   4172	cfg |= BIT_ULL(8);
   4173	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
   4174#else
   4175	cfg &= ~BIT_ULL(8);
   4176	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
   4177#endif
   4178
   4179	/* Do not bypass NDC cache */
   4180	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
   4181	cfg &= ~0x3FFEULL;
   4182#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
   4183	/* Disable caching of SQB aka SQEs */
   4184	cfg |= 0x04ULL;
   4185#endif
   4186	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
   4187
   4188	/* Result structure can be followed by RQ/SQ/CQ context at
    4189	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
   4190	 * operation type. Alloc sufficient result memory for all operations.
   4191	 */
   4192	err = rvu_aq_alloc(rvu, &block->aq,
   4193			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
   4194			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
   4195	if (err)
   4196		return err;
   4197
   4198	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
   4199	rvu_write64(rvu, block->addr,
   4200		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
   4201	return 0;
   4202}
   4203
   4204static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
   4205{
   4206	struct rvu_hwinfo *hw = rvu->hw;
   4207	u64 hw_const;
   4208
   4209	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
   4210
    4211	/* On OcteonTx2 the DWRR quantum is directly configured into each of
    4212	 * the transmit scheduler queues, and PF/VF drivers were free to
    4213	 * configure any value up to 2^24.
    4214	 * On CN10K the HW is modified: the quantum configuration at scheduler
    4215	 * queues is in terms of weight, and SW needs to set up a base DWRR MTU
    4216	 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
    4217	 * 'DWRR MTU * weight' to get the quantum.
   4218	 *
   4219	 * Check if HW uses a common MTU for all DWRR quantum configs.
   4220	 * On OcteonTx2 this register field is '0'.
   4221	 */
   4222	if (((hw_const >> 56) & 0x10) == 0x10)
   4223		hw->cap.nix_common_dwrr_mtu = true;
   4224}
   4225
   4226static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
   4227{
   4228	const struct npc_lt_def_cfg *ltdefs;
   4229	struct rvu_hwinfo *hw = rvu->hw;
   4230	int blkaddr = nix_hw->blkaddr;
   4231	struct rvu_block *block;
   4232	int err;
   4233	u64 cfg;
   4234
   4235	block = &hw->block[blkaddr];
   4236
   4237	if (is_rvu_96xx_B0(rvu)) {
   4238		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
   4239		 * internal state when conditional clocks are turned off.
   4240		 * Hence enable them.
   4241		 */
   4242		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
   4243			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
   4244
   4245		/* Set chan/link to backpressure TL3 instead of TL2 */
   4246		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
   4247
   4248		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
   4249		 * This sticky mode is known to cause SQ stalls when multiple
   4250		 * SQs are mapped to same SMQ and transmitting pkts at a time.
   4251		 */
   4252		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
   4253		cfg &= ~BIT_ULL(15);
   4254		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
   4255	}
   4256
   4257	ltdefs = rvu->kpu.lt_def;
   4258	/* Calibrate X2P bus to check if CGX/LBK links are fine */
   4259	err = nix_calibrate_x2p(rvu, blkaddr);
   4260	if (err)
   4261		return err;
   4262
   4263	/* Setup capabilities of the NIX block */
   4264	rvu_nix_setup_capabilities(rvu, blkaddr);
   4265
   4266	/* Initialize admin queue */
   4267	err = nix_aq_init(rvu, block);
   4268	if (err)
   4269		return err;
   4270
   4271	/* Restore CINT timer delay to HW reset values */
   4272	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
   4273
    4274	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
   4275	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
   4276
   4277	if (is_block_implemented(hw, blkaddr)) {
   4278		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
   4279		if (err)
   4280			return err;
   4281
   4282		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
   4283		if (err)
   4284			return err;
   4285
   4286		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
   4287		if (err)
   4288			return err;
   4289
   4290		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
   4291		if (err)
   4292			return err;
   4293
   4294		err = nix_setup_txvlan(rvu, nix_hw);
   4295		if (err)
   4296			return err;
   4297
   4298		/* Configure segmentation offload formats */
   4299		nix_setup_lso(rvu, nix_hw, blkaddr);
   4300
   4301		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
    4302		 * This helps the HW protocol checker identify headers
   4303		 * and validate length and checksums.
   4304		 */
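        		/* Each NIX_AF_RX_DEF_* write below encodes the layer id at
        		 * bit 8, the layer type match value at bit 4 and the layer
        		 * type mask in the low nibble.
        		 */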
   4305		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
   4306			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
   4307			    ltdefs->rx_ol2.ltype_mask);
   4308		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
   4309			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
   4310			    ltdefs->rx_oip4.ltype_mask);
   4311		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
   4312			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
   4313			    ltdefs->rx_iip4.ltype_mask);
   4314		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
   4315			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
   4316			    ltdefs->rx_oip6.ltype_mask);
   4317		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
   4318			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
   4319			    ltdefs->rx_iip6.ltype_mask);
   4320		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
   4321			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
   4322			    ltdefs->rx_otcp.ltype_mask);
   4323		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
   4324			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
   4325			    ltdefs->rx_itcp.ltype_mask);
   4326		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
   4327			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
   4328			    ltdefs->rx_oudp.ltype_mask);
   4329		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
   4330			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
   4331			    ltdefs->rx_iudp.ltype_mask);
   4332		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
   4333			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
   4334			    ltdefs->rx_osctp.ltype_mask);
   4335		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
   4336			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
   4337			    ltdefs->rx_isctp.ltype_mask);
   4338
   4339		if (!is_rvu_otx2(rvu)) {
   4340			/* Enable APAD calculation for other protocols
   4341			 * matching APAD0 and APAD1 lt def registers.
   4342			 */
   4343			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
   4344				    (ltdefs->rx_apad0.valid << 11) |
   4345				    (ltdefs->rx_apad0.lid << 8) |
   4346				    (ltdefs->rx_apad0.ltype_match << 4) |
   4347				    ltdefs->rx_apad0.ltype_mask);
   4348			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
   4349				    (ltdefs->rx_apad1.valid << 11) |
   4350				    (ltdefs->rx_apad1.lid << 8) |
   4351				    (ltdefs->rx_apad1.ltype_match << 4) |
   4352				    ltdefs->rx_apad1.ltype_mask);
   4353
    4354			/* Receive ethertype definition register defines layer
   4355			 * information in NPC_RESULT_S to identify the Ethertype
   4356			 * location in L2 header. Used for Ethertype overwriting
   4357			 * in inline IPsec flow.
   4358			 */
   4359			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
   4360				    (ltdefs->rx_et[0].offset << 12) |
   4361				    (ltdefs->rx_et[0].valid << 11) |
   4362				    (ltdefs->rx_et[0].lid << 8) |
   4363				    (ltdefs->rx_et[0].ltype_match << 4) |
   4364				    ltdefs->rx_et[0].ltype_mask);
   4365			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
   4366				    (ltdefs->rx_et[1].offset << 12) |
   4367				    (ltdefs->rx_et[1].valid << 11) |
   4368				    (ltdefs->rx_et[1].lid << 8) |
   4369				    (ltdefs->rx_et[1].ltype_match << 4) |
   4370				    ltdefs->rx_et[1].ltype_mask);
   4371		}
   4372
   4373		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
   4374		if (err)
   4375			return err;
   4376
   4377		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
   4378					     sizeof(u64), GFP_KERNEL);
   4379		if (!nix_hw->tx_credits)
   4380			return -ENOMEM;
   4381
   4382		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
   4383		nix_link_config(rvu, blkaddr, nix_hw);
   4384
   4385		/* Enable Channel backpressure */
   4386		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
   4387	}
   4388	return 0;
   4389}
   4390
   4391int rvu_nix_init(struct rvu *rvu)
   4392{
   4393	struct rvu_hwinfo *hw = rvu->hw;
   4394	struct nix_hw *nix_hw;
   4395	int blkaddr = 0, err;
   4396	int i = 0;
   4397
   4398	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
   4399			       GFP_KERNEL);
   4400	if (!hw->nix)
   4401		return -ENOMEM;
   4402
   4403	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
   4404	while (blkaddr) {
   4405		nix_hw = &hw->nix[i];
   4406		nix_hw->rvu = rvu;
   4407		nix_hw->blkaddr = blkaddr;
   4408		err = rvu_nix_block_init(rvu, nix_hw);
   4409		if (err)
   4410			return err;
   4411		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
   4412		i++;
   4413	}
   4414
   4415	return 0;
   4416}
   4417
   4418static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
   4419				  struct rvu_block *block)
   4420{
   4421	struct nix_txsch *txsch;
   4422	struct nix_mcast *mcast;
   4423	struct nix_txvlan *vlan;
   4424	struct nix_hw *nix_hw;
   4425	int lvl;
   4426
   4427	rvu_aq_free(rvu, block->aq);
   4428
   4429	if (is_block_implemented(rvu->hw, blkaddr)) {
   4430		nix_hw = get_nix_hw(rvu->hw, blkaddr);
   4431		if (!nix_hw)
   4432			return;
   4433
   4434		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   4435			txsch = &nix_hw->txsch[lvl];
   4436			kfree(txsch->schq.bmap);
   4437		}
   4438
   4439		kfree(nix_hw->tx_credits);
   4440
   4441		nix_ipolicer_freemem(rvu, nix_hw);
   4442
   4443		vlan = &nix_hw->txvlan;
   4444		kfree(vlan->rsrc.bmap);
   4445		mutex_destroy(&vlan->rsrc_lock);
   4446
   4447		mcast = &nix_hw->mcast;
   4448		qmem_free(rvu->dev, mcast->mce_ctx);
   4449		qmem_free(rvu->dev, mcast->mcast_buf);
   4450		mutex_destroy(&mcast->mce_lock);
   4451	}
   4452}
   4453
   4454void rvu_nix_freemem(struct rvu *rvu)
   4455{
   4456	struct rvu_hwinfo *hw = rvu->hw;
   4457	struct rvu_block *block;
   4458	int blkaddr = 0;
   4459
   4460	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
   4461	while (blkaddr) {
   4462		block = &hw->block[blkaddr];
   4463		rvu_nix_block_freemem(rvu, blkaddr, block);
   4464		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
   4465	}
   4466}
   4467
   4468int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
   4469				     struct msg_rsp *rsp)
   4470{
   4471	u16 pcifunc = req->hdr.pcifunc;
   4472	struct rvu_pfvf *pfvf;
   4473	int nixlf, err;
   4474
   4475	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
   4476	if (err)
   4477		return err;
   4478
   4479	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
   4480
   4481	npc_mcam_enable_flows(rvu, pcifunc);
   4482
   4483	pfvf = rvu_get_pfvf(rvu, pcifunc);
   4484	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
   4485
   4486	rvu_switch_update_rules(rvu, pcifunc);
   4487
   4488	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
   4489}
   4490
   4491int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
   4492				    struct msg_rsp *rsp)
   4493{
   4494	u16 pcifunc = req->hdr.pcifunc;
   4495	struct rvu_pfvf *pfvf;
   4496	int nixlf, err;
   4497
   4498	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
   4499	if (err)
   4500		return err;
   4501
   4502	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
   4503
   4504	pfvf = rvu_get_pfvf(rvu, pcifunc);
   4505	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
   4506
   4507	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
   4508}
   4509
   4510#define RX_SA_BASE  GENMASK_ULL(52, 7)
   4511
   4512void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
   4513{
   4514	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
   4515	struct hwctx_disable_req ctx_req;
   4516	int pf = rvu_get_pf(pcifunc);
   4517	struct mac_ops *mac_ops;
   4518	u8 cgx_id, lmac_id;
   4519	u64 sa_base;
   4520	void *cgxd;
   4521	int err;
   4522
   4523	ctx_req.hdr.pcifunc = pcifunc;
   4524
   4525	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
   4526	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
   4527	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
   4528	nix_interface_deinit(rvu, pcifunc, nixlf);
   4529	nix_rx_sync(rvu, blkaddr);
   4530	nix_txschq_free(rvu, pcifunc);
   4531
   4532	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
   4533
   4534	rvu_cgx_start_stop_io(rvu, pcifunc, false);
   4535
   4536	if (pfvf->sq_ctx) {
   4537		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
   4538		err = nix_lf_hwctx_disable(rvu, &ctx_req);
   4539		if (err)
   4540			dev_err(rvu->dev, "SQ ctx disable failed\n");
   4541	}
   4542
   4543	if (pfvf->rq_ctx) {
   4544		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
   4545		err = nix_lf_hwctx_disable(rvu, &ctx_req);
   4546		if (err)
   4547			dev_err(rvu->dev, "RQ ctx disable failed\n");
   4548	}
   4549
   4550	if (pfvf->cq_ctx) {
   4551		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
   4552		err = nix_lf_hwctx_disable(rvu, &ctx_req);
   4553		if (err)
   4554			dev_err(rvu->dev, "CQ ctx disable failed\n");
   4555	}
   4556
   4557	/* reset HW config done for Switch headers */
   4558	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
   4559			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
   4560
   4561	/* Disabling CGX and NPC config done for PTP */
   4562	if (pfvf->hw_rx_tstamp_en) {
   4563		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
   4564		cgxd = rvu_cgx_pdata(cgx_id, rvu);
   4565		mac_ops = get_mac_ops(cgxd);
   4566		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
   4567		/* Undo NPC config done for PTP */
   4568		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
   4569			dev_err(rvu->dev, "NPC config for PTP failed\n");
   4570		pfvf->hw_rx_tstamp_en = false;
   4571	}
   4572
   4573	/* reset priority flow control config */
   4574	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
   4575
   4576	/* reset 802.3x flow control config */
   4577	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
   4578
   4579	nix_ctx_free(rvu, pfvf);
   4580
   4581	nix_free_all_bandprof(rvu, pcifunc);
   4582
   4583	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
   4584	if (FIELD_GET(RX_SA_BASE, sa_base)) {
   4585		err = rvu_cpt_ctx_flush(rvu, pcifunc);
   4586		if (err)
   4587			dev_err(rvu->dev,
   4588				"CPT ctx flush failed with error: %d\n", err);
   4589	}
   4590}
   4591
   4592#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
   4593
   4594static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
   4595{
   4596	struct rvu_hwinfo *hw = rvu->hw;
   4597	struct rvu_block *block;
   4598	int blkaddr, pf;
   4599	int nixlf;
   4600	u64 cfg;
   4601
   4602	pf = rvu_get_pf(pcifunc);
   4603	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
   4604		return 0;
   4605
   4606	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   4607	if (blkaddr < 0)
   4608		return NIX_AF_ERR_AF_LF_INVALID;
   4609
   4610	block = &hw->block[blkaddr];
   4611	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
   4612	if (nixlf < 0)
   4613		return NIX_AF_ERR_AF_LF_INVALID;
   4614
   4615	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
   4616
   4617	if (enable)
   4618		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
   4619	else
   4620		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
   4621
   4622	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
   4623
   4624	return 0;
   4625}
   4626
   4627int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
   4628					  struct msg_rsp *rsp)
   4629{
   4630	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
   4631}
   4632
   4633int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
   4634					   struct msg_rsp *rsp)
   4635{
   4636	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
   4637}
   4638
   4639int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
   4640					struct nix_lso_format_cfg *req,
   4641					struct nix_lso_format_cfg_rsp *rsp)
   4642{
   4643	u16 pcifunc = req->hdr.pcifunc;
   4644	struct nix_hw *nix_hw;
   4645	struct rvu_pfvf *pfvf;
   4646	int blkaddr, idx, f;
   4647	u64 reg;
   4648
   4649	pfvf = rvu_get_pfvf(rvu, pcifunc);
   4650	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
   4651	if (!pfvf->nixlf || blkaddr < 0)
   4652		return NIX_AF_ERR_AF_LF_INVALID;
   4653
   4654	nix_hw = get_nix_hw(rvu->hw, blkaddr);
   4655	if (!nix_hw)
   4656		return NIX_AF_ERR_INVALID_NIXBLK;
   4657
   4658	/* Find existing matching LSO format, if any */
   4659	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
   4660		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
   4661			reg = rvu_read64(rvu, blkaddr,
   4662					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
   4663			if (req->fields[f] != (reg & req->field_mask))
   4664				break;
   4665		}
   4666
   4667		if (f == NIX_LSO_FIELD_MAX)
   4668			break;
   4669	}
   4670
   4671	if (idx < nix_hw->lso.in_use) {
   4672		/* Match found */
   4673		rsp->lso_format_idx = idx;
   4674		return 0;
   4675	}
   4676
   4677	if (nix_hw->lso.in_use == nix_hw->lso.total)
   4678		return NIX_AF_ERR_LSO_CFG_FAIL;
   4679
   4680	rsp->lso_format_idx = nix_hw->lso.in_use++;
   4681
   4682	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
   4683		rvu_write64(rvu, blkaddr,
   4684			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
   4685			    req->fields[f]);
   4686
   4687	return 0;
   4688}
   4689
   4690#define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
   4691#define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
   4692#define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
   4693#define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
   4694
   4695#define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
   4696#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
   4697#define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
   4698
   4699static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
   4700				 int blkaddr)
   4701{
   4702	u8 cpt_idx, cpt_blkaddr;
   4703	u64 val;
   4704
   4705	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
   4706	if (req->enable) {
   4707		val = 0;
   4708		/* Enable context prefetching */
   4709		if (!is_rvu_otx2(rvu))
   4710			val |= BIT_ULL(51);
   4711
   4712		/* Set OPCODE and EGRP */
   4713		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
   4714		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
   4715		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
   4716		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
   4717
   4718		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
   4719
   4720		/* Set CPT queue for inline IPSec */
   4721		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
   4722		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
   4723				  req->inst_qsel.cpt_pf_func);
   4724
   4725		if (!is_rvu_otx2(rvu)) {
   4726			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
   4727						       BLKADDR_CPT1;
   4728			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
   4729		}
   4730
   4731		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
   4732			    val);
   4733
   4734		/* Set CPT credit */
   4735		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
   4736			    req->cpt_credit);
   4737	} else {
   4738		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
   4739		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
   4740			    0x0);
   4741		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
   4742			    0x3FFFFF);
   4743	}
   4744}
   4745
   4746int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
   4747					  struct nix_inline_ipsec_cfg *req,
   4748					  struct msg_rsp *rsp)
   4749{
   4750	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
   4751		return 0;
   4752
   4753	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
   4754	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
   4755		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
   4756
   4757	return 0;
   4758}
   4759
   4760int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
   4761					     struct nix_inline_ipsec_lf_cfg *req,
   4762					     struct msg_rsp *rsp)
   4763{
   4764	int lf, blkaddr, err;
   4765	u64 val;
   4766
   4767	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
   4768		return 0;
   4769
   4770	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
   4771	if (err)
   4772		return err;
   4773
   4774	if (req->enable) {
   4775		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
   4776		val = (u64)req->ipsec_cfg0.tt << 44 |
   4777		      (u64)req->ipsec_cfg0.tag_const << 20 |
   4778		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
   4779		      req->ipsec_cfg0.lenm1_max;
   4780
   4781		if (blkaddr == BLKADDR_NIX1)
   4782			val |= BIT_ULL(46);
   4783
   4784		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
   4785
   4786		/* Set SA_IDX_W and SA_IDX_MAX */
   4787		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
   4788		      req->ipsec_cfg1.sa_idx_max;
   4789		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
   4790
   4791		/* Set SA base address */
   4792		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
   4793			    req->sa_base_addr);
   4794	} else {
   4795		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
   4796		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
   4797		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
   4798			    0x0);
   4799	}
   4800
   4801	return 0;
   4802}
   4803void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
   4804{
   4805	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
   4806
   4807	/* overwrite vf mac address with default_mac */
   4808	if (from_vf)
   4809		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
   4810}
   4811
   4812/* NIX ingress policers or bandwidth profiles APIs */
   4813static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
   4814{
   4815	struct npc_lt_def_cfg defs, *ltdefs;
   4816
   4817	ltdefs = &defs;
   4818	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
   4819
   4820	/* Extract PCP and DEI fields from outer VLAN from byte offset
    4821	 * 2 from the start of LB_PTR (i.e. TAG).
   4822	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
   4823	 * fields are considered when 'Tunnel enable' is set in profile.
   4824	 */
   4825	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
   4826		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
   4827		    (ltdefs->ovlan.ltype_match << 4) |
   4828		    ltdefs->ovlan.ltype_mask);
   4829	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
   4830		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
   4831		    (ltdefs->ivlan.ltype_match << 4) |
   4832		    ltdefs->ivlan.ltype_mask);
   4833
   4834	/* DSCP field in outer and tunneled IPv4 packets */
   4835	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
   4836		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
   4837		    (ltdefs->rx_oip4.ltype_match << 4) |
   4838		    ltdefs->rx_oip4.ltype_mask);
   4839	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
   4840		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
   4841		    (ltdefs->rx_iip4.ltype_match << 4) |
   4842		    ltdefs->rx_iip4.ltype_mask);
   4843
   4844	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
   4845	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
   4846		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
   4847		    (ltdefs->rx_oip6.ltype_match << 4) |
   4848		    ltdefs->rx_oip6.ltype_mask);
   4849	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
   4850		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
   4851		    (ltdefs->rx_iip6.ltype_match << 4) |
   4852		    ltdefs->rx_iip6.ltype_mask);
   4853}
   4854
   4855static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
   4856				    int layer, int prof_idx)
   4857{
   4858	struct nix_cn10k_aq_enq_req aq_req;
   4859	int rc;
   4860
   4861	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
   4862
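        	/* qidx encodes the profile index in bits [13:0] and the layer in
        	 * bits [15:14], matching the decode in nix_verify_bandprof().
        	 */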
   4863	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
   4864	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
   4865	aq_req.op = NIX_AQ_INSTOP_INIT;
   4866
   4867	/* Context is all zeros, submit to AQ */
   4868	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
   4869				     (struct nix_aq_enq_req *)&aq_req, NULL);
   4870	if (rc)
   4871		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
   4872			layer, prof_idx);
   4873	return rc;
   4874}
   4875
   4876static int nix_setup_ipolicers(struct rvu *rvu,
   4877			       struct nix_hw *nix_hw, int blkaddr)
   4878{
   4879	struct rvu_hwinfo *hw = rvu->hw;
   4880	struct nix_ipolicer *ipolicer;
   4881	int err, layer, prof_idx;
   4882	u64 cfg;
   4883
   4884	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
   4885	if (!(cfg & BIT_ULL(61))) {
   4886		hw->cap.ipolicer = false;
   4887		return 0;
   4888	}
   4889
   4890	hw->cap.ipolicer = true;
   4891	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
   4892					sizeof(*ipolicer), GFP_KERNEL);
   4893	if (!nix_hw->ipolicer)
   4894		return -ENOMEM;
   4895
   4896	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
   4897
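        	/* As decoded below, NIX_AF_PL_CONST holds the per-layer profile
        	 * counts: leaf in bits [15:0], mid in [31:16] and top in [47:32].
        	 */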
   4898	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
   4899		ipolicer = &nix_hw->ipolicer[layer];
   4900		switch (layer) {
   4901		case BAND_PROF_LEAF_LAYER:
   4902			ipolicer->band_prof.max = cfg & 0XFFFF;
   4903			break;
   4904		case BAND_PROF_MID_LAYER:
   4905			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
   4906			break;
   4907		case BAND_PROF_TOP_LAYER:
   4908			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
   4909			break;
   4910		}
   4911
   4912		if (!ipolicer->band_prof.max)
   4913			continue;
   4914
   4915		err = rvu_alloc_bitmap(&ipolicer->band_prof);
   4916		if (err)
   4917			return err;
   4918
   4919		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
   4920						  ipolicer->band_prof.max,
   4921						  sizeof(u16), GFP_KERNEL);
   4922		if (!ipolicer->pfvf_map)
   4923			return -ENOMEM;
   4924
   4925		ipolicer->match_id = devm_kcalloc(rvu->dev,
   4926						  ipolicer->band_prof.max,
   4927						  sizeof(u16), GFP_KERNEL);
   4928		if (!ipolicer->match_id)
   4929			return -ENOMEM;
   4930
   4931		for (prof_idx = 0;
   4932		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
   4933			/* Set AF as current owner for INIT ops to succeed */
   4934			ipolicer->pfvf_map[prof_idx] = 0x00;
   4935
    4936			/* There is no enable bit in the profile context,
    4937			 * so there is no context disable either. INIT them here
    4938			 * so that PF/VF later only have to do a WRITE to
    4939			 * set up policer rates and config.
   4940			 */
   4941			err = nix_init_policer_context(rvu, nix_hw,
   4942						       layer, prof_idx);
   4943			if (err)
   4944				return err;
   4945		}
   4946
    4947		/* Allocate memory for maintaining ref_counts of MID level
    4948		 * profiles; this is needed when aggregating leaf layer
    4949		 * profiles.
    4950		 */
   4951		if (layer != BAND_PROF_MID_LAYER)
   4952			continue;
   4953
    4954		ipolicer->ref_count = devm_kcalloc(rvu->dev,
    4955						   ipolicer->band_prof.max,
    4956						   sizeof(u16), GFP_KERNEL);
        		if (!ipolicer->ref_count)
        			return -ENOMEM;
    4957	}
   4958
    4959	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
   4960	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
   4961
   4962	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
   4963
   4964	return 0;
   4965}
   4966
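        /* Free the per-layer bandwidth profile bitmaps. The pfvf_map,
         * match_id and ref_count arrays were allocated with devm_kcalloc()
         * and are released automatically with the device.
         */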
   4967static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
   4968{
   4969	struct nix_ipolicer *ipolicer;
   4970	int layer;
   4971
   4972	if (!rvu->hw->cap.ipolicer)
   4973		return;
   4974
   4975	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
   4976		ipolicer = &nix_hw->ipolicer[layer];
   4977
   4978		if (!ipolicer->band_prof.max)
   4979			continue;
   4980
   4981		kfree(ipolicer->band_prof.bmap);
   4982	}
   4983}
   4984
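        /* Validate a bandwidth profile AQ request: the addressed profile
         * must exist and be owned by the requesting PCIFUNC (AF is exempt),
         * and if higher layer chaining is enabled the linked profile must
         * sit in the next layer up and belong to the same PCIFUNC.
         */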
   4985static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
   4986			       struct nix_hw *nix_hw, u16 pcifunc)
   4987{
   4988	struct nix_ipolicer *ipolicer;
   4989	int layer, hi_layer, prof_idx;
   4990
   4991	/* Bits [15:14] in profile index represent layer */
   4992	layer = (req->qidx >> 14) & 0x03;
   4993	prof_idx = req->qidx & 0x3FFF;
   4994
   4995	ipolicer = &nix_hw->ipolicer[layer];
   4996	if (prof_idx >= ipolicer->band_prof.max)
   4997		return -EINVAL;
   4998
    4999	/* Check whether the profile is allocated to the requesting PCIFUNC,
    5000	 * with the exception of AF: AF is allowed to read and update any context.
    5001	 */
   5002	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
   5003		return -EINVAL;
   5004
    5005	/* If this profile is linked to a higher layer profile, check
    5006	 * whether that profile is also allocated to the requesting
    5007	 * PCIFUNC.
    5008	 */
   5009	if (!req->prof.hl_en)
   5010		return 0;
   5011
   5012	/* Leaf layer profile can link only to mid layer and
   5013	 * mid layer to top layer.
   5014	 */
   5015	if (layer == BAND_PROF_LEAF_LAYER)
   5016		hi_layer = BAND_PROF_MID_LAYER;
   5017	else if (layer == BAND_PROF_MID_LAYER)
   5018		hi_layer = BAND_PROF_TOP_LAYER;
   5019	else
   5020		return -EINVAL;
   5021
   5022	ipolicer = &nix_hw->ipolicer[hi_layer];
   5023	prof_idx = req->prof.band_prof_id;
   5024	if (prof_idx >= ipolicer->band_prof.max ||
   5025	    ipolicer->pfvf_map[prof_idx] != pcifunc)
   5026		return -EINVAL;
   5027
   5028	return 0;
   5029}
   5030
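        /* Mbox handler: allocate bandwidth profiles for the requesting
         * PCIFUNC, at most MAX_BANDPROF_PER_PFFUNC per layer in a single
         * request. Allocated indices and per-layer counts are returned
         * in the response.
         */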
   5031int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
   5032					struct nix_bandprof_alloc_req *req,
   5033					struct nix_bandprof_alloc_rsp *rsp)
   5034{
   5035	int blkaddr, layer, prof, idx, err;
   5036	u16 pcifunc = req->hdr.pcifunc;
   5037	struct nix_ipolicer *ipolicer;
   5038	struct nix_hw *nix_hw;
   5039
   5040	if (!rvu->hw->cap.ipolicer)
   5041		return NIX_AF_ERR_IPOLICER_NOTSUPP;
   5042
   5043	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
   5044	if (err)
   5045		return err;
   5046
   5047	mutex_lock(&rvu->rsrc_lock);
   5048	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
   5049		if (layer == BAND_PROF_INVAL_LAYER)
   5050			continue;
   5051		if (!req->prof_count[layer])
   5052			continue;
   5053
   5054		ipolicer = &nix_hw->ipolicer[layer];
   5055		for (idx = 0; idx < req->prof_count[layer]; idx++) {
    5056			/* Allocate at most 'MAX_BANDPROF_PER_PFFUNC' profiles per layer */
   5057			if (idx == MAX_BANDPROF_PER_PFFUNC)
   5058				break;
   5059
   5060			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
   5061			if (prof < 0)
   5062				break;
   5063			rsp->prof_count[layer]++;
   5064			rsp->prof_idx[layer][idx] = prof;
   5065			ipolicer->pfvf_map[prof] = pcifunc;
   5066		}
   5067	}
   5068	mutex_unlock(&rvu->rsrc_lock);
   5069	return 0;
   5070}
   5071
   5072static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
   5073{
   5074	int blkaddr, layer, prof_idx, err;
   5075	struct nix_ipolicer *ipolicer;
   5076	struct nix_hw *nix_hw;
   5077
   5078	if (!rvu->hw->cap.ipolicer)
   5079		return NIX_AF_ERR_IPOLICER_NOTSUPP;
   5080
   5081	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
   5082	if (err)
   5083		return err;
   5084
   5085	mutex_lock(&rvu->rsrc_lock);
   5086	/* Free all the profiles allocated to the PCIFUNC */
   5087	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
   5088		if (layer == BAND_PROF_INVAL_LAYER)
   5089			continue;
   5090		ipolicer = &nix_hw->ipolicer[layer];
   5091
   5092		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
   5093			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
   5094				continue;
   5095
   5096			/* Clear ratelimit aggregation, if any */
   5097			if (layer == BAND_PROF_LEAF_LAYER &&
   5098			    ipolicer->match_id[prof_idx])
   5099				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
   5100
   5101			ipolicer->pfvf_map[prof_idx] = 0x00;
   5102			ipolicer->match_id[prof_idx] = 0;
   5103			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
   5104		}
   5105	}
   5106	mutex_unlock(&rvu->rsrc_lock);
   5107	return 0;
   5108}
   5109
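        /* Mbox handler: free bandwidth profiles owned by the requesting
         * PCIFUNC. If free_all is set, release every profile mapped to
         * the PCIFUNC; otherwise free only the requested per-layer
         * indices, clearing any ratelimit aggregation on leaf profiles
         * first.
         */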
   5110int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
   5111				       struct nix_bandprof_free_req *req,
   5112				       struct msg_rsp *rsp)
   5113{
   5114	int blkaddr, layer, prof_idx, idx, err;
   5115	u16 pcifunc = req->hdr.pcifunc;
   5116	struct nix_ipolicer *ipolicer;
   5117	struct nix_hw *nix_hw;
   5118
   5119	if (req->free_all)
   5120		return nix_free_all_bandprof(rvu, pcifunc);
   5121
   5122	if (!rvu->hw->cap.ipolicer)
   5123		return NIX_AF_ERR_IPOLICER_NOTSUPP;
   5124
   5125	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
   5126	if (err)
   5127		return err;
   5128
   5129	mutex_lock(&rvu->rsrc_lock);
   5130	/* Free the requested profile indices */
   5131	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
   5132		if (layer == BAND_PROF_INVAL_LAYER)
   5133			continue;
   5134		if (!req->prof_count[layer])
   5135			continue;
   5136
   5137		ipolicer = &nix_hw->ipolicer[layer];
    5138		for (idx = 0; idx < req->prof_count[layer]; idx++) {
        			/* Bound the loop before indexing req->prof_idx[] */
        			if (idx == MAX_BANDPROF_PER_PFFUNC)
        				break;
    5139			prof_idx = req->prof_idx[layer][idx];
   5140			if (prof_idx >= ipolicer->band_prof.max ||
   5141			    ipolicer->pfvf_map[prof_idx] != pcifunc)
   5142				continue;
   5143
   5144			/* Clear ratelimit aggregation, if any */
   5145			if (layer == BAND_PROF_LEAF_LAYER &&
   5146			    ipolicer->match_id[prof_idx])
   5147				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
   5148
   5149			ipolicer->pfvf_map[prof_idx] = 0x00;
   5150			ipolicer->match_id[prof_idx] = 0;
   5151			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
   5154		}
   5155	}
   5156	mutex_unlock(&rvu->rsrc_lock);
   5157	return 0;
   5158}
   5159
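        /* Issue an AQ READ for the given context type and queue/profile
         * index and return the hardware context in aq_rsp.
         */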
   5160int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
   5161			struct nix_cn10k_aq_enq_req *aq_req,
   5162			struct nix_cn10k_aq_enq_rsp *aq_rsp,
   5163			u16 pcifunc, u8 ctype, u32 qidx)
   5164{
   5165	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
   5166	aq_req->hdr.pcifunc = pcifunc;
   5167	aq_req->ctype = ctype;
   5168	aq_req->op = NIX_AQ_INSTOP_READ;
   5169	aq_req->qidx = qidx;
   5170
   5171	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
   5172				       (struct nix_aq_enq_req *)aq_req,
   5173				       (struct nix_aq_enq_rsp *)aq_rsp);
   5174}
   5175
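        /* Link a leaf bandwidth profile to a mid layer profile: an AQ
         * WRITE updates the leaf profile's band_prof_id and sets its
         * higher layer enable (hl_en) bit.
         */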
   5176static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
   5177					  struct nix_hw *nix_hw,
   5178					  struct nix_cn10k_aq_enq_req *aq_req,
   5179					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
   5180					  u32 leaf_prof, u16 mid_prof)
   5181{
   5182	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
   5183	aq_req->hdr.pcifunc = 0x00;
   5184	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
   5185	aq_req->op = NIX_AQ_INSTOP_WRITE;
   5186	aq_req->qidx = leaf_prof;
   5187
   5188	aq_req->prof.band_prof_id = mid_prof;
   5189	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
   5190	aq_req->prof.hl_en = 1;
   5191	aq_req->prof_mask.hl_en = 1;
   5192
   5193	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
   5194				       (struct nix_aq_enq_req *)aq_req,
   5195				       (struct nix_aq_enq_rsp *)aq_rsp);
   5196}
   5197
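        /* Set up aggregate ratelimiting across RQs. If another leaf
         * profile already carries the same match_id, link both leaf
         * profiles to a common mid layer profile (reusing an existing one
         * or allocating a new one initialized from the leaf's context) so
         * the flows are policed together.
         */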
   5198int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
   5199				 u16 rq_idx, u16 match_id)
   5200{
   5201	int leaf_prof, mid_prof, leaf_match;
   5202	struct nix_cn10k_aq_enq_req aq_req;
   5203	struct nix_cn10k_aq_enq_rsp aq_rsp;
   5204	struct nix_ipolicer *ipolicer;
   5205	struct nix_hw *nix_hw;
   5206	int blkaddr, idx, rc;
   5207
   5208	if (!rvu->hw->cap.ipolicer)
   5209		return 0;
   5210
   5211	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
   5212	if (rc)
   5213		return rc;
   5214
   5215	/* Fetch the RQ's context to see if policing is enabled */
   5216	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
   5217				 NIX_AQ_CTYPE_RQ, rq_idx);
   5218	if (rc) {
   5219		dev_err(rvu->dev,
   5220			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
   5221			__func__, rq_idx, pcifunc);
   5222		return rc;
   5223	}
   5224
   5225	if (!aq_rsp.rq.policer_ena)
   5226		return 0;
   5227
   5228	/* Get the bandwidth profile ID mapped to this RQ */
   5229	leaf_prof = aq_rsp.rq.band_prof_id;
   5230
   5231	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
   5232	ipolicer->match_id[leaf_prof] = match_id;
   5233
   5234	/* Check if any other leaf profile is marked with same match_id */
   5235	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
   5236		if (idx == leaf_prof)
   5237			continue;
   5238		if (ipolicer->match_id[idx] != match_id)
   5239			continue;
   5240
   5241		leaf_match = idx;
   5242		break;
   5243	}
   5244
   5245	if (idx == ipolicer->band_prof.max)
   5246		return 0;
   5247
   5248	/* Fetch the matching profile's context to check if it's already
   5249	 * mapped to a mid level profile.
   5250	 */
   5251	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
   5252				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
   5253	if (rc) {
   5254		dev_err(rvu->dev,
   5255			"%s: Failed to fetch context of leaf profile %d\n",
   5256			__func__, leaf_match);
   5257		return rc;
   5258	}
   5259
   5260	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
   5261	if (aq_rsp.prof.hl_en) {
    5262		/* Get the mid layer profile index and map leaf_prof
    5263		 * to it as well, so that flows steered to different
    5264		 * RQs but marked with the same match_id are rate
    5265		 * limited in an aggregate fashion.
    5266		 */
   5267		mid_prof = aq_rsp.prof.band_prof_id;
   5268		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
   5269						    &aq_req, &aq_rsp,
   5270						    leaf_prof, mid_prof);
   5271		if (rc) {
   5272			dev_err(rvu->dev,
   5273				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
   5274				__func__, leaf_prof, mid_prof);
   5275			goto exit;
   5276		}
   5277
   5278		mutex_lock(&rvu->rsrc_lock);
   5279		ipolicer->ref_count[mid_prof]++;
   5280		mutex_unlock(&rvu->rsrc_lock);
   5281		goto exit;
   5282	}
   5283
   5284	/* Allocate a mid layer profile and
   5285	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
   5286	 */
   5287	mutex_lock(&rvu->rsrc_lock);
   5288	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
   5289	if (mid_prof < 0) {
   5290		dev_err(rvu->dev,
   5291			"%s: Unable to allocate mid layer profile\n", __func__);
   5292		mutex_unlock(&rvu->rsrc_lock);
   5293		goto exit;
   5294	}
   5295	mutex_unlock(&rvu->rsrc_lock);
   5296	ipolicer->pfvf_map[mid_prof] = 0x00;
   5297	ipolicer->ref_count[mid_prof] = 0;
   5298
   5299	/* Initialize mid layer profile same as 'leaf_prof' */
   5300	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
   5301				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
   5302	if (rc) {
   5303		dev_err(rvu->dev,
   5304			"%s: Failed to fetch context of leaf profile %d\n",
   5305			__func__, leaf_prof);
   5306		goto exit;
   5307	}
   5308
   5309	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
   5310	aq_req.hdr.pcifunc = 0x00;
   5311	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
   5312	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
   5313	aq_req.op = NIX_AQ_INSTOP_WRITE;
   5314	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
   5315	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
   5316	/* Clear higher layer enable bit in the mid profile, just in case */
   5317	aq_req.prof.hl_en = 0;
   5318	aq_req.prof_mask.hl_en = 1;
   5319
   5320	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
   5321				     (struct nix_aq_enq_req *)&aq_req, NULL);
   5322	if (rc) {
   5323		dev_err(rvu->dev,
   5324			"%s: Failed to INIT context of mid layer profile %d\n",
   5325			__func__, mid_prof);
   5326		goto exit;
   5327	}
   5328
   5329	/* Map both leaf profiles to this mid layer profile */
   5330	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
   5331					    &aq_req, &aq_rsp,
   5332					    leaf_prof, mid_prof);
   5333	if (rc) {
   5334		dev_err(rvu->dev,
   5335			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
   5336			__func__, leaf_prof, mid_prof);
   5337		goto exit;
   5338	}
   5339
   5340	mutex_lock(&rvu->rsrc_lock);
   5341	ipolicer->ref_count[mid_prof]++;
   5342	mutex_unlock(&rvu->rsrc_lock);
   5343
   5344	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
   5345					    &aq_req, &aq_rsp,
   5346					    leaf_match, mid_prof);
   5347	if (rc) {
   5348		dev_err(rvu->dev,
   5349			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
   5350			__func__, leaf_match, mid_prof);
   5351		ipolicer->ref_count[mid_prof]--;
   5352		goto exit;
   5353	}
   5354
   5355	mutex_lock(&rvu->rsrc_lock);
   5356	ipolicer->ref_count[mid_prof]++;
   5357	mutex_unlock(&rvu->rsrc_lock);
   5358
   5359exit:
   5360	return rc;
   5361}
   5362
    5363/* Called with the rsrc_lock mutex held; the lock is dropped and
         * re-acquired around the AQ context read.
         */
   5364static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
   5365				     u32 leaf_prof)
   5366{
   5367	struct nix_cn10k_aq_enq_req aq_req;
   5368	struct nix_cn10k_aq_enq_rsp aq_rsp;
   5369	struct nix_ipolicer *ipolicer;
   5370	u16 mid_prof;
   5371	int rc;
   5372
   5373	mutex_unlock(&rvu->rsrc_lock);
   5374
   5375	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
   5376				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
   5377
   5378	mutex_lock(&rvu->rsrc_lock);
   5379	if (rc) {
   5380		dev_err(rvu->dev,
   5381			"%s: Failed to fetch context of leaf profile %d\n",
   5382			__func__, leaf_prof);
   5383		return;
   5384	}
   5385
   5386	if (!aq_rsp.prof.hl_en)
   5387		return;
   5388
   5389	mid_prof = aq_rsp.prof.band_prof_id;
   5390	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
   5391	ipolicer->ref_count[mid_prof]--;
   5392	/* If ref_count is zero, free mid layer profile */
   5393	if (!ipolicer->ref_count[mid_prof]) {
   5394		ipolicer->pfvf_map[mid_prof] = 0x00;
   5395		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
   5396	}
   5397}
   5398
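        /* Mbox handler: report the number of free bandwidth profiles at
         * each layer and the policer timeunit in nanoseconds.
         */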
   5399int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
   5400					     struct nix_bandprof_get_hwinfo_rsp *rsp)
   5401{
   5402	struct nix_ipolicer *ipolicer;
   5403	int blkaddr, layer, err;
   5404	struct nix_hw *nix_hw;
   5405	u64 tu;
   5406
   5407	if (!rvu->hw->cap.ipolicer)
   5408		return NIX_AF_ERR_IPOLICER_NOTSUPP;
   5409
   5410	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
   5411	if (err)
   5412		return err;
   5413
   5414	/* Return number of bandwidth profiles free at each layer */
   5415	mutex_lock(&rvu->rsrc_lock);
   5416	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
   5417		if (layer == BAND_PROF_INVAL_LAYER)
   5418			continue;
   5419
   5420		ipolicer = &nix_hw->ipolicer[layer];
   5421		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
   5422	}
   5423	mutex_unlock(&rvu->rsrc_lock);
   5424
    5425	/* Report the policer timeunit in nanoseconds */
   5426	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
   5427	rsp->policer_timeunit = (tu + 1) * 100;
   5428
   5429	return 0;
   5430}