cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rvu_cpt.c (29323B)


// SPDX-License-Identifier: GPL-2.0-only
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "mbox.h"
#include "rvu.h"

/* CPT PF device id */
#define	PCI_DEVID_OTX2_CPT_PF	0xA0FD
#define	PCI_DEVID_OTX2_CPT10K_PF 0xA0F2

/* Length of initial context fetch in 128 byte words */
#define CPT_CTX_ILEN    2

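/*
 * Sample the busy/free status of engines [e_min, e_max) from
 * CPT_AF_EXEX_STS: bit 0 of each status register reports busy, bit 1
 * reports free, and bit i of the accumulated bitmaps maps to engine
 * e_min + i. The macro deliberately picks up 'rvu', 'blkaddr' and
 * 'reg' from the caller's scope.
 */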
#define cpt_get_eng_sts(e_min, e_max, rsp, etype)                   \
({                                                                  \
	u64 free_sts = 0, busy_sts = 0;                             \
	typeof(rsp) _rsp = rsp;                                     \
	u32 e, i;                                                   \
								    \
	for (e = (e_min), i = 0; e < (e_max); e++, i++) {           \
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
		if (reg & 0x1)                                      \
			busy_sts |= 1ULL << i;                      \
								    \
		if (reg & 0x2)                                      \
			free_sts |= 1ULL << i;                      \
	}                                                           \
	(_rsp)->busy_sts_##etype = busy_sts;                        \
	(_rsp)->free_sts_##etype = free_sts;                        \
})

static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
{
	struct rvu_block *block = ptr;
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	u64 reg0, reg1, reg2;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	if (!is_rvu_otx2(rvu)) {
		reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
		dev_err_ratelimited(rvu->dev,
				    "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
				     reg0, reg1, reg2);
	} else {
		dev_err_ratelimited(rvu->dev,
				    "Received CPTAF FLT irq : 0x%llx, 0x%llx",
				     reg0, reg1);
	}

	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
	if (!is_rvu_otx2(rvu))
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);

	return IRQ_HANDLED;
}

static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
	struct rvu_block *block = ptr;
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);

	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
	return IRQ_HANDLED;
}

static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
{
	struct rvu_block *block = ptr;
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);

	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
	return IRQ_HANDLED;
}

static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
					 irq_handler_t handler,
					 const char *name)
{
	struct rvu *rvu = block->rvu;
	int ret;

	ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
			  name, block);
	if (ret) {
		dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
		return ret;
	}

	WARN_ON(rvu->irq_allocated[irq_offs]);
	rvu->irq_allocated[irq_offs] = true;
	return 0;
}

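/*
 * The interrupt enable registers come in write-1-to-clear (ENA_W1C)
 * and write-1-to-set (ENA_W1S) pairs: writing 0x1 to the W1C variant
 * masks a vector, writing 0x1 to the W1S variant unmasks it.
 */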
static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	int i;

	/* Disable all CPT AF interrupts */
	for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[off + i]) {
			free_irq(pci_irq_vector(rvu->pdev, off + i), block);
			rvu->irq_allocated[off + i] = false;
		}
}

static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int i, offs;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;
	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev,
			 "Failed to get CPT_AF_INT vector offsets\n");
		return;
	}
	block = &hw->block[blkaddr];
	if (!is_rvu_otx2(rvu))
		return cpt_10k_unregister_interrupts(block, offs);

	/* Disable all CPT AF interrupts */
	for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
			rvu->irq_allocated[offs + i] = false;
		}
}

void rvu_cpt_unregister_interrupts(struct rvu *rvu)
{
	cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
	cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
}

static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	int i, ret;

	for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
		sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
		ret = rvu_cpt_do_register_interrupt(block, off + i,
						    rvu_cpt_af_flt_intr_handler,
						    &rvu->irq_name[(off + i) * NAME_SIZE]);
		if (ret)
			goto err;
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
	}

	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
					    rvu_cpt_af_rvu_intr_handler,
					    "CPTAF RVU");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);

	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
					    rvu_cpt_af_ras_intr_handler,
					    "CPTAF RAS");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);

	return 0;
err:
	rvu_cpt_unregister_interrupts(rvu);
	return ret;
}

static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int i, offs, ret = 0;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return 0;

	block = &hw->block[blkaddr];
	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev,
			 "Failed to get CPT_AF_INT vector offsets\n");
		return 0;
	}

	if (!is_rvu_otx2(rvu))
		return cpt_10k_register_interrupts(block, offs);

	for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
		/* Build the name in rvu->irq_name (as the 10K variant does):
		 * request_irq() keeps the name pointer, so a stack buffer
		 * here would leave a dangling name in /proc/interrupts.
		 */
		sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE],
			"CPTAF FLT%d", i);
		ret = rvu_cpt_do_register_interrupt(block, offs + i,
						    rvu_cpt_af_flt_intr_handler,
						    &rvu->irq_name[(offs + i) * NAME_SIZE]);
		if (ret)
			goto err;
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
	}

	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
					    rvu_cpt_af_rvu_intr_handler,
					    "CPTAF RVU");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);

	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
					    rvu_cpt_af_ras_intr_handler,
					    "CPTAF RAS");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);

	return 0;
err:
	rvu_cpt_unregister_interrupts(rvu);
	return ret;
}

int rvu_cpt_register_interrupts(struct rvu *rvu)
{
	int ret;

	ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
	if (ret)
		return ret;

	return cpt_register_interrupts(rvu, BLKADDR_CPT1);
}

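/*
 * Locate the CPT PF by scanning for its PCI device IDs. This relies on
 * the RVU enumeration scheme in which PF i is assumed to appear as
 * device 0 on bus i + 1 of the AF's PCI domain.
 */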
static int get_cpt_pf_num(struct rvu *rvu)
{
	int i, domain_nr, cpt_pf_num = -1;
	struct pci_dev *pdev;

	domain_nr = pci_domain_nr(rvu->pdev->bus);
	for (i = 0; i < rvu->hw->total_pfs; i++) {
		pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
		if (!pdev)
			continue;

		if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
		    pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
			cpt_pf_num = i;
			put_device(&pdev->dev);
			break;
		}
		put_device(&pdev->dev);
	}
	return cpt_pf_num;
}

static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
	int cpt_pf_num = get_cpt_pf_num(rvu);

	if (rvu_get_pf(pcifunc) != cpt_pf_num)
		return false;
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return false;

	return true;
}

static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
	int cpt_pf_num = get_cpt_pf_num(rvu);

	if (rvu_get_pf(pcifunc) != cpt_pf_num)
		return false;
	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return false;

	return true;
}

static int validate_and_get_cpt_blkaddr(int req_blkaddr)
{
	int blkaddr;

	blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
	if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
		return -EINVAL;

	return blkaddr;
}

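/*
 * CPT_AF_LFX_CTL layout as used below: bit 0 carries the queue
 * priority, bits 63:48 the engine group mask, and on CN10K the initial
 * context fetch length (CPT_CTX_ILEN) is programmed at bit 17.
 */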
int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
				  struct cpt_lf_alloc_req_msg *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int cptlf, blkaddr;
	int num_lfs, slot;
	u64 val;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	if (req->eng_grpmsk == 0x0)
		return CPT_AF_ERR_GRP_INVALID;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return CPT_AF_ERR_LF_INVALID;

	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
	if (req->nix_pf_func) {
		/* If default, use 'this' CPTLF's PFFUNC */
		if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
			req->nix_pf_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
			return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
	}

	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
	if (req->sso_pf_func) {
		/* If default, use 'this' CPTLF's PFFUNC */
		if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
			req->sso_pf_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
			return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
	}

	for (slot = 0; slot < num_lfs; slot++) {
		cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (cptlf < 0)
			return CPT_AF_ERR_LF_INVALID;

		/* Set CPT LF group and priority */
		val = (u64)req->eng_grpmsk << 48 | 1;
		if (!is_rvu_otx2(rvu))
			val |= (CPT_CTX_ILEN << 17);

		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

		/* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
		 * on reset.
		 */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
		val |= ((u64)req->nix_pf_func << 48 |
			(u64)req->sso_pf_func << 32);
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
	}

	return 0;
}

static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
	u16 pcifunc = req->hdr.pcifunc;
	int num_lfs, cptlf, slot, err;
	struct rvu_block *block;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return 0;

	for (slot = 0; slot < num_lfs; slot++) {
		cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (cptlf < 0)
			return CPT_AF_ERR_LF_INVALID;

		/* Perform teardown */
		rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);

		/* Reset LF */
		err = rvu_lf_reset(rvu, block, cptlf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, cptlf);
		}
	}

	return 0;
}

int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	int ret;

	ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
	if (ret)
		return ret;

	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
		ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);

	return ret;
}

static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
					struct cpt_inline_ipsec_cfg_msg *req)
{
	u16 sso_pf_func = req->sso_pf_func;
	u8 nix_sel;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	if (req->enable && (val & BIT_ULL(16))) {
		/* The IPSec inline outbound path is already enabled for this
		 * CPT LF; the HRM states that the inline inbound and outbound
		 * paths must not be enabled at the same time for a given LF.
		 */
		return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
	}
	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
	if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
		return CPT_AF_ERR_SSO_PF_FUNC_INVALID;

	nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
	/* Enable CPT LF for IPsec inline inbound operations */
	if (req->enable)
		val |= BIT_ULL(9);
	else
		val &= ~BIT_ULL(9);

	val |= (u64)nix_sel << 8;
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

	if (sso_pf_func) {
		/* Set SSO_PF_FUNC */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val |= (u64)sso_pf_func << 32;
		val |= (u64)req->nix_pf_func << 48;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
	}
	if (req->sso_pf_func_ovrd)
		/* Set SSO_PF_FUNC_OVRD for inline IPSec */
		rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);

	/* Configure the X2P Link register with the cpt base channel number and
	 * range of channels it should propagate to X2P
	 */
	if (!is_rvu_otx2(rvu)) {
		val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
		val |= rvu->hw->cpt_chan_base;

		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
	}

	return 0;
}

static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
					 struct cpt_inline_ipsec_cfg_msg *req)
{
	u16 nix_pf_func = req->nix_pf_func;
	int nix_blkaddr;
	u8 nix_sel;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	if (req->enable && (val & BIT_ULL(9))) {
		/* The IPSec inline inbound path is already enabled for this
		 * CPT LF; the HRM states that the inline inbound and outbound
		 * paths must not be enabled at the same time for a given LF.
		 */
		return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
	}

	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
	if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
		return CPT_AF_ERR_NIX_PF_FUNC_INVALID;

	/* Enable CPT LF for IPsec inline outbound operations */
	if (req->enable)
		val |= BIT_ULL(16);
	else
		val &= ~BIT_ULL(16);
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

	if (nix_pf_func) {
		/* Set NIX_PF_FUNC */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val |= (u64)nix_pf_func << 48;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);

		nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
		nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;

		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
		val |= (u64)nix_sel << 8;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
	}

	return 0;
}

int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
					  struct cpt_inline_ipsec_cfg_msg *req,
					  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int cptlf, blkaddr, ret;
	u16 actual_slot;

	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
					    req->slot, &actual_slot);
	if (blkaddr < 0)
		return CPT_AF_ERR_LF_INVALID;

	block = &rvu->hw->block[blkaddr];

	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
	if (cptlf < 0)
		return CPT_AF_ERR_LF_INVALID;

	switch (req->dir) {
	case CPT_INLINE_INBOUND:
		ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
		break;

	case CPT_INLINE_OUTBOUND:
		ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
		break;

	default:
		return CPT_AF_ERR_PARAM;
	}

	return ret;
}

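/*
 * Access policy for CPT_RD_WR_REGISTER mbox requests: the LF-scoped
 * CTL/CTL2 register windows are allowed from PF and VF alike (after
 * translating the local slot to a global LF), while the AF-level
 * registers listed further down are accepted from the CPT PF only.
 */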
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
	u64 offset = req->reg_offset;
	int blkaddr, num_lfs, lf;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return false;

	/* Registers that can be accessed from PF/VF */
	if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
	    (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
		if (offset & 7)
			return false;

		lf = (offset & 0xFFF) >> 3;
		block = &rvu->hw->block[blkaddr];
		pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
		num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		if (lf >= num_lfs)
			/* Slot is not valid for that PF/VF */
			return false;

		/* Translate local LF used by VFs to global CPT LF */
		lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
				req->hdr.pcifunc, lf);
		if (lf < 0)
			return false;

		return true;
	} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Registers that can be accessed from PF */
		switch (offset) {
		case CPT_AF_DIAG:
		case CPT_AF_CTL:
		case CPT_AF_PF_FUNC:
		case CPT_AF_BLK_RST:
		case CPT_AF_CONSTANTS1:
		case CPT_AF_CTX_FLUSH_TIMER:
			return true;
		}

		switch (offset & 0xFF000) {
		case CPT_AF_EXEX_STS(0):
		case CPT_AF_EXEX_CTL(0):
		case CPT_AF_EXEX_CTL2(0):
		case CPT_AF_EXEX_UCODE_BASE(0):
			if (offset & 7)
				return false;
			break;
		default:
			return false;
		}
		return true;
	}
	return false;
}

int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
					struct cpt_rd_wr_reg_msg *req,
					struct cpt_rd_wr_reg_msg *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	rsp->reg_offset = req->reg_offset;
	rsp->ret_val = req->ret_val;
	rsp->is_write = req->is_write;

	if (!is_valid_offset(rvu, req))
		return CPT_AF_ERR_ACCESS_DENIED;

	if (req->is_write)
		rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
	else
		rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);

	return 0;
}

static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	if (is_rvu_otx2(rvu))
		return;

	rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
	rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
	rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
	rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
					 CPT_AF_CTX_AOP_LATENCY_PC);
	rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
	rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
					    CPT_AF_CTX_IFETCH_LATENCY_PC);
	rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
	rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
					    CPT_AF_CTX_FFETCH_LATENCY_PC);
	rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_WBACK_PC);
	rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_CTX_WBACK_LATENCY_PC);
	rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_PSH_PC);
	rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
					 CPT_AF_CTX_PSH_LATENCY_PC);
	rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
	rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
	rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);

	rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
	rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
	rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
	rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
	rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
	rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
	rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
}

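/*
 * CPT_AF_CONSTANTS1 reports the engine pool sizes. Engines are numbered
 * SEs first, then IEs, then AEs, which fixes the [e_min, e_max) windows
 * sampled for each engine type below.
 */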
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	u16 max_ses, max_ies, max_aes;
	u32 e_min = 0, e_max = 0;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Get AE status */
	e_min = max_ses + max_ies;
	e_max = max_ses + max_ies + max_aes;
	cpt_get_eng_sts(e_min, e_max, rsp, ae);
	/* Get SE status */
	e_min = 0;
	e_max = max_ses;
	cpt_get_eng_sts(e_min, e_max, rsp, se);
	/* Get IE status */
	e_min = max_ses;
	e_max = max_ses + max_ies;
	cpt_get_eng_sts(e_min, e_max, rsp, ie);
}

int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
			     struct cpt_sts_rsp *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	get_ctx_pc(rvu, rsp, blkaddr);

	/* Get CPT engines status */
	get_eng_sts(rvu, rsp, blkaddr);

	/* Read CPT instruction PC registers */
	rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_ACTIVE_CYCLES_PC);
	rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);

	return 0;
}

#define RXC_ZOMBIE_THRES  GENMASK_ULL(59, 48)
#define RXC_ZOMBIE_LIMIT  GENMASK_ULL(43, 32)
#define RXC_ACTIVE_THRES  GENMASK_ULL(27, 16)
#define RXC_ACTIVE_LIMIT  GENMASK_ULL(11, 0)
#define RXC_ACTIVE_COUNT  GENMASK_ULL(60, 48)
#define RXC_ZOMBIE_COUNT  GENMASK_ULL(60, 48)
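
/*
 * The THRES/LIMIT fields above pack into CPT_AF_RXC_DFRG, while the
 * COUNT field is read back from CPT_AF_RXC_ACTIVE_STS and
 * CPT_AF_RXC_ZOMBIE_STS when waiting for the RXC queues to drain.
 */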

static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
			     int blkaddr)
{
	u64 dfrg_reg;

	dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);

	rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
	rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
}

int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
				      struct cpt_rxc_time_cfg_req *req,
				      struct msg_rsp *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	cpt_rxc_time_cfg(rvu, req, blkaddr);

	return 0;
}

int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}

static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
	struct cpt_rxc_time_cfg_req req;
	int timeout = 2000;
	u64 reg;

	if (is_rvu_otx2(rvu))
		return;

	/* Set time limit to minimum values, so that rxc entries will be
	 * flushed out quickly.
	 */
	req.step = 1;
	req.zombie_thres = 1;
	req.zombie_limit = 1;
	req.active_thres = 1;
	req.active_limit = 1;

	cpt_rxc_time_cfg(rvu, &req, blkaddr);

	do {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
		udelay(1);
		if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
			timeout--;
		else
			break;
	} while (timeout);

	if (timeout == 0)
		dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");

	timeout = 2000;
	do {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
		udelay(1);
		if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
			timeout--;
		else
			break;
	} while (timeout);

	if (timeout == 0)
		dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
}

#define INPROG_INFLIGHT(reg)    ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg)         (((reg) >> 32) & 0xFF)
#define INPROG_GWB(reg)         (((reg) >> 40) & 0xFF)
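
/*
 * Quiescence check used by cpt_lf_disable_iqueue() below: first poll
 * until no group read batch is partially fetched and the queue's nq/dq
 * pointers meet, then poll until the inflight count is zero and the
 * GRB/GWB done counts read as fully drained; each condition must hold
 * for ten consecutive reads before the queue is treated as idle.
 */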

static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
	int i = 0, hard_lp_ctr = 100000;
	u64 inprog, grp_ptr;
	u16 nq_ptr, dq_ptr;

	/* Disable instructions enqueuing */
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);

	/* Disable executions in the LF's queue */
	inprog = rvu_read64(rvu, blkaddr,
			    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
	inprog &= ~BIT_ULL(16);
	rvu_write64(rvu, blkaddr,
		    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);

	/* Wait for CPT queue to become execution-quiescent */
	do {
		inprog = rvu_read64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
		if (INPROG_GRB_PARTIAL(inprog)) {
			i = 0;
			hard_lp_ctr--;
		} else {
			i++;
		}

		grp_ptr = rvu_read64(rvu, blkaddr,
				     CPT_AF_BAR2_ALIASX(slot,
							CPT_LF_Q_GRP_PTR));
		nq_ptr = (grp_ptr >> 32) & 0x7FFF;
		dq_ptr = grp_ptr & 0x7FFF;

	} while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));

	if (hard_lp_ctr == 0)
		dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");

	i = 0;
	hard_lp_ctr = 100000;
	do {
		inprog = rvu_read64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));

		if ((INPROG_INFLIGHT(inprog) == 0) &&
		    (INPROG_GWB(inprog) < 40) &&
		    ((INPROG_GRB(inprog) == 0) ||
		     (INPROG_GRB(inprog) == 40))) {
			i++;
		} else {
			i = 0;
			hard_lp_ctr--;
		}
	} while (hard_lp_ctr && (i < 10));

	if (hard_lp_ctr == 0)
		dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}

int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
	u64 reg;

	if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
		cpt_rxc_teardown(rvu, blkaddr);

	/* Enable BAR2 ALIAS for this pcifunc. */
	reg = BIT_ULL(16) | pcifunc;
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);

	cpt_lf_disable_iqueue(rvu, blkaddr, slot);

	/* Set group drop to help clear out hardware */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
	reg |= BIT_ULL(17);
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);

	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);

	return 0;
}

#define CPT_RES_LEN    16
#define CPT_SE_IE_EGRP 1ULL

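/*
 * cpt_inline_inb_lf_cmd_send() asks the CPT PF (via an AF->PF mbox
 * message) to submit one CPT_INST_S whose result address points at a
 * DMA buffer pre-filled with the 0xFFFF marker. Hardware overwrites the
 * marker on completion, signalling that all inline inbound packets
 * queued ahead of the instruction have been flushed through.
 */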
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
				      int nix_blkaddr)
{
	int cpt_pf_num = get_cpt_pf_num(rvu);
	struct cpt_inst_lmtst_req *req;
	dma_addr_t res_daddr;
	int timeout = 3000;
	u8 cpt_idx;
	u64 *inst;
	u16 *res;
	int rc;

	res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
				   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(rvu->dev, res_daddr)) {
		dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
		rc = -EFAULT;
		goto res_free;
	}
	*res = 0xFFFF;

	/* Send mbox message to CPT PF */
	req = (struct cpt_inst_lmtst_req *)
	       otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
				       cpt_pf_num, sizeof(*req),
				       sizeof(struct msg_rsp));
	if (!req) {
		rc = -ENOMEM;
		goto res_daddr_unmap;
	}
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.id = MBOX_MSG_CPT_INST_LMTST;

	inst = req->inst;
	/* Prepare CPT_INST_S */
	inst[0] = 0;
	inst[1] = res_daddr;
	/* AF PF FUNC */
	inst[2] = 0;
	/* Set QORD */
	inst[3] = 1;
	inst[4] = 0;
	inst[5] = 0;
	inst[6] = 0;
	/* Set EGRP */
	inst[7] = CPT_SE_IE_EGRP << 61;

	/* Subtract 1 from the NIX-CPT credit count to preserve
	 * credit counts.
	 */
	cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
		    BIT_ULL(22) - 1);

	otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
	rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
	if (rc)
		dev_warn(rvu->dev, "notification to pf %d failed\n",
			 cpt_pf_num);
	/* Wait for CPT instruction to be completed */
	do {
		mdelay(1);
		if (*res == 0xFFFF)
			timeout--;
		else
			break;
	} while (timeout);

	if (timeout == 0)
		dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");

	/* Propagate allocation/mapping failures; a late mbox response is
	 * only warned about above, not treated as fatal.
	 */
	rc = 0;

res_daddr_unmap:
	dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
res_free:
	kfree(res);

	return rc;
}

#define CTX_CAM_PF_FUNC   GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR      GENMASK_ULL(45, 0)

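/*
 * Flush all cached contexts owned by a pcifunc: after draining inline
 * inbound traffic and the RXC queues, walk the context CAM and issue
 * CPT_LF_CTX_FLUSH (bit 46 set alongside the context pointer) for each
 * matching entry, reaching the LF register through the BAR2 alias
 * window.
 */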
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
{
	int nix_blkaddr, blkaddr;
	u16 max_ctx_entries, i;
	int slot = 0, num_lfs;
	u64 reg, cam_data;
	int rc;

	nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (nix_blkaddr < 0)
		return -EINVAL;

	if (is_rvu_otx2(rvu))
		return 0;

	blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;

	/* Submit CPT_INST_S to track when all packets have been
	 * flushed through for the NIX PF FUNC in inline inbound case.
	 */
	rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
	if (rc)
		return rc;

	/* Wait for rxc entries to be flushed out */
	cpt_rxc_teardown(rvu, blkaddr);

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	max_ctx_entries = (reg >> 48) & 0xFFF;

	mutex_lock(&rvu->rsrc_lock);

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					blkaddr);
	if (num_lfs == 0) {
		dev_warn(rvu->dev, "CPT LF is not configured\n");
		goto unlock;
	}

	/* Enable BAR2 ALIAS for this pcifunc. */
	reg = BIT_ULL(16) | pcifunc;
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);

	for (i = 0; i < max_ctx_entries; i++) {
		cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));

		if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
		    FIELD_GET(CTX_CAM_CPTR, cam_data)) {
			reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
			rvu_write64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
				    reg);
		}
	}
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);

unlock:
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}