cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

otx2_pf.c (75188B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Marvell RVU Physical Function ethernet driver
      3 *
      4 * Copyright (C) 2020 Marvell.
      5 *
      6 */
      7
      8#include <linux/module.h>
      9#include <linux/interrupt.h>
     10#include <linux/pci.h>
     11#include <linux/etherdevice.h>
     12#include <linux/of.h>
     13#include <linux/if_vlan.h>
     14#include <linux/iommu.h>
     15#include <net/ip.h>
     16#include <linux/bpf.h>
     17#include <linux/bpf_trace.h>
     18
     19#include "otx2_reg.h"
     20#include "otx2_common.h"
     21#include "otx2_txrx.h"
     22#include "otx2_struct.h"
     23#include "otx2_ptp.h"
     24#include "cn10k.h"
     25#include <rvu_trace.h>
     26
     27#define DRV_NAME	"rvu_nicpf"
     28#define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
     29
     30/* Supported devices */
     31static const struct pci_device_id otx2_pf_id_table[] = {
     32	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
     33	{ 0, }  /* end of table */
     34};
     35
     36MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
     37MODULE_DESCRIPTION(DRV_STRING);
     38MODULE_LICENSE("GPL v2");
     39MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
     40
     41static void otx2_vf_link_event_task(struct work_struct *work);
     42
     43enum {
     44	TYPE_PFAF,
     45	TYPE_PFVF,
     46};
     47
     48static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
     49static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
     50
     51static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
     52{
     53	struct otx2_nic *pf = netdev_priv(netdev);
     54	bool if_up = netif_running(netdev);
     55	int err = 0;
     56
     57	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
     58		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
     59			    netdev->mtu);
     60		return -EINVAL;
     61	}
     62	if (if_up)
     63		otx2_stop(netdev);
     64
     65	netdev_info(netdev, "Changing MTU from %d to %d\n",
     66		    netdev->mtu, new_mtu);
     67	netdev->mtu = new_mtu;
     68
     69	if (if_up)
     70		err = otx2_open(netdev);
     71
     72	return err;
     73}
     74
     75static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
     76{
     77	int irq, vfs = pf->total_vfs;
     78
     79	/* Disable VFs ME interrupts */
     80	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
     81	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
     82	free_irq(irq, pf);
     83
     84	/* Disable VFs FLR interrupts */
     85	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
     86	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
     87	free_irq(irq, pf);
     88
     89	if (vfs <= 64)
     90		return;
     91
     92	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
     93	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
     94	free_irq(irq, pf);
     95
     96	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
     97	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
     98	free_irq(irq, pf);
     99}
    100
    101static void otx2_flr_wq_destroy(struct otx2_nic *pf)
    102{
    103	if (!pf->flr_wq)
    104		return;
    105	destroy_workqueue(pf->flr_wq);
    106	pf->flr_wq = NULL;
    107	devm_kfree(pf->dev, pf->flr_wrk);
    108}
    109
    110static void otx2_flr_handler(struct work_struct *work)
    111{
    112	struct flr_work *flrwork = container_of(work, struct flr_work, work);
    113	struct otx2_nic *pf = flrwork->pf;
    114	struct mbox *mbox = &pf->mbox;
    115	struct msg_req *req;
    116	int vf, reg = 0;
    117
    118	vf = flrwork - pf->flr_wrk;
    119
    120	mutex_lock(&mbox->lock);
    121	req = otx2_mbox_alloc_msg_vf_flr(mbox);
    122	if (!req) {
    123		mutex_unlock(&mbox->lock);
    124		return;
    125	}
    126	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
    127	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
    128
    129	if (!otx2_sync_mbox_msg(&pf->mbox)) {
    130		if (vf >= 64) {
    131			reg = 1;
    132			vf = vf - 64;
    133		}
     134		/* clear transaction pending bit */
    135		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
    136		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
    137	}
    138
    139	mutex_unlock(&mbox->lock);
    140}
    141
    142static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
    143{
    144	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
    145	int reg, dev, vf, start_vf, num_reg = 1;
    146	u64 intr;
    147
    148	if (pf->total_vfs > 64)
    149		num_reg = 2;
    150
    151	for (reg = 0; reg < num_reg; reg++) {
    152		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
    153		if (!intr)
    154			continue;
    155		start_vf = 64 * reg;
    156		for (vf = 0; vf < 64; vf++) {
    157			if (!(intr & BIT_ULL(vf)))
    158				continue;
    159			dev = vf + start_vf;
    160			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
    161			/* Clear interrupt */
    162			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
    163			/* Disable the interrupt */
    164			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
    165				     BIT_ULL(vf));
    166		}
    167	}
    168	return IRQ_HANDLED;
    169}
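
/* Illustration only, not part of the driver source: the per-VF FLR/ME
 * interrupt state is split across two 64-bit registers, so a VF index maps
 * to a (register, bit) pair as sketched below. The helper name is
 * hypothetical; otx2_flr_handler() and the handlers above open-code the
 * same mapping (reg selects VF0..63 vs VF64..127).
 *
 *	static inline void otx2_vf_to_reg_bit(int vf, int *reg, int *bit)
 *	{
 *		*reg = vf / 64;		// 0 covers VF0..63, 1 covers VF64..127
 *		*bit = vf % 64;		// bit position within that register
 *	}
 */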
    170
    171static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
    172{
    173	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
    174	int vf, reg, num_reg = 1;
    175	u64 intr;
    176
    177	if (pf->total_vfs > 64)
    178		num_reg = 2;
    179
    180	for (reg = 0; reg < num_reg; reg++) {
    181		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
    182		if (!intr)
    183			continue;
    184		for (vf = 0; vf < 64; vf++) {
    185			if (!(intr & BIT_ULL(vf)))
    186				continue;
    187			/* clear trpend bit */
    188			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
    189			/* clear interrupt */
    190			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
    191		}
    192	}
    193	return IRQ_HANDLED;
    194}
    195
    196static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
    197{
    198	struct otx2_hw *hw = &pf->hw;
    199	char *irq_name;
    200	int ret;
    201
     202	/* Register ME interrupt handler */
    203	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
    204	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
    205	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
    206			  otx2_pf_me_intr_handler, 0, irq_name, pf);
    207	if (ret) {
    208		dev_err(pf->dev,
    209			"RVUPF: IRQ registration failed for ME0\n");
    210	}
    211
    212	/* Register FLR interrupt handler */
    213	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
    214	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
    215	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
    216			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
    217	if (ret) {
    218		dev_err(pf->dev,
    219			"RVUPF: IRQ registration failed for FLR0\n");
    220		return ret;
    221	}
    222
    223	if (numvfs > 64) {
    224		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
    225		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
    226			 rvu_get_pf(pf->pcifunc));
    227		ret = request_irq(pci_irq_vector
    228				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
    229				  otx2_pf_me_intr_handler, 0, irq_name, pf);
    230		if (ret) {
    231			dev_err(pf->dev,
    232				"RVUPF: IRQ registration failed for ME1\n");
    233		}
    234		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
    235		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
    236			 rvu_get_pf(pf->pcifunc));
    237		ret = request_irq(pci_irq_vector
    238				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
    239				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
    240		if (ret) {
    241			dev_err(pf->dev,
    242				"RVUPF: IRQ registration failed for FLR1\n");
    243			return ret;
    244		}
    245	}
    246
     247	/* Enable ME interrupt for all VFs */
    248	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
    249	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
    250
     251	/* Enable FLR interrupt for all VFs */
    252	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
    253	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
    254
    255	if (numvfs > 64) {
    256		numvfs -= 64;
    257
    258		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
    259		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
    260			     INTR_MASK(numvfs));
    261
    262		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
    263		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
    264			     INTR_MASK(numvfs));
    265	}
    266	return 0;
    267}
    268
    269static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
    270{
    271	int vf;
    272
    273	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
    274				     WQ_UNBOUND | WQ_HIGHPRI, 1);
    275	if (!pf->flr_wq)
    276		return -ENOMEM;
    277
    278	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
    279				   sizeof(struct flr_work), GFP_KERNEL);
    280	if (!pf->flr_wrk) {
    281		destroy_workqueue(pf->flr_wq);
    282		return -ENOMEM;
    283	}
    284
    285	for (vf = 0; vf < num_vfs; vf++) {
    286		pf->flr_wrk[vf].pf = pf;
    287		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
    288	}
    289
    290	return 0;
    291}
    292
    293static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
    294			    int first, int mdevs, u64 intr, int type)
    295{
    296	struct otx2_mbox_dev *mdev;
    297	struct otx2_mbox *mbox;
    298	struct mbox_hdr *hdr;
    299	int i;
    300
    301	for (i = first; i < mdevs; i++) {
    302		/* start from 0 */
    303		if (!(intr & BIT_ULL(i - first)))
    304			continue;
    305
    306		mbox = &mw->mbox;
    307		mdev = &mbox->dev[i];
    308		if (type == TYPE_PFAF)
    309			otx2_sync_mbox_bbuf(mbox, i);
    310		hdr = mdev->mbase + mbox->rx_start;
    311		/* The hdr->num_msgs is set to zero immediately in the interrupt
     312	 * handler to ensure that it holds a correct value next time
     313	 * when the interrupt handler is called.
     314	 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler and
     315	 * pf->mbox.up_num_msgs holds the data for use in
    316		 * pfaf_mbox_up_handler.
    317		 */
    318		if (hdr->num_msgs) {
    319			mw[i].num_msgs = hdr->num_msgs;
    320			hdr->num_msgs = 0;
    321			if (type == TYPE_PFAF)
    322				memset(mbox->hwbase + mbox->rx_start, 0,
    323				       ALIGN(sizeof(struct mbox_hdr),
    324					     sizeof(u64)));
    325
    326			queue_work(mbox_wq, &mw[i].mbox_wrk);
    327		}
    328
    329		mbox = &mw->mbox_up;
    330		mdev = &mbox->dev[i];
    331		if (type == TYPE_PFAF)
    332			otx2_sync_mbox_bbuf(mbox, i);
    333		hdr = mdev->mbase + mbox->rx_start;
    334		if (hdr->num_msgs) {
    335			mw[i].up_num_msgs = hdr->num_msgs;
    336			hdr->num_msgs = 0;
    337			if (type == TYPE_PFAF)
    338				memset(mbox->hwbase + mbox->rx_start, 0,
    339				       ALIGN(sizeof(struct mbox_hdr),
    340					     sizeof(u64)));
    341
    342			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
    343		}
    344	}
    345}
    346
    347static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
    348				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
    349				  int devid)
    350{
    351	struct otx2_mbox_dev *src_mdev = mdev;
    352	int offset;
    353
    354	/* Msgs are already copied, trigger VF's mbox irq */
    355	smp_wmb();
    356
    357	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
    358	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
    359
    360	/* Restore VF's mbox bounce buffer region address */
    361	src_mdev->mbase = bbuf_base;
    362}
    363
    364static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
    365				     struct otx2_mbox *src_mbox,
    366				     int dir, int vf, int num_msgs)
    367{
    368	struct otx2_mbox_dev *src_mdev, *dst_mdev;
    369	struct mbox_hdr *mbox_hdr;
    370	struct mbox_hdr *req_hdr;
    371	struct mbox *dst_mbox;
    372	int dst_size, err;
    373
    374	if (dir == MBOX_DIR_PFAF) {
    375		/* Set VF's mailbox memory as PF's bounce buffer memory, so
    376		 * that explicit copying of VF's msgs to PF=>AF mbox region
    377		 * and AF=>PF responses to VF's mbox region can be avoided.
    378		 */
    379		src_mdev = &src_mbox->dev[vf];
    380		mbox_hdr = src_mbox->hwbase +
    381				src_mbox->rx_start + (vf * MBOX_SIZE);
    382
    383		dst_mbox = &pf->mbox;
    384		dst_size = dst_mbox->mbox.tx_size -
    385				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
     386		/* Check if msgs fit into the destination area and have a valid size */
    387		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
    388			return -EINVAL;
    389
    390		dst_mdev = &dst_mbox->mbox.dev[0];
    391
    392		mutex_lock(&pf->mbox.lock);
    393		dst_mdev->mbase = src_mdev->mbase;
    394		dst_mdev->msg_size = mbox_hdr->msg_size;
    395		dst_mdev->num_msgs = num_msgs;
    396		err = otx2_sync_mbox_msg(dst_mbox);
     397		/* Error code -EIO indicates there is a communication failure
     398		 * with the AF. The rest of the error codes indicate that AF processed
    399		 * VF messages and set the error codes in response messages
    400		 * (if any) so simply forward responses to VF.
    401		 */
    402		if (err == -EIO) {
    403			dev_warn(pf->dev,
    404				 "AF not responding to VF%d messages\n", vf);
    405			/* restore PF mbase and exit */
    406			dst_mdev->mbase = pf->mbox.bbuf_base;
    407			mutex_unlock(&pf->mbox.lock);
    408			return err;
    409		}
    410		/* At this point, all the VF messages sent to AF are acked
    411		 * with proper responses and responses are copied to VF
    412		 * mailbox hence raise interrupt to VF.
    413		 */
    414		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
    415					      dst_mbox->mbox.rx_start);
    416		req_hdr->num_msgs = num_msgs;
    417
    418		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
    419				      pf->mbox.bbuf_base, vf);
    420		mutex_unlock(&pf->mbox.lock);
    421	} else if (dir == MBOX_DIR_PFVF_UP) {
    422		src_mdev = &src_mbox->dev[0];
    423		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
    424		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
    425					      src_mbox->rx_start);
    426		req_hdr->num_msgs = num_msgs;
    427
    428		dst_mbox = &pf->mbox_pfvf[0];
    429		dst_size = dst_mbox->mbox_up.tx_size -
    430				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
    431		/* Check if msgs fit into destination area */
    432		if (mbox_hdr->msg_size > dst_size)
    433			return -EINVAL;
    434
    435		dst_mdev = &dst_mbox->mbox_up.dev[vf];
    436		dst_mdev->mbase = src_mdev->mbase;
    437		dst_mdev->msg_size = mbox_hdr->msg_size;
    438		dst_mdev->num_msgs = mbox_hdr->num_msgs;
    439		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
    440		if (err) {
    441			dev_warn(pf->dev,
    442				 "VF%d is not responding to mailbox\n", vf);
    443			return err;
    444		}
    445	} else if (dir == MBOX_DIR_VFPF_UP) {
    446		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
    447					      src_mbox->rx_start);
    448		req_hdr->num_msgs = num_msgs;
    449		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
    450				      &pf->mbox.mbox_up,
    451				      pf->mbox_pfvf[vf].bbuf_base,
    452				      0);
    453	}
    454
    455	return 0;
    456}
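
/* A condensed sketch of the zero-copy MBOX_DIR_PFAF path above, for
 * orientation only (not additional driver code):
 *
 *	dst_mdev->mbase = src_mdev->mbase;   // PF->AF mbox aliases the VF region
 *	err = otx2_sync_mbox_msg(dst_mbox);  // AF reads/writes that region in place
 *	req_hdr->num_msgs = num_msgs;        // responses now sit in the VF region
 *	otx2_forward_msg_pfvf(...);          // ring VF's mbox IRQ, then restore
 *	                                     // dst_mdev->mbase = pf->mbox.bbuf_base
 */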
    457
    458static void otx2_pfvf_mbox_handler(struct work_struct *work)
    459{
    460	struct mbox_msghdr *msg = NULL;
    461	int offset, vf_idx, id, err;
    462	struct otx2_mbox_dev *mdev;
    463	struct mbox_hdr *req_hdr;
    464	struct otx2_mbox *mbox;
    465	struct mbox *vf_mbox;
    466	struct otx2_nic *pf;
    467
    468	vf_mbox = container_of(work, struct mbox, mbox_wrk);
    469	pf = vf_mbox->pfvf;
    470	vf_idx = vf_mbox - pf->mbox_pfvf;
    471
    472	mbox = &pf->mbox_pfvf[0].mbox;
    473	mdev = &mbox->dev[vf_idx];
    474	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    475
    476	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
    477
    478	for (id = 0; id < vf_mbox->num_msgs; id++) {
    479		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
    480					     offset);
    481
    482		if (msg->sig != OTX2_MBOX_REQ_SIG)
    483			goto inval_msg;
    484
     485		/* Set VF's number in each of the msgs */
    486		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
    487		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
    488		offset = msg->next_msgoff;
    489	}
    490	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
    491					vf_mbox->num_msgs);
    492	if (err)
    493		goto inval_msg;
    494	return;
    495
    496inval_msg:
    497	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
    498	otx2_mbox_msg_send(mbox, vf_idx);
    499}
    500
    501static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
    502{
    503	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
    504	struct otx2_nic *pf = vf_mbox->pfvf;
    505	struct otx2_mbox_dev *mdev;
    506	int offset, id, vf_idx = 0;
    507	struct mbox_hdr *rsp_hdr;
    508	struct mbox_msghdr *msg;
    509	struct otx2_mbox *mbox;
    510
    511	vf_idx = vf_mbox - pf->mbox_pfvf;
    512	mbox = &pf->mbox_pfvf[0].mbox_up;
    513	mdev = &mbox->dev[vf_idx];
    514
    515	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    516	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
    517
    518	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
    519		msg = mdev->mbase + offset;
    520
    521		if (msg->id >= MBOX_MSG_MAX) {
    522			dev_err(pf->dev,
    523				"Mbox msg with unknown ID 0x%x\n", msg->id);
    524			goto end;
    525		}
    526
    527		if (msg->sig != OTX2_MBOX_RSP_SIG) {
    528			dev_err(pf->dev,
    529				"Mbox msg with wrong signature %x, ID 0x%x\n",
    530				msg->sig, msg->id);
    531			goto end;
    532		}
    533
    534		switch (msg->id) {
    535		case MBOX_MSG_CGX_LINK_EVENT:
    536			break;
    537		default:
    538			if (msg->rc)
    539				dev_err(pf->dev,
    540					"Mbox msg response has err %d, ID 0x%x\n",
    541					msg->rc, msg->id);
    542			break;
    543		}
    544
    545end:
    546		offset = mbox->rx_start + msg->next_msgoff;
    547		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
    548			__otx2_mbox_reset(mbox, 0);
    549		mdev->msgs_acked++;
    550	}
    551}
    552
    553static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
    554{
    555	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
    556	int vfs = pf->total_vfs;
    557	struct mbox *mbox;
    558	u64 intr;
    559
    560	mbox = pf->mbox_pfvf;
    561	/* Handle VF interrupts */
    562	if (vfs > 64) {
    563		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
    564		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
    565		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
    566				TYPE_PFVF);
    567		vfs -= 64;
    568	}
    569
    570	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
    571	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
    572
    573	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
    574
    575	trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
    576
    577	return IRQ_HANDLED;
    578}
    579
    580static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
    581{
    582	void __iomem *hwbase;
    583	struct mbox *mbox;
    584	int err, vf;
    585	u64 base;
    586
    587	if (!numvfs)
    588		return -EINVAL;
    589
    590	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
    591				     sizeof(struct mbox), GFP_KERNEL);
    592	if (!pf->mbox_pfvf)
    593		return -ENOMEM;
    594
    595	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
    596					   WQ_UNBOUND | WQ_HIGHPRI |
    597					   WQ_MEM_RECLAIM, 1);
    598	if (!pf->mbox_pfvf_wq)
    599		return -ENOMEM;
    600
    601	/* On CN10K platform, PF <-> VF mailbox region follows after
    602	 * PF <-> AF mailbox region.
    603	 */
    604	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
    605		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
    606		       MBOX_SIZE;
    607	else
    608		base = readq((void __iomem *)((u64)pf->reg_base +
    609					      RVU_PF_VF_BAR4_ADDR));
    610
    611	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
    612	if (!hwbase) {
    613		err = -ENOMEM;
    614		goto free_wq;
    615	}
    616
    617	mbox = &pf->mbox_pfvf[0];
    618	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
    619			     MBOX_DIR_PFVF, numvfs);
    620	if (err)
    621		goto free_iomem;
    622
    623	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
    624			     MBOX_DIR_PFVF_UP, numvfs);
    625	if (err)
    626		goto free_iomem;
    627
    628	for (vf = 0; vf < numvfs; vf++) {
    629		mbox->pfvf = pf;
    630		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
    631		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
    632		mbox++;
    633	}
    634
    635	return 0;
    636
    637free_iomem:
    638	if (hwbase)
    639		iounmap(hwbase);
    640free_wq:
    641	destroy_workqueue(pf->mbox_pfvf_wq);
    642	return err;
    643}
    644
    645static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
    646{
    647	struct mbox *mbox = &pf->mbox_pfvf[0];
    648
    649	if (!mbox)
    650		return;
    651
    652	if (pf->mbox_pfvf_wq) {
    653		destroy_workqueue(pf->mbox_pfvf_wq);
    654		pf->mbox_pfvf_wq = NULL;
    655	}
    656
    657	if (mbox->mbox.hwbase)
    658		iounmap(mbox->mbox.hwbase);
    659
    660	otx2_mbox_destroy(&mbox->mbox);
    661}
    662
    663static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
    664{
    665	/* Clear PF <=> VF mailbox IRQ */
    666	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
    667	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
    668
    669	/* Enable PF <=> VF mailbox IRQ */
    670	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
    671	if (numvfs > 64) {
    672		numvfs -= 64;
    673		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
    674			     INTR_MASK(numvfs));
    675	}
    676}
    677
    678static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
    679{
    680	int vector;
    681
    682	/* Disable PF <=> VF mailbox IRQ */
    683	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
    684	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
    685
    686	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
    687	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
    688	free_irq(vector, pf);
    689
    690	if (numvfs > 64) {
    691		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
    692		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
    693		free_irq(vector, pf);
    694	}
    695}
    696
    697static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
    698{
    699	struct otx2_hw *hw = &pf->hw;
    700	char *irq_name;
    701	int err;
    702
    703	/* Register MBOX0 interrupt handler */
    704	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
    705	if (pf->pcifunc)
    706		snprintf(irq_name, NAME_SIZE,
    707			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
    708	else
    709		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
    710	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
    711			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
    712	if (err) {
    713		dev_err(pf->dev,
    714			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
    715		return err;
    716	}
    717
    718	if (numvfs > 64) {
    719		/* Register MBOX1 interrupt handler */
    720		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
    721		if (pf->pcifunc)
    722			snprintf(irq_name, NAME_SIZE,
    723				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
    724		else
    725			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
    726		err = request_irq(pci_irq_vector(pf->pdev,
    727						 RVU_PF_INT_VEC_VFPF_MBOX1),
    728						 otx2_pfvf_mbox_intr_handler,
    729						 0, irq_name, pf);
    730		if (err) {
    731			dev_err(pf->dev,
    732				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
    733			return err;
    734		}
    735	}
    736
    737	otx2_enable_pfvf_mbox_intr(pf, numvfs);
    738
    739	return 0;
    740}
    741
    742static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
    743				       struct mbox_msghdr *msg)
    744{
    745	int devid;
    746
    747	if (msg->id >= MBOX_MSG_MAX) {
    748		dev_err(pf->dev,
    749			"Mbox msg with unknown ID 0x%x\n", msg->id);
    750		return;
    751	}
    752
    753	if (msg->sig != OTX2_MBOX_RSP_SIG) {
    754		dev_err(pf->dev,
    755			"Mbox msg with wrong signature %x, ID 0x%x\n",
    756			 msg->sig, msg->id);
    757		return;
    758	}
    759
     760	/* message response heading to a VF */
    761	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
    762	if (devid) {
    763		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
    764		struct delayed_work *dwork;
    765
    766		switch (msg->id) {
    767		case MBOX_MSG_NIX_LF_START_RX:
    768			config->intf_down = false;
    769			dwork = &config->link_event_work;
    770			schedule_delayed_work(dwork, msecs_to_jiffies(100));
    771			break;
    772		case MBOX_MSG_NIX_LF_STOP_RX:
    773			config->intf_down = true;
    774			break;
    775		}
    776
    777		return;
    778	}
    779
    780	switch (msg->id) {
    781	case MBOX_MSG_READY:
    782		pf->pcifunc = msg->pcifunc;
    783		break;
    784	case MBOX_MSG_MSIX_OFFSET:
    785		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
    786		break;
    787	case MBOX_MSG_NPA_LF_ALLOC:
    788		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
    789		break;
    790	case MBOX_MSG_NIX_LF_ALLOC:
    791		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
    792		break;
    793	case MBOX_MSG_NIX_TXSCH_ALLOC:
    794		mbox_handler_nix_txsch_alloc(pf,
    795					     (struct nix_txsch_alloc_rsp *)msg);
    796		break;
    797	case MBOX_MSG_NIX_BP_ENABLE:
    798		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
    799		break;
    800	case MBOX_MSG_CGX_STATS:
    801		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
    802		break;
    803	case MBOX_MSG_CGX_FEC_STATS:
    804		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
    805		break;
    806	default:
    807		if (msg->rc)
    808			dev_err(pf->dev,
    809				"Mbox msg response has err %d, ID 0x%x\n",
    810				msg->rc, msg->id);
    811		break;
    812	}
    813}
    814
    815static void otx2_pfaf_mbox_handler(struct work_struct *work)
    816{
    817	struct otx2_mbox_dev *mdev;
    818	struct mbox_hdr *rsp_hdr;
    819	struct mbox_msghdr *msg;
    820	struct otx2_mbox *mbox;
    821	struct mbox *af_mbox;
    822	struct otx2_nic *pf;
    823	int offset, id;
    824
    825	af_mbox = container_of(work, struct mbox, mbox_wrk);
    826	mbox = &af_mbox->mbox;
    827	mdev = &mbox->dev[0];
    828	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    829
    830	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
    831	pf = af_mbox->pfvf;
    832
    833	for (id = 0; id < af_mbox->num_msgs; id++) {
    834		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
    835		otx2_process_pfaf_mbox_msg(pf, msg);
    836		offset = mbox->rx_start + msg->next_msgoff;
    837		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
    838			__otx2_mbox_reset(mbox, 0);
    839		mdev->msgs_acked++;
    840	}
    841
    842}
    843
    844static void otx2_handle_link_event(struct otx2_nic *pf)
    845{
    846	struct cgx_link_user_info *linfo = &pf->linfo;
    847	struct net_device *netdev = pf->netdev;
    848
    849	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
    850		linfo->link_up ? "UP" : "DOWN", linfo->speed,
    851		linfo->full_duplex ? "Full" : "Half");
    852	if (linfo->link_up) {
    853		netif_carrier_on(netdev);
    854		netif_tx_start_all_queues(netdev);
    855	} else {
    856		netif_tx_stop_all_queues(netdev);
    857		netif_carrier_off(netdev);
    858	}
    859}
    860
    861int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
    862					struct cgx_link_info_msg *msg,
    863					struct msg_rsp *rsp)
    864{
    865	int i;
    866
    867	/* Copy the link info sent by AF */
    868	pf->linfo = msg->link_info;
    869
    870	/* notify VFs about link event */
    871	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
    872		struct otx2_vf_config *config = &pf->vf_configs[i];
    873		struct delayed_work *dwork = &config->link_event_work;
    874
    875		if (config->intf_down)
    876			continue;
    877
    878		schedule_delayed_work(dwork, msecs_to_jiffies(100));
    879	}
    880
    881	/* interface has not been fully configured yet */
    882	if (pf->flags & OTX2_FLAG_INTF_DOWN)
    883		return 0;
    884
    885	otx2_handle_link_event(pf);
    886	return 0;
    887}
    888
    889static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
    890				    struct mbox_msghdr *req)
    891{
     892	/* Check if valid, if not reply with an invalid msg */
    893	if (req->sig != OTX2_MBOX_REQ_SIG) {
    894		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
    895		return -ENODEV;
    896	}
    897
    898	switch (req->id) {
    899#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
    900	case _id: {							\
    901		struct _rsp_type *rsp;					\
    902		int err;						\
    903									\
    904		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
    905			&pf->mbox.mbox_up, 0,				\
    906			sizeof(struct _rsp_type));			\
    907		if (!rsp)						\
    908			return -ENOMEM;					\
    909									\
    910		rsp->hdr.id = _id;					\
    911		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
    912		rsp->hdr.pcifunc = 0;					\
    913		rsp->hdr.rc = 0;					\
    914									\
    915		err = otx2_mbox_up_handler_ ## _fn_name(		\
    916			pf, (struct _req_type *)req, rsp);		\
    917		return err;						\
    918	}
    919MBOX_UP_CGX_MESSAGES
    920#undef M
    921		break;
    922	default:
    923		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
    924		return -ENODEV;
    925	}
    926	return 0;
    927}
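
/* For reference, a minimal sketch of what one M() expansion above yields for
 * the CGX link event entry of MBOX_UP_CGX_MESSAGES (the request/response
 * types follow the otx2_mbox_up_handler_cgx_link_event() prototype earlier
 * in this file; this is an illustration, not a verbatim preprocessor dump):
 *
 *	case MBOX_MSG_CGX_LINK_EVENT: {
 *		struct msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct msg_rsp *)otx2_mbox_alloc_msg(
 *			&pf->mbox.mbox_up, 0, sizeof(struct msg_rsp));
 *		if (!rsp)
 *			return -ENOMEM;
 *
 *		rsp->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
 *		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
 *		rsp->hdr.pcifunc = 0;
 *		rsp->hdr.rc = 0;
 *
 *		err = otx2_mbox_up_handler_cgx_link_event(
 *			pf, (struct cgx_link_info_msg *)req, rsp);
 *		return err;
 *	}
 */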
    928
    929static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
    930{
    931	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
    932	struct otx2_mbox *mbox = &af_mbox->mbox_up;
    933	struct otx2_mbox_dev *mdev = &mbox->dev[0];
    934	struct otx2_nic *pf = af_mbox->pfvf;
    935	int offset, id, devid = 0;
    936	struct mbox_hdr *rsp_hdr;
    937	struct mbox_msghdr *msg;
    938
    939	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
    940
    941	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
    942
    943	for (id = 0; id < af_mbox->up_num_msgs; id++) {
    944		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
    945
    946		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
    947		/* Skip processing VF's messages */
    948		if (!devid)
    949			otx2_process_mbox_msg_up(pf, msg);
    950		offset = mbox->rx_start + msg->next_msgoff;
    951	}
    952	if (devid) {
    953		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
    954					  MBOX_DIR_PFVF_UP, devid - 1,
    955					  af_mbox->up_num_msgs);
    956		return;
    957	}
    958
    959	otx2_mbox_msg_send(mbox, 0);
    960}
    961
    962static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
    963{
    964	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
    965	struct mbox *mbox;
    966
    967	/* Clear the IRQ */
    968	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
    969
    970	mbox = &pf->mbox;
    971
    972	trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
    973
    974	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
    975
    976	return IRQ_HANDLED;
    977}
    978
    979static void otx2_disable_mbox_intr(struct otx2_nic *pf)
    980{
    981	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
    982
    983	/* Disable AF => PF mailbox IRQ */
    984	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
    985	free_irq(vector, pf);
    986}
    987
    988static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
    989{
    990	struct otx2_hw *hw = &pf->hw;
    991	struct msg_req *req;
    992	char *irq_name;
    993	int err;
    994
    995	/* Register mailbox interrupt handler */
    996	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
    997	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
    998	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
    999			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
   1000	if (err) {
   1001		dev_err(pf->dev,
   1002			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
   1003		return err;
   1004	}
   1005
   1006	/* Enable mailbox interrupt for msgs coming from AF.
   1007	 * First clear to avoid spurious interrupts, if any.
   1008	 */
   1009	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
   1010	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
   1011
   1012	if (!probe_af)
   1013		return 0;
   1014
   1015	/* Check mailbox communication with AF */
   1016	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
   1017	if (!req) {
   1018		otx2_disable_mbox_intr(pf);
   1019		return -ENOMEM;
   1020	}
   1021	err = otx2_sync_mbox_msg(&pf->mbox);
   1022	if (err) {
   1023		dev_warn(pf->dev,
   1024			 "AF not responding to mailbox, deferring probe\n");
   1025		otx2_disable_mbox_intr(pf);
   1026		return -EPROBE_DEFER;
   1027	}
   1028
   1029	return 0;
   1030}
   1031
   1032static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
   1033{
   1034	struct mbox *mbox = &pf->mbox;
   1035
   1036	if (pf->mbox_wq) {
   1037		destroy_workqueue(pf->mbox_wq);
   1038		pf->mbox_wq = NULL;
   1039	}
   1040
   1041	if (mbox->mbox.hwbase)
   1042		iounmap((void __iomem *)mbox->mbox.hwbase);
   1043
   1044	otx2_mbox_destroy(&mbox->mbox);
   1045	otx2_mbox_destroy(&mbox->mbox_up);
   1046}
   1047
   1048static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
   1049{
   1050	struct mbox *mbox = &pf->mbox;
   1051	void __iomem *hwbase;
   1052	int err;
   1053
   1054	mbox->pfvf = pf;
   1055	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
   1056				      WQ_UNBOUND | WQ_HIGHPRI |
   1057				      WQ_MEM_RECLAIM, 1);
   1058	if (!pf->mbox_wq)
   1059		return -ENOMEM;
   1060
   1061	/* Mailbox is a reserved memory (in RAM) region shared between
    1062	 * admin function (i.e. AF) and this PF; it shouldn't be mapped as
    1063	 * device memory, so that unaligned accesses are allowed.
   1064	 */
   1065	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
   1066			    MBOX_SIZE);
   1067	if (!hwbase) {
   1068		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
   1069		err = -ENOMEM;
   1070		goto exit;
   1071	}
   1072
   1073	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
   1074			     MBOX_DIR_PFAF, 1);
   1075	if (err)
   1076		goto exit;
   1077
   1078	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
   1079			     MBOX_DIR_PFAF_UP, 1);
   1080	if (err)
   1081		goto exit;
   1082
   1083	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
   1084	if (err)
   1085		goto exit;
   1086
   1087	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
   1088	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
   1089	mutex_init(&mbox->lock);
   1090
   1091	return 0;
   1092exit:
   1093	otx2_pfaf_mbox_destroy(pf);
   1094	return err;
   1095}
   1096
   1097static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
   1098{
   1099	struct msg_req *msg;
   1100	int err;
   1101
   1102	mutex_lock(&pf->mbox.lock);
   1103	if (enable)
   1104		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
   1105	else
   1106		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
   1107
   1108	if (!msg) {
   1109		mutex_unlock(&pf->mbox.lock);
   1110		return -ENOMEM;
   1111	}
   1112
   1113	err = otx2_sync_mbox_msg(&pf->mbox);
   1114	mutex_unlock(&pf->mbox.lock);
   1115	return err;
   1116}
   1117
   1118static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
   1119{
   1120	struct msg_req *msg;
   1121	int err;
   1122
   1123	if (enable && !bitmap_empty(&pf->flow_cfg->dmacflt_bmap,
   1124				    pf->flow_cfg->dmacflt_max_flows))
   1125		netdev_warn(pf->netdev,
   1126			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");
   1127
   1128	mutex_lock(&pf->mbox.lock);
   1129	if (enable)
   1130		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
   1131	else
   1132		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
   1133
   1134	if (!msg) {
   1135		mutex_unlock(&pf->mbox.lock);
   1136		return -ENOMEM;
   1137	}
   1138
   1139	err = otx2_sync_mbox_msg(&pf->mbox);
   1140	mutex_unlock(&pf->mbox.lock);
   1141	return err;
   1142}
   1143
   1144int otx2_set_real_num_queues(struct net_device *netdev,
   1145			     int tx_queues, int rx_queues)
   1146{
   1147	int err;
   1148
   1149	err = netif_set_real_num_tx_queues(netdev, tx_queues);
   1150	if (err) {
   1151		netdev_err(netdev,
   1152			   "Failed to set no of Tx queues: %d\n", tx_queues);
   1153		return err;
   1154	}
   1155
   1156	err = netif_set_real_num_rx_queues(netdev, rx_queues);
   1157	if (err)
   1158		netdev_err(netdev,
   1159			   "Failed to set no of Rx queues: %d\n", rx_queues);
   1160	return err;
   1161}
   1162EXPORT_SYMBOL(otx2_set_real_num_queues);
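
/* Example usage (illustrative; the symbol is exported so that e.g. the VF
 * driver can reuse it): the stack's queue counts are expected to track the
 * hardware queue counts, roughly as follows.
 *
 *	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
 *	if (err)
 *		goto err_out;
 */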
   1163
   1164static irqreturn_t otx2_q_intr_handler(int irq, void *data)
   1165{
   1166	struct otx2_nic *pf = data;
   1167	u64 val, *ptr;
   1168	u64 qidx = 0;
   1169
   1170	/* CQ */
   1171	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
   1172		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
   1173		val = otx2_atomic64_add((qidx << 44), ptr);
   1174
   1175		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
   1176			     (val & NIX_CQERRINT_BITS));
   1177		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
   1178			continue;
   1179
   1180		if (val & BIT_ULL(42)) {
   1181			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
   1182				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
   1183		} else {
   1184			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
   1185				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
   1186					   qidx);
   1187			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
   1188				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
   1189					   qidx);
   1190		}
   1191
   1192		schedule_work(&pf->reset_task);
   1193	}
   1194
   1195	/* SQ */
   1196	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
   1197		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
   1198		val = otx2_atomic64_add((qidx << 44), ptr);
   1199		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
   1200			     (val & NIX_SQINT_BITS));
   1201
   1202		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
   1203			continue;
   1204
   1205		if (val & BIT_ULL(42)) {
   1206			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
   1207				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
   1208		} else {
   1209			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
   1210				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
   1211					   qidx,
   1212					   otx2_read64(pf,
   1213						       NIX_LF_SQ_OP_ERR_DBG));
   1214				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
   1215					     BIT_ULL(44));
   1216			}
   1217			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
   1218				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
   1219					   qidx,
   1220					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
   1221				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
   1222					     BIT_ULL(44));
   1223			}
   1224			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
   1225				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
   1226					   qidx,
   1227					   otx2_read64(pf,
   1228						       NIX_LF_SEND_ERR_DBG));
   1229				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
   1230					     BIT_ULL(44));
   1231			}
   1232			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
   1233				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
   1234					   qidx);
   1235		}
   1236
   1237		schedule_work(&pf->reset_task);
   1238	}
   1239
   1240	return IRQ_HANDLED;
   1241}
   1242
   1243static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
   1244{
   1245	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
   1246	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
   1247	int qidx = cq_poll->cint_idx;
   1248
   1249	/* Disable interrupts.
   1250	 *
    1251	 * Completion interrupts behave in a level-triggered
    1252	 * fashion, and hence have to be cleared only after they are serviced.
   1253	 */
   1254	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
   1255
   1256	/* Schedule NAPI */
   1257	pf->napi_events++;
   1258	napi_schedule_irqoff(&cq_poll->napi);
   1259
   1260	return IRQ_HANDLED;
   1261}
   1262
   1263static void otx2_disable_napi(struct otx2_nic *pf)
   1264{
   1265	struct otx2_qset *qset = &pf->qset;
   1266	struct otx2_cq_poll *cq_poll;
   1267	int qidx;
   1268
   1269	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
   1270		cq_poll = &qset->napi[qidx];
   1271		cancel_work_sync(&cq_poll->dim.work);
   1272		napi_disable(&cq_poll->napi);
   1273		netif_napi_del(&cq_poll->napi);
   1274	}
   1275}
   1276
   1277static void otx2_free_cq_res(struct otx2_nic *pf)
   1278{
   1279	struct otx2_qset *qset = &pf->qset;
   1280	struct otx2_cq_queue *cq;
   1281	int qidx;
   1282
   1283	/* Disable CQs */
   1284	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
   1285	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
   1286		cq = &qset->cq[qidx];
   1287		qmem_free(pf->dev, cq->cqe);
   1288	}
   1289}
   1290
   1291static void otx2_free_sq_res(struct otx2_nic *pf)
   1292{
   1293	struct otx2_qset *qset = &pf->qset;
   1294	struct otx2_snd_queue *sq;
   1295	int qidx;
   1296
   1297	/* Disable SQs */
   1298	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
   1299	/* Free SQB pointers */
   1300	otx2_sq_free_sqbs(pf);
   1301	for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
   1302		sq = &qset->sq[qidx];
   1303		qmem_free(pf->dev, sq->sqe);
   1304		qmem_free(pf->dev, sq->tso_hdrs);
   1305		kfree(sq->sg);
   1306		kfree(sq->sqb_ptrs);
   1307	}
   1308}
   1309
   1310static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
   1311{
   1312	int frame_size;
   1313	int total_size;
   1314	int rbuf_size;
   1315
   1316	if (pf->hw.rbuf_len)
   1317		return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
   1318
   1319	/* The data transferred by NIX to memory consists of actual packet
   1320	 * plus additional data which has timestamp and/or EDSA/HIGIG2
   1321	 * headers if interface is configured in corresponding modes.
   1322	 * NIX transfers entire data using 6 segments/buffers and writes
   1323	 * a CQE_RX descriptor with those segment addresses. First segment
    1324	 * has additional data prepended to the packet. Also, software reserves a
    1325	 * headroom of 128 bytes in each segment. Hence the total size of
    1326	 * memory needed to receive a packet with 'mtu' is:
    1327	 * frame size = mtu + additional data;
   1328	 * memory = frame_size + headroom * 6;
   1329	 * each receive buffer size = memory / 6;
   1330	 */
   1331	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
   1332	total_size = frame_size + OTX2_HEAD_ROOM * 6;
   1333	rbuf_size = total_size / 6;
   1334
   1335	return ALIGN(rbuf_size, 2048);
   1336}
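
/* Worked example of the sizing above (a sketch; the exact OTX2_ETH_HLEN and
 * OTX2_HW_TIMESTAMP_LEN values live in otx2_common.h and are only
 * approximated here): with mtu = 1500 and ~30 bytes of additional data,
 * frame_size is ~1530, total_size = 1530 + 128 * 6 = 2298, and
 * rbuf_size = 2298 / 6 = 383, which ALIGN(, 2048) rounds up to a single
 * 2KB receive buffer per segment.
 */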
   1337
   1338static int otx2_init_hw_resources(struct otx2_nic *pf)
   1339{
   1340	struct nix_lf_free_req *free_req;
   1341	struct mbox *mbox = &pf->mbox;
   1342	struct otx2_hw *hw = &pf->hw;
   1343	struct msg_req *req;
   1344	int err = 0, lvl;
   1345
   1346	/* Set required NPA LF's pool counts
   1347	 * Auras and Pools are used in a 1:1 mapping,
   1348	 * so, aura count = pool count.
   1349	 */
   1350	hw->rqpool_cnt = hw->rx_queues;
   1351	hw->sqpool_cnt = hw->tot_tx_queues;
   1352	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
   1353
   1354	/* Maximum hardware supported transmit length */
   1355	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
   1356
   1357	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
   1358
   1359	mutex_lock(&mbox->lock);
   1360	/* NPA init */
   1361	err = otx2_config_npa(pf);
   1362	if (err)
   1363		goto exit;
   1364
   1365	/* NIX init */
   1366	err = otx2_config_nix(pf);
   1367	if (err)
   1368		goto err_free_npa_lf;
   1369
   1370	/* Enable backpressure */
   1371	otx2_nix_config_bp(pf, true);
   1372
   1373	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
   1374	err = otx2_rq_aura_pool_init(pf);
   1375	if (err) {
   1376		mutex_unlock(&mbox->lock);
   1377		goto err_free_nix_lf;
   1378	}
   1379	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
   1380	err = otx2_sq_aura_pool_init(pf);
   1381	if (err) {
   1382		mutex_unlock(&mbox->lock);
   1383		goto err_free_rq_ptrs;
   1384	}
   1385
   1386	err = otx2_txsch_alloc(pf);
   1387	if (err) {
   1388		mutex_unlock(&mbox->lock);
   1389		goto err_free_sq_ptrs;
   1390	}
   1391
   1392	err = otx2_config_nix_queues(pf);
   1393	if (err) {
   1394		mutex_unlock(&mbox->lock);
   1395		goto err_free_txsch;
   1396	}
   1397	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
   1398		err = otx2_txschq_config(pf, lvl);
   1399		if (err) {
   1400			mutex_unlock(&mbox->lock);
   1401			goto err_free_nix_queues;
   1402		}
   1403	}
   1404	mutex_unlock(&mbox->lock);
   1405	return err;
   1406
   1407err_free_nix_queues:
   1408	otx2_free_sq_res(pf);
   1409	otx2_free_cq_res(pf);
   1410	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
   1411err_free_txsch:
   1412	if (otx2_txschq_stop(pf))
   1413		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
   1414err_free_sq_ptrs:
   1415	otx2_sq_free_sqbs(pf);
   1416err_free_rq_ptrs:
   1417	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
   1418	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
   1419	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
   1420	otx2_aura_pool_free(pf);
   1421err_free_nix_lf:
   1422	mutex_lock(&mbox->lock);
   1423	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
   1424	if (free_req) {
   1425		free_req->flags = NIX_LF_DISABLE_FLOWS;
   1426		if (otx2_sync_mbox_msg(mbox))
   1427			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
   1428	}
   1429err_free_npa_lf:
   1430	/* Reset NPA LF */
   1431	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
   1432	if (req) {
   1433		if (otx2_sync_mbox_msg(mbox))
   1434			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
   1435	}
   1436exit:
   1437	mutex_unlock(&mbox->lock);
   1438	return err;
   1439}
   1440
   1441static void otx2_free_hw_resources(struct otx2_nic *pf)
   1442{
   1443	struct otx2_qset *qset = &pf->qset;
   1444	struct nix_lf_free_req *free_req;
   1445	struct mbox *mbox = &pf->mbox;
   1446	struct otx2_cq_queue *cq;
   1447	struct msg_req *req;
   1448	int qidx, err;
   1449
    1450	/* Ensure all SQEs are processed */
   1451	otx2_sqb_flush(pf);
   1452
   1453	/* Stop transmission */
   1454	err = otx2_txschq_stop(pf);
   1455	if (err)
   1456		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
   1457
   1458	mutex_lock(&mbox->lock);
   1459	/* Disable backpressure */
   1460	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
   1461		otx2_nix_config_bp(pf, false);
   1462	mutex_unlock(&mbox->lock);
   1463
   1464	/* Disable RQs */
   1465	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
   1466
    1467	/* Dequeue all CQEs */
   1468	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
   1469		cq = &qset->cq[qidx];
   1470		if (cq->cq_type == CQ_RX)
   1471			otx2_cleanup_rx_cqes(pf, cq);
   1472		else
   1473			otx2_cleanup_tx_cqes(pf, cq);
   1474	}
   1475
   1476	otx2_free_sq_res(pf);
   1477
    1478	/* Free RQ buffer pointers */
   1479	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
   1480
   1481	otx2_free_cq_res(pf);
   1482
   1483	/* Free all ingress bandwidth profiles allocated */
   1484	cn10k_free_all_ipolicers(pf);
   1485
   1486	mutex_lock(&mbox->lock);
   1487	/* Reset NIX LF */
   1488	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
   1489	if (free_req) {
   1490		free_req->flags = NIX_LF_DISABLE_FLOWS;
   1491		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
   1492			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
   1493		if (otx2_sync_mbox_msg(mbox))
   1494			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
   1495	}
   1496	mutex_unlock(&mbox->lock);
   1497
   1498	/* Disable NPA Pool and Aura hw context */
   1499	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
   1500	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
   1501	otx2_aura_pool_free(pf);
   1502
   1503	mutex_lock(&mbox->lock);
   1504	/* Reset NPA LF */
   1505	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
   1506	if (req) {
   1507		if (otx2_sync_mbox_msg(mbox))
   1508			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
   1509	}
   1510	mutex_unlock(&mbox->lock);
   1511}
   1512
   1513static void otx2_do_set_rx_mode(struct otx2_nic *pf)
   1514{
   1515	struct net_device *netdev = pf->netdev;
   1516	struct nix_rx_mode *req;
   1517	bool promisc = false;
   1518
   1519	if (!(netdev->flags & IFF_UP))
   1520		return;
   1521
   1522	if ((netdev->flags & IFF_PROMISC) ||
   1523	    (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
   1524		promisc = true;
   1525	}
   1526
   1527	/* Write unicast address to mcam entries or del from mcam */
   1528	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
   1529		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
   1530
   1531	mutex_lock(&pf->mbox.lock);
   1532	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
   1533	if (!req) {
   1534		mutex_unlock(&pf->mbox.lock);
   1535		return;
   1536	}
   1537
   1538	req->mode = NIX_RX_MODE_UCAST;
   1539
   1540	if (promisc)
   1541		req->mode |= NIX_RX_MODE_PROMISC;
   1542	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
   1543		req->mode |= NIX_RX_MODE_ALLMULTI;
   1544
   1545	req->mode |= NIX_RX_MODE_USE_MCE;
   1546
   1547	otx2_sync_mbox_msg(&pf->mbox);
   1548	mutex_unlock(&pf->mbox.lock);
   1549}
   1550
   1551static void otx2_dim_work(struct work_struct *w)
   1552{
   1553	struct dim_cq_moder cur_moder;
   1554	struct otx2_cq_poll *cq_poll;
   1555	struct otx2_nic *pfvf;
   1556	struct dim *dim;
   1557
   1558	dim = container_of(w, struct dim, work);
   1559	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
   1560	cq_poll = container_of(dim, struct otx2_cq_poll, dim);
   1561	pfvf = (struct otx2_nic *)cq_poll->dev;
   1562	pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
   1563		CQ_TIMER_THRESH_MAX : cur_moder.usec;
   1564	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
   1565		NAPI_POLL_WEIGHT : cur_moder.pkts;
   1566	dim->state = DIM_START_MEASURE;
   1567}
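
/* In effect (sketch only), the two conditionals above clamp the DIM
 * suggestion:
 *	cq_time_wait   = min(cur_moder.usec, CQ_TIMER_THRESH_MAX)
 *	cq_ecount_wait = min(cur_moder.pkts, NAPI_POLL_WEIGHT)
 */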
   1568
   1569int otx2_open(struct net_device *netdev)
   1570{
   1571	struct otx2_nic *pf = netdev_priv(netdev);
   1572	struct otx2_cq_poll *cq_poll = NULL;
   1573	struct otx2_qset *qset = &pf->qset;
   1574	int err = 0, qidx, vec;
   1575	char *irq_name;
   1576
   1577	netif_carrier_off(netdev);
   1578
   1579	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
    1580	/* RQs and SQs are mapped to different CQs,
    1581	 * so find out the max CQ IRQs (i.e. CINTs) needed.
   1582	 */
   1583	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
   1584	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
   1585	if (!qset->napi)
   1586		return -ENOMEM;
   1587
   1588	/* CQ size of RQ */
   1589	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
   1590	/* CQ size of SQ */
   1591	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
   1592
   1593	err = -ENOMEM;
   1594	qset->cq = kcalloc(pf->qset.cq_cnt,
   1595			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
   1596	if (!qset->cq)
   1597		goto err_free_mem;
   1598
   1599	qset->sq = kcalloc(pf->hw.tot_tx_queues,
   1600			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
   1601	if (!qset->sq)
   1602		goto err_free_mem;
   1603
   1604	qset->rq = kcalloc(pf->hw.rx_queues,
   1605			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
   1606	if (!qset->rq)
   1607		goto err_free_mem;
   1608
   1609	err = otx2_init_hw_resources(pf);
   1610	if (err)
   1611		goto err_free_mem;
   1612
   1613	/* Register NAPI handler */
   1614	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
   1615		cq_poll = &qset->napi[qidx];
   1616		cq_poll->cint_idx = qidx;
    1617		/* RQ0 & SQ0 are mapped to CINT0 and so on:
    1618		 * 'cq_ids[0]' points to RQ's CQ,
    1619		 * 'cq_ids[1]' points to SQ's CQ, and
    1620		 * 'cq_ids[2]' points to XDP's CQ.
   1621		 */
   1622		cq_poll->cq_ids[CQ_RX] =
   1623			(qidx <  pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
   1624		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
   1625				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
   1626		if (pf->xdp_prog)
   1627			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
   1628						  (qidx + pf->hw.rx_queues +
   1629						  pf->hw.tx_queues) :
   1630						  CINT_INVALID_CQ;
   1631		else
   1632			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
   1633
   1634		cq_poll->dev = (void *)pf;
   1635		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
   1636		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
   1637		netif_napi_add(netdev, &cq_poll->napi,
   1638			       otx2_napi_handler, NAPI_POLL_WEIGHT);
   1639		napi_enable(&cq_poll->napi);
   1640	}
   1641
   1642	/* Set maximum frame size allowed in HW */
   1643	err = otx2_hw_set_mtu(pf, netdev->mtu);
   1644	if (err)
   1645		goto err_disable_napi;
   1646
    1647	/* Set up segmentation algorithms; on failure, offload capability is cleared */
   1648	otx2_setup_segmentation(pf);
   1649
   1650	/* Initialize RSS */
   1651	err = otx2_rss_init(pf);
   1652	if (err)
   1653		goto err_disable_napi;
   1654
   1655	/* Register Queue IRQ handlers */
   1656	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
   1657	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
   1658
   1659	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
   1660
   1661	err = request_irq(pci_irq_vector(pf->pdev, vec),
   1662			  otx2_q_intr_handler, 0, irq_name, pf);
   1663	if (err) {
   1664		dev_err(pf->dev,
   1665			"RVUPF%d: IRQ registration failed for QERR\n",
   1666			rvu_get_pf(pf->pcifunc));
   1667		goto err_disable_napi;
   1668	}
   1669
   1670	/* Enable QINT IRQ */
   1671	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
   1672
   1673	/* Register CQ IRQ handlers */
   1674	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
   1675	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
   1676		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
   1677
   1678		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
   1679			 qidx);
   1680
   1681		err = request_irq(pci_irq_vector(pf->pdev, vec),
   1682				  otx2_cq_intr_handler, 0, irq_name,
   1683				  &qset->napi[qidx]);
   1684		if (err) {
   1685			dev_err(pf->dev,
   1686				"RVUPF%d: IRQ registration failed for CQ%d\n",
   1687				rvu_get_pf(pf->pcifunc), qidx);
   1688			goto err_free_cints;
   1689		}
   1690		vec++;
   1691
   1692		otx2_config_irq_coalescing(pf, qidx);
   1693
   1694		/* Enable CQ IRQ */
   1695		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
   1696		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
   1697	}
   1698
   1699	otx2_set_cints_affinity(pf);
   1700
   1701	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
   1702		otx2_enable_rxvlan(pf, true);
   1703
    1704	/* When reinitializing, re-enable timestamping if it was enabled before */
   1705	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
   1706		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
   1707		otx2_config_hw_tx_tstamp(pf, true);
   1708	}
   1709	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
   1710		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
   1711		otx2_config_hw_rx_tstamp(pf, true);
   1712	}
   1713
   1714	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
   1715	/* 'intf_down' may be checked on any cpu */
   1716	smp_wmb();
   1717
   1718	/* we have already received link status notification */
   1719	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
   1720		otx2_handle_link_event(pf);
   1721
   1722	/* Install DMAC Filters */
   1723	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
   1724		otx2_dmacflt_reinstall_flows(pf);
   1725
   1726	err = otx2_rxtx_enable(pf, true);
   1727	if (err)
   1728		goto err_tx_stop_queues;
   1729
   1730	otx2_do_set_rx_mode(pf);
   1731
   1732	return 0;
   1733
   1734err_tx_stop_queues:
   1735	netif_tx_stop_all_queues(netdev);
   1736	netif_carrier_off(netdev);
   1737	pf->flags |= OTX2_FLAG_INTF_DOWN;
   1738err_free_cints:
   1739	otx2_free_cints(pf, qidx);
   1740	vec = pci_irq_vector(pf->pdev,
   1741			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
   1742	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
   1743	free_irq(vec, pf);
   1744err_disable_napi:
   1745	otx2_disable_napi(pf);
   1746	otx2_free_hw_resources(pf);
   1747err_free_mem:
   1748	kfree(qset->sq);
   1749	kfree(qset->cq);
   1750	kfree(qset->rq);
   1751	kfree(qset->napi);
   1752	return err;
   1753}
   1754EXPORT_SYMBOL(otx2_open);
   1755
   1756int otx2_stop(struct net_device *netdev)
   1757{
   1758	struct otx2_nic *pf = netdev_priv(netdev);
   1759	struct otx2_cq_poll *cq_poll = NULL;
   1760	struct otx2_qset *qset = &pf->qset;
   1761	struct otx2_rss_info *rss;
   1762	int qidx, vec, wrk;
   1763
   1764	/* If the DOWN flag is set resources are already freed */
   1765	if (pf->flags & OTX2_FLAG_INTF_DOWN)
   1766		return 0;
   1767
   1768	netif_carrier_off(netdev);
   1769	netif_tx_stop_all_queues(netdev);
   1770
   1771	pf->flags |= OTX2_FLAG_INTF_DOWN;
   1772	/* 'intf_down' may be checked on any cpu */
   1773	smp_wmb();
   1774
   1775	/* First stop packet Rx/Tx */
   1776	otx2_rxtx_enable(pf, false);
   1777
   1778	/* Clear RSS enable flag */
   1779	rss = &pf->hw.rss_info;
   1780	rss->enable = false;
   1781
   1782	/* Cleanup Queue IRQ */
   1783	vec = pci_irq_vector(pf->pdev,
   1784			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
   1785	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
   1786	free_irq(vec, pf);
   1787
   1788	/* Cleanup CQ NAPI and IRQ */
   1789	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
   1790	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
   1791		/* Disable interrupt */
   1792		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
   1793
   1794		synchronize_irq(pci_irq_vector(pf->pdev, vec));
   1795
   1796		cq_poll = &qset->napi[qidx];
   1797		napi_synchronize(&cq_poll->napi);
   1798		vec++;
   1799	}
   1800
   1801	netif_tx_disable(netdev);
   1802
   1803	otx2_free_hw_resources(pf);
   1804	otx2_free_cints(pf, pf->hw.cint_cnt);
   1805	otx2_disable_napi(pf);
   1806
   1807	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
   1808		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
   1809
   1810	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
   1811		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
   1812	devm_kfree(pf->dev, pf->refill_wrk);
   1813
   1814	kfree(qset->sq);
   1815	kfree(qset->cq);
   1816	kfree(qset->rq);
   1817	kfree(qset->napi);
   1818	/* Do not clear RQ/SQ ringsize settings */
   1819	memset_startat(qset, 0, sqe_cnt);
   1820	return 0;
   1821}
   1822EXPORT_SYMBOL(otx2_stop);
   1823
   1824static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
   1825{
   1826	struct otx2_nic *pf = netdev_priv(netdev);
   1827	int qidx = skb_get_queue_mapping(skb);
   1828	struct otx2_snd_queue *sq;
   1829	struct netdev_queue *txq;
   1830
   1831	/* Check for minimum and maximum packet length */
   1832	if (skb->len <= ETH_HLEN ||
   1833	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
   1834		dev_kfree_skb(skb);
   1835		return NETDEV_TX_OK;
   1836	}
   1837
   1838	sq = &pf->qset.sq[qidx];
   1839	txq = netdev_get_tx_queue(netdev, qidx);
   1840
   1841	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
   1842		netif_tx_stop_queue(txq);
   1843
   1844		/* Check again, in case SQBs got freed up */
   1845		smp_mb();
   1846		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
   1847							> sq->sqe_thresh)
   1848			netif_tx_wake_queue(txq);
   1849
   1850		return NETDEV_TX_BUSY;
   1851	}
   1852
   1853	return NETDEV_TX_OK;
   1854}
   1855
   1856static netdev_features_t otx2_fix_features(struct net_device *dev,
   1857					   netdev_features_t features)
   1858{
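       	/* Rx VLAN stripping is applied to CTAG and STAG together by the
       	 * HW configuration, so keep the two feature flags in sync.
       	 */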
   1859	if (features & NETIF_F_HW_VLAN_CTAG_RX)
   1860		features |= NETIF_F_HW_VLAN_STAG_RX;
   1861	else
   1862		features &= ~NETIF_F_HW_VLAN_STAG_RX;
   1863
   1864	return features;
   1865}
   1866
   1867static void otx2_set_rx_mode(struct net_device *netdev)
   1868{
   1869	struct otx2_nic *pf = netdev_priv(netdev);
   1870
   1871	queue_work(pf->otx2_wq, &pf->rx_mode_work);
   1872}
   1873
   1874static void otx2_rx_mode_wrk_handler(struct work_struct *work)
   1875{
   1876	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
   1877
   1878	otx2_do_set_rx_mode(pf);
   1879}
   1880
   1881static int otx2_set_features(struct net_device *netdev,
   1882			     netdev_features_t features)
   1883{
   1884	netdev_features_t changed = features ^ netdev->features;
   1885	struct otx2_nic *pf = netdev_priv(netdev);
   1886
   1887	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
   1888		return otx2_cgx_config_loopback(pf,
   1889						features & NETIF_F_LOOPBACK);
   1890
   1891	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
   1892		return otx2_enable_rxvlan(pf,
   1893					  features & NETIF_F_HW_VLAN_CTAG_RX);
   1894
   1895	return otx2_handle_ntuple_tc_features(netdev, features);
   1896}
   1897
   1898static void otx2_reset_task(struct work_struct *work)
   1899{
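       	/* Restart the interface (stop + open) under the rtnl lock */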
   1900	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
   1901
   1902	if (!netif_running(pf->netdev))
   1903		return;
   1904
   1905	rtnl_lock();
   1906	otx2_stop(pf->netdev);
   1907	pf->reset_count++;
   1908	otx2_open(pf->netdev);
   1909	netif_trans_update(pf->netdev);
   1910	rtnl_unlock();
   1911}
   1912
   1913static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
   1914{
   1915	struct msg_req *req;
   1916	int err;
   1917
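       	/* Rx timestamping already enabled, nothing to do */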
   1918	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
   1919		return 0;
   1920
   1921	mutex_lock(&pfvf->mbox.lock);
   1922	if (enable)
   1923		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
   1924	else
   1925		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
   1926	if (!req) {
   1927		mutex_unlock(&pfvf->mbox.lock);
   1928		return -ENOMEM;
   1929	}
   1930
   1931	err = otx2_sync_mbox_msg(&pfvf->mbox);
   1932	if (err) {
   1933		mutex_unlock(&pfvf->mbox.lock);
   1934		return err;
   1935	}
   1936
   1937	mutex_unlock(&pfvf->mbox.lock);
   1938	if (enable)
   1939		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
   1940	else
   1941		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
   1942	return 0;
   1943}
   1944
   1945static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
   1946{
   1947	struct msg_req *req;
   1948	int err;
   1949
   1950	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
   1951		return 0;
   1952
   1953	mutex_lock(&pfvf->mbox.lock);
   1954	if (enable)
   1955		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
   1956	else
   1957		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
   1958	if (!req) {
   1959		mutex_unlock(&pfvf->mbox.lock);
   1960		return -ENOMEM;
   1961	}
   1962
   1963	err = otx2_sync_mbox_msg(&pfvf->mbox);
   1964	if (err) {
   1965		mutex_unlock(&pfvf->mbox.lock);
   1966		return err;
   1967	}
   1968
   1969	mutex_unlock(&pfvf->mbox.lock);
   1970	if (enable)
   1971		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
   1972	else
   1973		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
   1974	return 0;
   1975}
   1976
   1977int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
   1978{
   1979	struct otx2_nic *pfvf = netdev_priv(netdev);
   1980	struct hwtstamp_config config;
   1981
   1982	if (!pfvf->ptp)
   1983		return -ENODEV;
   1984
   1985	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
   1986		return -EFAULT;
   1987
   1988	switch (config.tx_type) {
   1989	case HWTSTAMP_TX_OFF:
   1990		otx2_config_hw_tx_tstamp(pfvf, false);
   1991		break;
   1992	case HWTSTAMP_TX_ON:
   1993		otx2_config_hw_tx_tstamp(pfvf, true);
   1994		break;
   1995	default:
   1996		return -ERANGE;
   1997	}
   1998
   1999	switch (config.rx_filter) {
   2000	case HWTSTAMP_FILTER_NONE:
   2001		otx2_config_hw_rx_tstamp(pfvf, false);
   2002		break;
   2003	case HWTSTAMP_FILTER_ALL:
   2004	case HWTSTAMP_FILTER_SOME:
   2005	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
   2006	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
   2007	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
   2008	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
   2009	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
   2010	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
   2011	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
   2012	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
   2013	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
   2014	case HWTSTAMP_FILTER_PTP_V2_EVENT:
   2015	case HWTSTAMP_FILTER_PTP_V2_SYNC:
   2016	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
   2017		otx2_config_hw_rx_tstamp(pfvf, true);
   2018		config.rx_filter = HWTSTAMP_FILTER_ALL;
   2019		break;
   2020	default:
   2021		return -ERANGE;
   2022	}
   2023
   2024	memcpy(&pfvf->tstamp, &config, sizeof(config));
   2025
   2026	return copy_to_user(ifr->ifr_data, &config,
   2027			    sizeof(config)) ? -EFAULT : 0;
   2028}
   2029EXPORT_SYMBOL(otx2_config_hwtstamp);
   2030
   2031int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
   2032{
   2033	struct otx2_nic *pfvf = netdev_priv(netdev);
   2034	struct hwtstamp_config *cfg = &pfvf->tstamp;
   2035
   2036	switch (cmd) {
   2037	case SIOCSHWTSTAMP:
   2038		return otx2_config_hwtstamp(netdev, req);
   2039	case SIOCGHWTSTAMP:
   2040		return copy_to_user(req->ifr_data, cfg,
   2041				    sizeof(*cfg)) ? -EFAULT : 0;
   2042	default:
   2043		return -EOPNOTSUPP;
   2044	}
   2045}
   2046EXPORT_SYMBOL(otx2_ioctl);
   2047
   2048static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
   2049{
   2050	struct npc_install_flow_req *req;
   2051	int err;
   2052
   2053	mutex_lock(&pf->mbox.lock);
   2054	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
   2055	if (!req) {
   2056		err = -ENOMEM;
   2057		goto out;
   2058	}
   2059
   2060	ether_addr_copy(req->packet.dmac, mac);
   2061	eth_broadcast_addr((u8 *)&req->mask.dmac);
   2062	req->features = BIT_ULL(NPC_DMAC);
   2063	req->channel = pf->hw.rx_chan_base;
   2064	req->intf = NIX_INTF_RX;
   2065	req->default_rule = 1;
   2066	req->append = 1;
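       	/* VF numbering in the request is 1-based; func 0 denotes the PF itself */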
   2067	req->vf = vf + 1;
   2068	req->op = NIX_RX_ACTION_DEFAULT;
   2069
   2070	err = otx2_sync_mbox_msg(&pf->mbox);
   2071out:
   2072	mutex_unlock(&pf->mbox.lock);
   2073	return err;
   2074}
   2075
   2076static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
   2077{
   2078	struct otx2_nic *pf = netdev_priv(netdev);
   2079	struct pci_dev *pdev = pf->pdev;
   2080	struct otx2_vf_config *config;
   2081	int ret;
   2082
   2083	if (!netif_running(netdev))
   2084		return -EAGAIN;
   2085
   2086	if (vf >= pf->total_vfs)
   2087		return -EINVAL;
   2088
   2089	if (!is_valid_ether_addr(mac))
   2090		return -EINVAL;
   2091
   2092	config = &pf->vf_configs[vf];
   2093	ether_addr_copy(config->mac, mac);
   2094
   2095	ret = otx2_do_set_vf_mac(pf, vf, mac);
   2096	if (ret == 0)
   2097		dev_info(&pdev->dev,
   2098			 "Load/Reload VF driver\n");
   2099
   2100	return ret;
   2101}
   2102
   2103static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
   2104			       __be16 proto)
   2105{
   2106	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
   2107	struct nix_vtag_config_rsp *vtag_rsp;
   2108	struct npc_delete_flow_req *del_req;
   2109	struct nix_vtag_config *vtag_req;
   2110	struct npc_install_flow_req *req;
   2111	struct otx2_vf_config *config;
   2112	int err = 0;
   2113	u32 idx;
   2114
   2115	config = &pf->vf_configs[vf];
   2116
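       	/* Nothing to do if no VLAN is being set and none was configured before */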
   2117	if (!vlan && !config->vlan)
   2118		goto out;
   2119
   2120	mutex_lock(&pf->mbox.lock);
   2121
   2122	/* free old tx vtag entry */
   2123	if (config->vlan) {
   2124		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
   2125		if (!vtag_req) {
   2126			err = -ENOMEM;
   2127			goto out;
   2128		}
   2129		vtag_req->cfg_type = 0;
   2130		vtag_req->tx.free_vtag0 = 1;
   2131		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
   2132
   2133		err = otx2_sync_mbox_msg(&pf->mbox);
   2134		if (err)
   2135			goto out;
   2136	}
   2137
   2138	if (!vlan && config->vlan) {
   2139		/* rx */
   2140		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
   2141		if (!del_req) {
   2142			err = -ENOMEM;
   2143			goto out;
   2144		}
   2145		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
   2146		del_req->entry =
   2147			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
   2148		err = otx2_sync_mbox_msg(&pf->mbox);
   2149		if (err)
   2150			goto out;
   2151
   2152		/* tx */
   2153		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
   2154		if (!del_req) {
   2155			err = -ENOMEM;
   2156			goto out;
   2157		}
   2158		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
   2159		del_req->entry =
   2160			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
   2161		err = otx2_sync_mbox_msg(&pf->mbox);
   2162
   2163		goto out;
   2164	}
   2165
   2166	/* rx */
   2167	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
   2168	if (!req) {
   2169		err = -ENOMEM;
   2170		goto out;
   2171	}
   2172
   2173	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
   2174	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
   2175	req->packet.vlan_tci = htons(vlan);
   2176	req->mask.vlan_tci = htons(VLAN_VID_MASK);
   2177	/* af fills the destination mac addr */
   2178	eth_broadcast_addr((u8 *)&req->mask.dmac);
   2179	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
   2180	req->channel = pf->hw.rx_chan_base;
   2181	req->intf = NIX_INTF_RX;
   2182	req->vf = vf + 1;
   2183	req->op = NIX_RX_ACTION_DEFAULT;
   2184	req->vtag0_valid = true;
   2185	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
   2186	req->set_cntr = 1;
   2187
   2188	err = otx2_sync_mbox_msg(&pf->mbox);
   2189	if (err)
   2190		goto out;
   2191
   2192	/* tx */
   2193	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
   2194	if (!vtag_req) {
   2195		err = -ENOMEM;
   2196		goto out;
   2197	}
   2198
   2199	/* configure tx vtag params */
   2200	vtag_req->vtag_size = VTAGSIZE_T4;
   2201	vtag_req->cfg_type = 0; /* tx vlan cfg */
   2202	vtag_req->tx.cfg_vtag0 = 1;
   2203	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
   2204
   2205	err = otx2_sync_mbox_msg(&pf->mbox);
   2206	if (err)
   2207		goto out;
   2208
   2209	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
   2210			(&pf->mbox.mbox, 0, &vtag_req->hdr);
   2211	if (IS_ERR(vtag_rsp)) {
   2212		err = PTR_ERR(vtag_rsp);
   2213		goto out;
   2214	}
   2215	config->tx_vtag_idx = vtag_rsp->vtag0_idx;
   2216
   2217	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
   2218	if (!req) {
   2219		err = -ENOMEM;
   2220		goto out;
   2221	}
   2222
   2223	eth_zero_addr((u8 *)&req->mask.dmac);
   2224	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
   2225	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
   2226	req->features = BIT_ULL(NPC_DMAC);
   2227	req->channel = pf->hw.tx_chan_base;
   2228	req->intf = NIX_INTF_TX;
   2229	req->vf = vf + 1;
   2230	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
   2231	req->vtag0_def = vtag_rsp->vtag0_idx;
   2232	req->vtag0_op = VTAG_INSERT;
   2233	req->set_cntr = 1;
   2234
   2235	err = otx2_sync_mbox_msg(&pf->mbox);
   2236out:
   2237	config->vlan = vlan;
   2238	mutex_unlock(&pf->mbox.lock);
   2239	return err;
   2240}
   2241
   2242static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
   2243			    __be16 proto)
   2244{
   2245	struct otx2_nic *pf = netdev_priv(netdev);
   2246	struct pci_dev *pdev = pf->pdev;
   2247
   2248	if (!netif_running(netdev))
   2249		return -EAGAIN;
   2250
   2251	if (vf >= pci_num_vf(pdev))
   2252		return -EINVAL;
   2253
   2254	/* qos is currently unsupported */
   2255	if (vlan >= VLAN_N_VID || qos)
   2256		return -EINVAL;
   2257
   2258	if (proto != htons(ETH_P_8021Q))
   2259		return -EPROTONOSUPPORT;
   2260
   2261	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
   2262		return -EOPNOTSUPP;
   2263
   2264	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
   2265}
   2266
   2267static int otx2_get_vf_config(struct net_device *netdev, int vf,
   2268			      struct ifla_vf_info *ivi)
   2269{
   2270	struct otx2_nic *pf = netdev_priv(netdev);
   2271	struct pci_dev *pdev = pf->pdev;
   2272	struct otx2_vf_config *config;
   2273
   2274	if (!netif_running(netdev))
   2275		return -EAGAIN;
   2276
   2277	if (vf >= pci_num_vf(pdev))
   2278		return -EINVAL;
   2279
   2280	config = &pf->vf_configs[vf];
   2281	ivi->vf = vf;
   2282	ether_addr_copy(ivi->mac, config->mac);
   2283	ivi->vlan = config->vlan;
   2284	ivi->trusted = config->trusted;
   2285
   2286	return 0;
   2287}
   2288
   2289static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
   2290			    int qidx)
   2291{
   2292	struct page *page;
   2293	u64 dma_addr;
   2294	int err = 0;
   2295
   2296	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
   2297				     offset_in_page(xdpf->data), xdpf->len,
   2298				     DMA_TO_DEVICE);
   2299	if (dma_mapping_error(pf->dev, dma_addr))
   2300		return -ENOMEM;
   2301
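       	/* otx2_xdp_sq_append_pkt() returns true on success; on failure
       	 * unmap and drop the frame.
       	 */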
   2302	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
   2303	if (!err) {
   2304		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
   2305		page = virt_to_page(xdpf->data);
   2306		put_page(page);
   2307		return -ENOMEM;
   2308	}
   2309	return 0;
   2310}
   2311
   2312static int otx2_xdp_xmit(struct net_device *netdev, int n,
   2313			 struct xdp_frame **frames, u32 flags)
   2314{
   2315	struct otx2_nic *pf = netdev_priv(netdev);
   2316	int qidx = smp_processor_id();
   2317	struct otx2_snd_queue *sq;
   2318	int drops = 0, i;
   2319
   2320	if (!netif_running(netdev))
   2321		return -ENETDOWN;
   2322
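       	/* XDP SQs follow the regular Tx SQs; pick this CPU's XDP SQ */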
   2323	qidx += pf->hw.tx_queues;
   2324	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
   2325
   2326	/* Abort xmit if the XDP SQ is not set up */
   2327	if (unlikely(!sq))
   2328		return -ENXIO;
   2329
   2330	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
   2331		return -EINVAL;
   2332
   2333	for (i = 0; i < n; i++) {
   2334		struct xdp_frame *xdpf = frames[i];
   2335		int err;
   2336
   2337		err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
   2338		if (err)
   2339			drops++;
   2340	}
   2341	return n - drops;
   2342}
   2343
   2344static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
   2345{
   2346	struct net_device *dev = pf->netdev;
   2347	bool if_up = netif_running(pf->netdev);
   2348	struct bpf_prog *old_prog;
   2349
   2350	if (prog && dev->mtu > MAX_XDP_MTU) {
   2351		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
   2352		return -EOPNOTSUPP;
   2353	}
   2354
   2355	if (if_up)
   2356		otx2_stop(pf->netdev);
   2357
   2358	old_prog = xchg(&pf->xdp_prog, prog);
   2359
   2360	if (old_prog)
   2361		bpf_prog_put(old_prog);
   2362
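       	/* Keep one program reference per Rx queue; one reference came
       	 * with the XDP setup request, so take rx_queues - 1 more.
       	 */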
   2363	if (pf->xdp_prog)
   2364		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
   2365
   2366	/* Network stack and XDP share the same rx queues.
   2367	 * Use separate tx queues for XDP and the network stack.
   2368	 */
   2369	if (pf->xdp_prog)
   2370		pf->hw.xdp_queues = pf->hw.rx_queues;
   2371	else
   2372		pf->hw.xdp_queues = 0;
   2373
   2374	pf->hw.tot_tx_queues += pf->hw.xdp_queues;
   2375
   2376	if (if_up)
   2377		otx2_open(pf->netdev);
   2378
   2379	return 0;
   2380}
   2381
   2382static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
   2383{
   2384	struct otx2_nic *pf = netdev_priv(netdev);
   2385
   2386	switch (xdp->command) {
   2387	case XDP_SETUP_PROG:
   2388		return otx2_xdp_setup(pf, xdp->prog);
   2389	default:
   2390		return -EINVAL;
   2391	}
   2392}
   2393
   2394static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
   2395				   int req_perm)
   2396{
   2397	struct set_vf_perm *req;
   2398	int rc;
   2399
   2400	mutex_lock(&pf->mbox.lock);
   2401	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
   2402	if (!req) {
   2403		rc = -ENOMEM;
   2404		goto out;
   2405	}
   2406
   2407	/* Let AF reset VF permissions as sriov is disabled */
   2408	if (req_perm == OTX2_RESET_VF_PERM) {
   2409		req->flags |= RESET_VF_PERM;
   2410	} else if (req_perm == OTX2_TRUSTED_VF) {
   2411		if (pf->vf_configs[vf].trusted)
   2412			req->flags |= VF_TRUSTED;
   2413	}
   2414
   2415	req->vf = vf;
   2416	rc = otx2_sync_mbox_msg(&pf->mbox);
   2417out:
   2418	mutex_unlock(&pf->mbox.lock);
   2419	return rc;
   2420}
   2421
   2422static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
   2423				 bool enable)
   2424{
   2425	struct otx2_nic *pf = netdev_priv(netdev);
   2426	struct pci_dev *pdev = pf->pdev;
   2427	int rc;
   2428
   2429	if (vf >= pci_num_vf(pdev))
   2430		return -EINVAL;
   2431
   2432	if (pf->vf_configs[vf].trusted == enable)
   2433		return 0;
   2434
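       	/* Record the new trust state first; revert it if the AF rejects it */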
   2435	pf->vf_configs[vf].trusted = enable;
   2436	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
   2437
   2438	if (rc)
   2439		pf->vf_configs[vf].trusted = !enable;
   2440	else
   2441		netdev_info(pf->netdev, "VF %d is %strusted\n",
   2442			    vf, enable ? "" : "not ");
   2443	return rc;
   2444}
   2445
   2446static const struct net_device_ops otx2_netdev_ops = {
   2447	.ndo_open		= otx2_open,
   2448	.ndo_stop		= otx2_stop,
   2449	.ndo_start_xmit		= otx2_xmit,
   2450	.ndo_fix_features	= otx2_fix_features,
   2451	.ndo_set_mac_address    = otx2_set_mac_address,
   2452	.ndo_change_mtu		= otx2_change_mtu,
   2453	.ndo_set_rx_mode	= otx2_set_rx_mode,
   2454	.ndo_set_features	= otx2_set_features,
   2455	.ndo_tx_timeout		= otx2_tx_timeout,
   2456	.ndo_get_stats64	= otx2_get_stats64,
   2457	.ndo_eth_ioctl		= otx2_ioctl,
   2458	.ndo_set_vf_mac		= otx2_set_vf_mac,
   2459	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
   2460	.ndo_get_vf_config	= otx2_get_vf_config,
   2461	.ndo_bpf		= otx2_xdp,
   2462	.ndo_xdp_xmit           = otx2_xdp_xmit,
   2463	.ndo_setup_tc		= otx2_setup_tc,
   2464	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
   2465};
   2466
   2467static int otx2_wq_init(struct otx2_nic *pf)
   2468{
   2469	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
   2470	if (!pf->otx2_wq)
   2471		return -ENOMEM;
   2472
   2473	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
   2474	INIT_WORK(&pf->reset_task, otx2_reset_task);
   2475	return 0;
   2476}
   2477
   2478static int otx2_check_pf_usable(struct otx2_nic *nic)
   2479{
   2480	u64 rev;
   2481
   2482	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
   2483	rev = (rev >> 12) & 0xFF;
   2484	/* Check if AF has set up the revision for the RVUM block;
   2485	 * otherwise this driver's probe should be deferred
   2486	 * until the AF driver comes up.
   2487	 */
   2488	if (!rev) {
   2489		dev_warn(nic->dev,
   2490			 "AF is not initialized, deferring probe\n");
   2491		return -EPROBE_DEFER;
   2492	}
   2493	return 0;
   2494}
   2495
   2496static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
   2497{
   2498	struct otx2_hw *hw = &pf->hw;
   2499	int num_vec, err;
   2500
   2501	/* NPA interrupts are not registered, so alloc only
   2502	 * up to the NIX vector offset.
   2503	 */
   2504	num_vec = hw->nix_msixoff;
   2505	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
   2506
   2507	otx2_disable_mbox_intr(pf);
   2508	pci_free_irq_vectors(hw->pdev);
   2509	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
   2510	if (err < 0) {
   2511		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
   2512			__func__, num_vec);
   2513		return err;
   2514	}
   2515
   2516	return otx2_register_mbox_intr(pf, false);
   2517}
   2518
   2519static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
   2520{
   2521	int i;
   2522
   2523	pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
   2524				      sizeof(struct otx2_vf_config),
   2525				      GFP_KERNEL);
   2526	if (!pf->vf_configs)
   2527		return -ENOMEM;
   2528
   2529	for (i = 0; i < pf->total_vfs; i++) {
   2530		pf->vf_configs[i].pf = pf;
   2531		pf->vf_configs[i].intf_down = true;
   2532		pf->vf_configs[i].trusted = false;
   2533		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
   2534				  otx2_vf_link_event_task);
   2535	}
   2536
   2537	return 0;
   2538}
   2539
   2540static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
   2541{
   2542	int i;
   2543
   2544	if (!pf->vf_configs)
   2545		return;
   2546
   2547	for (i = 0; i < pf->total_vfs; i++) {
   2548		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
   2549		otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
   2550	}
   2551}
   2552
   2553static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
   2554{
   2555	struct device *dev = &pdev->dev;
   2556	struct net_device *netdev;
   2557	struct otx2_nic *pf;
   2558	struct otx2_hw *hw;
   2559	int err, qcount;
   2560	int num_vec;
   2561
   2562	err = pcim_enable_device(pdev);
   2563	if (err) {
   2564		dev_err(dev, "Failed to enable PCI device\n");
   2565		return err;
   2566	}
   2567
   2568	err = pci_request_regions(pdev, DRV_NAME);
   2569	if (err) {
   2570		dev_err(dev, "PCI request regions failed 0x%x\n", err);
   2571		return err;
   2572	}
   2573
   2574	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
   2575	if (err) {
   2576		dev_err(dev, "DMA mask config failed, abort\n");
   2577		goto err_release_regions;
   2578	}
   2579
   2580	pci_set_master(pdev);
   2581
   2582	/* Set number of queues */
   2583	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
   2584
   2585	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
   2586	if (!netdev) {
   2587		err = -ENOMEM;
   2588		goto err_release_regions;
   2589	}
   2590
   2591	pci_set_drvdata(pdev, netdev);
   2592	SET_NETDEV_DEV(netdev, &pdev->dev);
   2593	pf = netdev_priv(netdev);
   2594	pf->netdev = netdev;
   2595	pf->pdev = pdev;
   2596	pf->dev = dev;
   2597	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
   2598	pf->flags |= OTX2_FLAG_INTF_DOWN;
   2599
   2600	hw = &pf->hw;
   2601	hw->pdev = pdev;
   2602	hw->rx_queues = qcount;
   2603	hw->tx_queues = qcount;
   2604	hw->tot_tx_queues = qcount;
   2605	hw->max_queues = qcount;
   2606	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
   2607	/* Use 128-byte CQE descriptors by default */
   2608	hw->xqe_size = 128;
   2609
   2610	num_vec = pci_msix_vec_count(pdev);
   2611	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
   2612					  GFP_KERNEL);
   2613	if (!hw->irq_name) {
   2614		err = -ENOMEM;
   2615		goto err_free_netdev;
   2616	}
   2617
   2618	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
   2619					 sizeof(cpumask_var_t), GFP_KERNEL);
   2620	if (!hw->affinity_mask) {
   2621		err = -ENOMEM;
   2622		goto err_free_netdev;
   2623	}
   2624
   2625	/* Map CSRs */
   2626	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
   2627	if (!pf->reg_base) {
   2628		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
   2629		err = -ENOMEM;
   2630		goto err_free_netdev;
   2631	}
   2632
   2633	err = otx2_check_pf_usable(pf);
   2634	if (err)
   2635		goto err_free_netdev;
   2636
   2637	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
   2638				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
   2639	if (err < 0) {
   2640		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
   2641			__func__, num_vec);
   2642		goto err_free_netdev;
   2643	}
   2644
   2645	otx2_setup_dev_hw_settings(pf);
   2646
   2647	/* Init PF <=> AF mailbox stuff */
   2648	err = otx2_pfaf_mbox_init(pf);
   2649	if (err)
   2650		goto err_free_irq_vectors;
   2651
   2652	/* Register mailbox interrupt */
   2653	err = otx2_register_mbox_intr(pf, true);
   2654	if (err)
   2655		goto err_mbox_destroy;
   2656
   2657	/* Request AF to attach NPA and NIX LFs to this PF.
   2658	 * NIX and NPA LFs are needed for this PF to function as a NIC.
   2659	 */
   2660	err = otx2_attach_npa_nix(pf);
   2661	if (err)
   2662		goto err_disable_mbox_intr;
   2663
   2664	err = otx2_realloc_msix_vectors(pf);
   2665	if (err)
   2666		goto err_detach_rsrc;
   2667
   2668	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
   2669	if (err)
   2670		goto err_detach_rsrc;
   2671
   2672	err = cn10k_lmtst_init(pf);
   2673	if (err)
   2674		goto err_detach_rsrc;
   2675
   2676	/* Assign default mac address */
   2677	otx2_get_mac_from_af(netdev);
   2678
   2679	/* Don't check for errors; proceed without PTP if init fails */
   2680	otx2_ptp_init(pf);
   2681
   2682	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
   2683	 * HW allocates a buffer pointer from the stack and uses it for DMA'ing
   2684	 * an ingress packet. In some scenarios HW can free allocated buffer
   2685	 * pointers back to the pool. This makes it impossible for SW to maintain
   2686	 * a parallel list where the physical addresses of the buffer pointers
   2687	 * (IOVAs) given to HW could be saved for later reference.
   2688	 *
   2689	 * So the only way to convert Rx packet's buffer address is to use
   2690	 * IOMMU's iova_to_phys() handler which translates the address by
   2691	 * walking through the translation tables.
   2692	 */
   2693	pf->iommu_domain = iommu_get_domain_for_dev(dev);
   2694
   2695	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
   2696			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
   2697			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
   2698			       NETIF_F_GSO_UDP_L4);
   2699	netdev->features |= netdev->hw_features;
   2700
   2701	err = otx2_mcam_flow_init(pf);
   2702	if (err)
   2703		goto err_ptp_destroy;
   2704
   2705	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
   2706		netdev->hw_features |= NETIF_F_NTUPLE;
   2707
   2708	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
   2709		netdev->priv_flags |= IFF_UNICAST_FLT;
   2710
   2711	/* Support TSO on tag interface */
   2712	netdev->vlan_features |= netdev->features;
   2713	netdev->hw_features  |= NETIF_F_HW_VLAN_CTAG_TX |
   2714				NETIF_F_HW_VLAN_STAG_TX;
   2715	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
   2716		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
   2717				       NETIF_F_HW_VLAN_STAG_RX;
   2718	netdev->features |= netdev->hw_features;
   2719
   2720	/* HW supports tc offload, but it is mutually exclusive with n-tuple filters */
   2721	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
   2722		netdev->hw_features |= NETIF_F_HW_TC;
   2723
   2724	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
   2725
   2726	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
   2727	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
   2728
   2729	netdev->netdev_ops = &otx2_netdev_ops;
   2730
   2731	netdev->min_mtu = OTX2_MIN_MTU;
   2732	netdev->max_mtu = otx2_get_max_mtu(pf);
   2733
   2734	err = register_netdev(netdev);
   2735	if (err) {
   2736		dev_err(dev, "Failed to register netdevice\n");
   2737		goto err_del_mcam_entries;
   2738	}
   2739
   2740	err = otx2_wq_init(pf);
   2741	if (err)
   2742		goto err_unreg_netdev;
   2743
   2744	otx2_set_ethtool_ops(netdev);
   2745
   2746	err = otx2_init_tc(pf);
   2747	if (err)
   2748		goto err_mcam_flow_del;
   2749
   2750	err = otx2_register_dl(pf);
   2751	if (err)
   2752		goto err_mcam_flow_del;
   2753
   2754	/* Initialize SR-IOV resources */
   2755	err = otx2_sriov_vfcfg_init(pf);
   2756	if (err)
   2757		goto err_pf_sriov_init;
   2758
   2759	/* Enable link notifications */
   2760	otx2_cgx_config_linkevents(pf, true);
   2761
   2762#ifdef CONFIG_DCB
   2763	err = otx2_dcbnl_set_ops(netdev);
   2764	if (err)
   2765		goto err_pf_sriov_init;
   2766#endif
   2767
   2768	return 0;
   2769
   2770err_pf_sriov_init:
   2771	otx2_shutdown_tc(pf);
   2772err_mcam_flow_del:
   2773	otx2_mcam_flow_del(pf);
   2774err_unreg_netdev:
   2775	unregister_netdev(netdev);
   2776err_del_mcam_entries:
   2777	otx2_mcam_flow_del(pf);
   2778err_ptp_destroy:
   2779	otx2_ptp_destroy(pf);
   2780err_detach_rsrc:
   2781	if (pf->hw.lmt_info)
   2782		free_percpu(pf->hw.lmt_info);
   2783	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
   2784		qmem_free(pf->dev, pf->dync_lmt);
   2785	otx2_detach_resources(&pf->mbox);
   2786err_disable_mbox_intr:
   2787	otx2_disable_mbox_intr(pf);
   2788err_mbox_destroy:
   2789	otx2_pfaf_mbox_destroy(pf);
   2790err_free_irq_vectors:
   2791	pci_free_irq_vectors(hw->pdev);
   2792err_free_netdev:
   2793	pci_set_drvdata(pdev, NULL);
   2794	free_netdev(netdev);
   2795err_release_regions:
   2796	pci_release_regions(pdev);
   2797	return err;
   2798}
   2799
   2800static void otx2_vf_link_event_task(struct work_struct *work)
   2801{
   2802	struct otx2_vf_config *config;
   2803	struct cgx_link_info_msg *req;
   2804	struct mbox_msghdr *msghdr;
   2805	struct otx2_nic *pf;
   2806	int vf_idx;
   2807
   2808	config = container_of(work, struct otx2_vf_config,
   2809			      link_event_work.work);
   2810	vf_idx = config - config->pf->vf_configs;
   2811	pf = config->pf;
   2812
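       	/* Forward the PF's current link state to this VF via a
       	 * PF->VF mailbox up-message.
       	 */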
   2813	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
   2814					 sizeof(*req), sizeof(struct msg_rsp));
   2815	if (!msghdr) {
   2816		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
   2817		return;
   2818	}
   2819
   2820	req = (struct cgx_link_info_msg *)msghdr;
   2821	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
   2822	req->hdr.sig = OTX2_MBOX_REQ_SIG;
   2823	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
   2824
   2825	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
   2826}
   2827
   2828static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
   2829{
   2830	struct net_device *netdev = pci_get_drvdata(pdev);
   2831	struct otx2_nic *pf = netdev_priv(netdev);
   2832	int ret;
   2833
   2834	/* Init PF <=> VF mailbox stuff */
   2835	ret = otx2_pfvf_mbox_init(pf, numvfs);
   2836	if (ret)
   2837		return ret;
   2838
   2839	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
   2840	if (ret)
   2841		goto free_mbox;
   2842
   2843	ret = otx2_pf_flr_init(pf, numvfs);
   2844	if (ret)
   2845		goto free_intr;
   2846
   2847	ret = otx2_register_flr_me_intr(pf, numvfs);
   2848	if (ret)
   2849		goto free_flr;
   2850
   2851	ret = pci_enable_sriov(pdev, numvfs);
   2852	if (ret)
   2853		goto free_flr_intr;
   2854
   2855	return numvfs;
   2856free_flr_intr:
   2857	otx2_disable_flr_me_intr(pf);
   2858free_flr:
   2859	otx2_flr_wq_destroy(pf);
   2860free_intr:
   2861	otx2_disable_pfvf_mbox_intr(pf, numvfs);
   2862free_mbox:
   2863	otx2_pfvf_mbox_destroy(pf);
   2864	return ret;
   2865}
   2866
   2867static int otx2_sriov_disable(struct pci_dev *pdev)
   2868{
   2869	struct net_device *netdev = pci_get_drvdata(pdev);
   2870	struct otx2_nic *pf = netdev_priv(netdev);
   2871	int numvfs = pci_num_vf(pdev);
   2872
   2873	if (!numvfs)
   2874		return 0;
   2875
   2876	pci_disable_sriov(pdev);
   2877
   2878	otx2_disable_flr_me_intr(pf);
   2879	otx2_flr_wq_destroy(pf);
   2880	otx2_disable_pfvf_mbox_intr(pf, numvfs);
   2881	otx2_pfvf_mbox_destroy(pf);
   2882
   2883	return 0;
   2884}
   2885
   2886static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
   2887{
   2888	if (numvfs == 0)
   2889		return otx2_sriov_disable(pdev);
   2890	else
   2891		return otx2_sriov_enable(pdev, numvfs);
   2892}
   2893
   2894static void otx2_remove(struct pci_dev *pdev)
   2895{
   2896	struct net_device *netdev = pci_get_drvdata(pdev);
   2897	struct otx2_nic *pf;
   2898
   2899	if (!netdev)
   2900		return;
   2901
   2902	pf = netdev_priv(netdev);
   2903
   2904	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
   2905
   2906	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
   2907		otx2_config_hw_tx_tstamp(pf, false);
   2908	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
   2909		otx2_config_hw_rx_tstamp(pf, false);
   2910
   2911	/* Disable 802.3x pause frames */
   2912	if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
   2913	    (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
   2914		pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
   2915		pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
   2916		otx2_config_pause_frm(pf);
   2917	}
   2918
   2919#ifdef CONFIG_DCB
   2920	/* Disable PFC config */
   2921	if (pf->pfc_en) {
   2922		pf->pfc_en = 0;
   2923		otx2_config_priority_flow_ctrl(pf);
   2924	}
   2925#endif
   2926	cancel_work_sync(&pf->reset_task);
   2927	/* Disable link notifications */
   2928	otx2_cgx_config_linkevents(pf, false);
   2929
   2930	otx2_unregister_dl(pf);
   2931	unregister_netdev(netdev);
   2932	otx2_sriov_disable(pf->pdev);
   2933	otx2_sriov_vfcfg_cleanup(pf);
   2934	if (pf->otx2_wq)
   2935		destroy_workqueue(pf->otx2_wq);
   2936
   2937	otx2_ptp_destroy(pf);
   2938	otx2_mcam_flow_del(pf);
   2939	otx2_shutdown_tc(pf);
   2940	otx2_detach_resources(&pf->mbox);
   2941	if (pf->hw.lmt_info)
   2942		free_percpu(pf->hw.lmt_info);
   2943	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
   2944		qmem_free(pf->dev, pf->dync_lmt);
   2945	otx2_disable_mbox_intr(pf);
   2946	otx2_pfaf_mbox_destroy(pf);
   2947	pci_free_irq_vectors(pf->pdev);
   2948	pci_set_drvdata(pdev, NULL);
   2949	free_netdev(netdev);
   2950
   2951	pci_release_regions(pdev);
   2952}
   2953
   2954static struct pci_driver otx2_pf_driver = {
   2955	.name = DRV_NAME,
   2956	.id_table = otx2_pf_id_table,
   2957	.probe = otx2_probe,
   2958	.shutdown = otx2_remove,
   2959	.remove = otx2_remove,
   2960	.sriov_configure = otx2_sriov_configure
   2961};
   2962
   2963static int __init otx2_rvupf_init_module(void)
   2964{
   2965	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
   2966
   2967	return pci_register_driver(&otx2_pf_driver);
   2968}
   2969
   2970static void __exit otx2_rvupf_cleanup_module(void)
   2971{
   2972	pci_unregister_driver(&otx2_pf_driver);
   2973}
   2974
   2975module_init(otx2_rvupf_init_module);
   2976module_exit(otx2_rvupf_cleanup_module);