cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cn23xx_vf_device.c (19382B)


/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "cn23xx_vf_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"

u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;
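
	/* Worked example with illustrative numbers (not from the HRM): a
	 * coprocessor clock of 850 ticks/us gives 850 * 1000 / 1024 = 830
	 * oq ticks per millisecond, so time_intr_in_us = 100 yields
	 * 830 * 100 / 1000 = 83 oq ticks.
	 */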

	return oqticks_per_us;
}

static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
{
	u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
	int ret_val = 0;
	u32 q_no;
	u64 d64;

	for (q_no = 0; q_no < num_queues; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
		d64 |= CN23XX_PKT_INPUT_CTL_RST;
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   d64);
	}

	/* wait until the RST bit is clear or the RST and QUIET bits are set */
	for (q_no = 0; q_no < num_queues; q_no++) {
		u64 reg_val = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop) {
			WRITE_ONCE(reg_val, octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
			loop--;
		}
		if (!loop) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				q_no);
			return -1;
		}
		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
			   ~CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(reg_val));

		WRITE_ONCE(reg_val, octeon_read_csr64(
		    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset failed for qno: %u\n",
				q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}

static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	struct octeon_instr_queue *iq;
	u64 q_no, intr_threshold;
	u64 d64;

	if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))
		return -1;

	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
		void __iomem *inst_cnt_reg;

		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no),
				   0xFFFFFFFF);
		iq = oct->instr_queue[q_no];

		if (iq)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no);

		d64 = octeon_read_csr64(oct,
					CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no));

		d64 &= 0xEFFFFFFFFFFFFFFFL;

		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
				   d64);

		/* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
		 * the Input Queues
		 */
		octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
				   CN23XX_PKT_INPUT_CTL_MASK);

		/* set the wmark level to trigger PI_INT */
		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
				 CN23XX_PKT_IN_DONE_WMARK_MASK;
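
		/* The watermark occupies the field at
		 * CN23XX_PKT_IN_DONE_WMARK_BIT_POS in the instruction-count
		 * register; the read-modify-write below replaces only that
		 * field, preserving the rest of the register.
		 */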
		writeq((readq(inst_cnt_reg) &
			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
		       inst_cnt_reg);
	}
	return 0;
}

static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
{
	u32 reg_val;
	u32 q_no;

	for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
				 0xFFFFFFFF);

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));

		reg_val &= 0xEFFFFFFFFFFFFFFFL;

		reg_val =
		    octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));

		/* clear IPTR */
		reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;

		/* set DPTR */
		reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue ScatterList: reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data: reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* write all the selected settings */
		octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
				 reg_val);
	}
}

static int cn23xx_setup_vf_device_regs(struct octeon_device *oct)
{
	if (cn23xx_vf_setup_global_input_regs(oct))
		return -1;

	cn23xx_vf_setup_global_output_regs(oct);

	return 0;
}

static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
	u64 pkt_in_done;

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = readq(iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
		       iq->inst_cnt_reg);
	}
	iq->reset_instr_cnt = 0;
}

static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkts_sent and pkts_credit regs */
	droq->pkts_sent_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
}

static void cn23xx_vf_mbox_thread(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;

	octeon_mbox_process_message(mbox);
}

static int cn23xx_free_vf_mbox(struct octeon_device *oct)
{
	cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
	vfree(oct->mbox[0]);
	return 0;
}

static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
{
	struct octeon_mbox *mbox;

	/* vzalloc is the idiomatic replacement for vmalloc + memset(0) */
	mbox = vzalloc(sizeof(*mbox));
	if (!mbox)
		return 1;

	spin_lock_init(&mbox->lock);

	mbox->oct_dev = oct;

	mbox->q_no = 0;

	mbox->state = OCTEON_MBOX_STATE_IDLE;

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0);
	/* VF reads from SIG0 reg */
	mbox->mbox_read_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
	/* VF writes into SIG1 reg */
	mbox->mbox_write_reg =
	    (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

	INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
			  cn23xx_vf_mbox_thread);

	mbox->mbox_poll_wk.ctxptr = mbox;

	oct->mbox[0] = mbox;

	writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);

	return 0;
}

static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
{
	u32 q_no;

	for (q_no = 0; q_no < oct->num_iqs; q_no++) {
		u64 reg_val;

		/* set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}

		/* set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}
	}
	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		u32 reg_val;

		/* set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no)) {
			reg_val = octeon_read_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
			reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			octeon_write_csr(
			    oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
		}
	}

	return 0;
}

static void cn23xx_disable_vf_io_queues(struct octeon_device *oct)
{
	u32 num_queues = oct->num_iqs;

	/* per HRM, rings can only be disabled via reset operation,
	 * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
	 */
	if (num_queues < oct->num_oqs)
		num_queues = oct->num_oqs;

	cn23xx_vf_reset_io_queues(oct, num_queues);
}

void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
{
	struct octeon_mbox_cmd mbox_cmd;

	mbox_cmd.msg.u64 = 0;
	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 0;
	mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST;
	mbox_cmd.msg.s.len = 1;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = NULL;
	mbox_cmd.fn_arg = NULL;

	octeon_mbox_write(oct, &mbox_cmd);
}

static void octeon_pfvf_hs_callback(struct octeon_device *oct,
				    struct octeon_mbox_cmd *cmd,
				    void *arg)
{
	u32 major = 0;

	memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params,
	       CN23XX_MAILBOX_MSGPARAM_SIZE);
	if (cmd->recv_len > 1) {
		major = ((struct lio_version *)(cmd->data))->major;
		major = major << 16;
	}
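
	/* Publish the handshake result: bit 0 flags completion, and the PF
	 * driver's major version (when one was received) sits in bits 31:16,
	 * so e.g. a PF major version of 1 posts the status word 0x10001.
	 */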
	atomic_set((atomic_t *)arg, major | 1);
}

int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
{
	struct octeon_mbox_cmd mbox_cmd;
	u32 q_no, count = 0;
	atomic_t status;
	u32 pfmajor;
	u32 vfmajor;
	u32 ret;

	/* Sending VF_ACTIVE indication to the PF driver */
	dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n");

	mbox_cmd.msg.u64 = 0;
	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
	mbox_cmd.msg.s.len = 2;
	mbox_cmd.data[0] = 0;
	((struct lio_version *)&mbox_cmd.data[0])->major =
						LIQUIDIO_BASE_MAJOR_VERSION;
	((struct lio_version *)&mbox_cmd.data[0])->minor =
						LIQUIDIO_BASE_MINOR_VERSION;
	((struct lio_version *)&mbox_cmd.data[0])->micro =
						LIQUIDIO_BASE_MICRO_VERSION;
	mbox_cmd.q_no = 0;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
	mbox_cmd.fn_arg = &status;

	octeon_mbox_write(oct, &mbox_cmd);

	atomic_set(&status, 0);
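
	/* Poll until octeon_pfvf_hs_callback posts a nonzero status word.
	 * Each iteration sleeps for one jiffy, so the 100000-iteration
	 * bound acts as a coarse timeout rather than a precise deadline.
	 */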
	do {
		schedule_timeout_uninterruptible(1);
	} while ((!atomic_read(&status)) && (count++ < 100000));

	ret = atomic_read(&status);
	if (!ret) {
		dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
		return 1;
	}

	for (q_no = 0; q_no < oct->num_iqs; q_no++)
		oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;

	vfmajor = LIQUIDIO_BASE_MAJOR_VERSION;
	pfmajor = ret >> 16;
	if (pfmajor != vfmajor) {
		dev_err(&oct->pci_dev->dev,
			"VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n",
			vfmajor, pfmajor);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev,
		"VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n",
		vfmajor, pfmajor);

	dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n",
		oct->pfvf_hsword.pkind);

	return 0;
}

static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
{
	struct octeon_device *oct = ioq_vector->oct_dev;
	u64 mbox_int_val;

	if (!ioq_vector->droq_index) {
		/* read and clear by writing 1 */
		mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
		writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
		if (octeon_mbox_read(oct->mbox[0]))
			schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
					      msecs_to_jiffies(0));
	}
}

static u64 cn23xx_vf_msix_interrupt_handler(void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 pkts_sent;
	u64 ret = 0;

	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
	pkts_sent = readq(droq->pkts_sent_reg);

	/* If our device has interrupted, then proceed. A value of all F's
	 * means the interrupt was triggered on an error and the PCI read
	 * failed, so bail out in that case as well.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return ret;

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if ((pkts_sent & CN23XX_INTR_PO_INT) ||
	    (pkts_sent & CN23XX_INTR_PI_INT)) {
		if (pkts_sent & CN23XX_INTR_PO_INT)
			ret |= MSIX_PO_INT;
	}

	if (pkts_sent & CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= MSIX_PI_INT;

	if (pkts_sent & CN23XX_INTR_MBOX_INT) {
		cn23xx_handle_vf_mbox_intr(ioq_vector);
		ret |= MSIX_MBOX_INT;
	}

	return ret;
}

static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 pkt_in_done = readl(iq->inst_cnt_reg);
	u32 last_done;
	u32 new_idx;

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	/* Advancing the read index by the number of newly completed
	 * instructions, modulo the IQ size, gives the new index.  The
	 * iq->reset_instr_cnt is always zero for cn23xx, so no extra
	 * adjustments are needed.
	 */
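	/* Illustrative example (hypothetical values): with max_count = 1024,
	 * octeon_read_index = 1000 and last_done = 100, the result is
	 * (1000 + 100) % 1024 = 76.  The unsigned subtraction above also
	 * behaves correctly when the hardware counter wraps past zero.
	 */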
	new_idx = (iq->octeon_read_index +
		   (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
		  iq->max_count;

	return new_idx;
}

static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	u32 q_no, time_threshold;

	if (intr_flag & OCTEON_OUTPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Set up interrupt packet and time thresholds
			 * for all the OQs
			 */
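			/* PKT_INT_LEVELS packs the packet-count threshold
			 * into the low 32 bits and the time threshold (in
			 * oq ticks) into the upper 32 bits, hence the
			 * << 32 in the write below.
			 */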
			time_threshold = cn23xx_vf_get_oq_ticks(
				oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));

			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
			     ((u64)time_threshold << 32)));
		}
	}

	if (intr_flag & OCTEON_INPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Set CINT_ENB to enable IQ interrupt */
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
			    ((octeon_read_csr64(
				  oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
			      ~CN23XX_PKT_IN_DONE_CNT_MASK) |
			     CN23XX_INTR_CINT_ENB));
		}
	}

	/* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */
	if (intr_flag & OCTEON_MBOX_INTR) {
		octeon_write_csr64(
		    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
		    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) |
		     CN23XX_INTR_MBOX_ENB));
	}
}

static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
	u32 q_no;

	if (intr_flag & OCTEON_OUTPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			/* Write all 1's in INT_LEVEL reg to disable PO_INT */
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
			    0x3fffffffffffff);
		}
	}
	if (intr_flag & OCTEON_INPUT_INTR) {
		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
			octeon_write_csr64(
			    oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
			    (octeon_read_csr64(
				 oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
			     ~(CN23XX_INTR_CINT_ENB |
			       CN23XX_PKT_IN_DONE_CNT_MASK)));
		}
	}

	if (intr_flag & OCTEON_MBOX_INTR) {
		octeon_write_csr64(
		    oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
		    (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) &
		     ~CN23XX_INTR_MBOX_ENB));
	}
}

int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	u32 rings_per_vf;
	u64 reg_val;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
	reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));

	oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;

	rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
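
	/* rings_per_vf was programmed into INPUT_CONTROL by the PF driver;
	 * it caps how many IQ/OQ pairs this VF is allowed to configure.
	 */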

	cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
	if (!cn23xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	if (oct->sriov_info.rings_per_vf > rings_per_vf) {
		dev_warn(&oct->pci_dev->dev,
			 "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
			 oct->sriov_info.rings_per_vf, rings_per_vf,
			 rings_per_vf);
		oct->sriov_info.rings_per_vf = rings_per_vf;
	} else if (rings_per_vf > num_present_cpus()) {
		dev_warn(&oct->pci_dev->dev,
			 "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
			 rings_per_vf,
			 num_present_cpus(),
			 num_present_cpus());
		oct->sriov_info.rings_per_vf = num_present_cpus();
	} else {
		oct->sriov_info.rings_per_vf = rings_per_vf;
	}

	oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
	oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
	oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
	oct->fn_list.free_mbox = cn23xx_free_vf_mbox;

	oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;

	oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
	oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

	oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
	oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;

	oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
	oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;

	return 0;
}