cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vnic_dev.c (30115B)


/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
#include "enic.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh  = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
		(ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
			(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				 ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar[bar_num].len) {
				vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					 type, bar_offset, len,
					 bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
			bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}
EXPORT_SYMBOL(vnic_dev_get_res);

static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

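/* A ring allocation is padded by base_align bytes so base_addr can be
 * rounded up to the required 512-byte boundary; ring->descs then points
 * at the aligned offset inside the single DMA-coherent buffer, which is
 * later freed with its original (unaligned) address and size.
 */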
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
						   ring->size_unaligned,
						   &ring->base_addr_unaligned,
						   GFP_KERNEL);

	if (!ring->descs_unaligned) {
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
			 (int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev, ring->size_unaligned,
				  ring->descs_unaligned,
				  ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}

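/* Legacy register-based devcmd: post the arguments, write the command
 * register, then poll the status register in 100 usec steps for up to
 * 'wait' iterations.  An all-ones status means the PCIe device has been
 * removed.
 */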
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

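/* Work-queue based devcmd (devcmd2): commands are posted to a command
 * descriptor ring and completions are read from a separate result ring.
 * Completion is detected by polling until the result's color bit
 * matches the value software expects; the expected color toggles each
 * time the result ring wraps, so stale entries from the previous pass
 * never match.
 */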
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			  int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay, err;
	u32 fetch_index, new_posted;
	u32 posted = dc2c->posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);

	if (fetch_index == 0xFFFFFFFF)
		return -ENODEV;

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
			    _CMD_N(cmd), fetch_index, posted);
		return -EBUSY;
	}
	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	/* Adding write memory barrier prevents compiler and/or CPU reordering,
	 * thus avoiding descriptor posting before descriptor is initialized.
	 * Otherwise, hardware can read stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}

static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	if (vdev->devcmd2)
		return 0;

	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
	if (!vdev->devcmd2)
		return -ENOMEM;

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
				    DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
		vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
		err = -ENODEV;
		goto err_free_wq;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
			   0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_disable_wq;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
			VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_desc_ring;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return 0;

err_free_desc_ring:
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
	vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
	vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
	kfree(vdev->devcmd2);
	vdev->devcmd2 = NULL;

	return err;
}

static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
}

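/* Proxying wraps one devcmd inside another: args[0] selects the target
 * vNIC, args[1] carries the wrapped command, and the wrapped command's
 * own arguments and results are shifted up one slot (args[0] of the
 * reply holds the proxy status).
 */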
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
				    err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}

void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}

void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}

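/* Dispatch a devcmd according to the currently selected proxy mode.  A
 * typical caller fills a0/a1 and polls with a 1000-iteration wait; as a
 * sketch (mirroring vnic_dev_close() below):
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;
 *	int err;
 *
 *	err = vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
 */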
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				a0, a1, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				a0, a1, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
	}
}

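/* Probe firmware support for 'cmd': CMD_CAPABILITY takes the command
 * number in a0, and the command is treated as supported only when the
 * probe succeeds and returns a0 == 0.
 */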
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
						   sizeof(struct vnic_devcmd_fw_info),
						   &vdev->fw_info_pa, GFP_ATOMIC);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				&a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				&a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
						 sizeof(struct vnic_stats),
						 &vdev->stats_pa, GFP_ATOMIC);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}

int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

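/* Register the DMA-coherent notify block with firmware: a0 carries its
 * bus address, and a1 packs the interrupt index into bits 47:32 with
 * the block size in the low 32 bits.
 */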
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		vdev_neterr(vdev, "notify block %p still allocated\n",
			    vdev->notify);
		return -EINVAL;
	}

	notify_addr = dma_alloc_coherent(&vdev->pdev->dev,
					 sizeof(struct vnic_devcmd_notify),
					 &notify_pa, GFP_ATOMIC);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		dma_free_coherent(&vdev->pdev->dev,
				  sizeof(struct vnic_devcmd_notify),
				  vdev->notify, vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

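/* Firmware updates the notify block asynchronously; word 0 holds the
 * sum of the remaining words, so keep copying and re-checksumming until
 * a self-consistent snapshot has been read.
 */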
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
		(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_notify),
					  vdev->notify, vdev->notify_pa);
		if (vdev->stats)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_stats),
					  vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			dma_free_coherent(&vdev->pdev->dev,
					  sizeof(struct vnic_devcmd_fw_info),
					  vdev->fw_info, vdev->fw_info_pa);
		if (vdev->devcmd2)
			vnic_dev_deinit_devcmd2(vdev);

		kfree(vdev);
	}
}
EXPORT_SYMBOL(vnic_dev_unregister);

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
	unsigned int num_bars)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar, num_bars))
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);

struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);

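/* Prefer the devcmd2 work-queue interface when its BAR resource is
 * present; fall back to the legacy devcmd registers if the resource is
 * missing (old firmware) or devcmd2 init fails.
 */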
int vnic_devcmd_init(struct vnic_dev *vdev)
{
	void __iomem *res;
	int err;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
				  err);
		else
			return 0;
	} else {
		vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
	}
	err = vnic_dev_init_devcmd1(vdev);
	if (err)
		vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);

	return err;
}

int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
	u64 a0, a1 = len;
	int wait = 1000;
	dma_addr_t prov_pa;
	void *prov_buf;
	int ret;

	prov_buf = dma_alloc_coherent(&vdev->pdev->dev, len, &prov_pa, GFP_ATOMIC);
	if (!prov_buf)
		return -ENOMEM;

	memcpy(prov_buf, buf, len);

	a0 = prov_pa;

	ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);

	dma_free_coherent(&vdev->pdev->dev, len, prov_buf, prov_pa);

	return ret;
}

int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
	u64 a0, a1 = 0;
	int wait = 1000;

	a0 = (active ? CMD_ENABLE2_ACTIVE : 0);

	return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}

static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int *status)
{
	u64 a0 = cmd, a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
	if (!ret)
		*status = (int)a0;

	return ret;
}

int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}

int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}

int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = mac_addr[i];

	return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}

/* vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	   variable.
 *
 *	   This function stores the filter_id returned by the firmware in the
 *	   same variable before return;
 *
 *	   In case of DEL filter, the caller passes the RQ number. Return
 *	   value is irrelevant.
 * @data: filter data
 */
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
			struct filter *data)
{
	u64 a0, a1;
	int wait = 1000;
	dma_addr_t tlv_pa;
	int ret = -EINVAL;
	struct filter_tlv *tlv, *tlv_va;
	struct filter_action *action;
	u64 tlv_size;

	if (cmd == CLSF_ADD) {
		tlv_size = sizeof(struct filter) +
			   sizeof(struct filter_action) +
			   2 * sizeof(struct filter_tlv);
		tlv_va = dma_alloc_coherent(&vdev->pdev->dev, tlv_size,
					    &tlv_pa, GFP_ATOMIC);
		if (!tlv_va)
			return -ENOMEM;
		tlv = tlv_va;
		a0 = tlv_pa;
		a1 = tlv_size;
		memset(tlv, 0, tlv_size);
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		*(struct filter *)&tlv->val = *data;

		tlv = (struct filter_tlv *)((char *)tlv +
					    sizeof(struct filter_tlv) +
					    sizeof(struct filter));

		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		action = (struct filter_action *)&tlv->val;
		action->type = FILTER_ACTION_RQ_STEERING;
		action->u.rq_idx = *entry;

		ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
		*entry = (u16)a0;
		dma_free_coherent(&vdev->pdev->dev, tlv_size, tlv_va, tlv_pa);
	} else if (cmd == CLSF_DEL) {
		a0 = *entry;
		ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
	}

	return ret;
}

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
				 u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
				       u64 *supported_versions, u64 *a1)
{
	u64 a0 = feature;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait);
	if (!ret)
		*supported_versions = a0;

	return ret;
}

int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	/* rss_hash_type is valid only when a0 is 1. Adapter which does not
	 * support CMD_CAPABILITY for rss_hash_type has a0 = 0
	 */
	if (err || (a0 != 1))
		return -EOPNOTSUPP;

	a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
	     NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;

	*rss_hash_type = (u8)a1;

	return 0;
}