cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bnxt_sriov.c (33552B)


/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_input *req;
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
        if (rc)
                goto exit;

        if (vf)
                req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req->encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl =
                (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        rc = hwrm_req_send(bp, req);
exit:
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
        return rc;
}

static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_func_cfg_input *req;
        bool old_setting = false;
        struct bnxt_vf_info *vf;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        if (setting)
                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (!rc) {
                req->fid = cpu_to_le16(vf->fw_fid);
                req->flags = cpu_to_le32(func_flags);
                rc = hwrm_req_send(bp, req);
                if (!rc) {
                        if (setting)
                                vf->flags |= BNXT_VF_SPOOFCHK;
                        else
                                vf->flags &= ~BNXT_VF_SPOOFCHK;
                }
        }
        return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_qcfg_output *resp;
        struct hwrm_func_qcfg_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
        if (rc)
                return rc;

        req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
        resp = hwrm_req_hold(bp, req);
        rc = hwrm_req_send(bp, req);
        if (!rc)
                vf->func_qcfg_flags = le16_to_cpu(resp->flags);
        hwrm_req_drop(bp, req);
        return rc;
}

bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return !!(vf->flags & BNXT_VF_TRUST);

        bnxt_hwrm_func_qcfg_flags(bp, vf);
        return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_cfg_input *req;
        int rc;

        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return 0;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (rc)
                return rc;

        req->fid = cpu_to_le16(vf->fw_fid);
        if (vf->flags & BNXT_VF_TRUST)
                req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
        else
                req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
        return hwrm_req_send(bp, req);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;

        bnxt_hwrm_set_trusted_vf(bp, vf);
        return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = bnxt_is_trusted_vf(bp, vf);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_func_cfg_input *req;
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject bc or mc mac addr, zero mac addr means allow
         * VF to use its own mac addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (rc)
                return rc;

        memcpy(vf->mac_addr, mac, ETH_ALEN);

        req->fid = cpu_to_le16(vf->fw_fid);
        req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_req_send(bp, req);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_func_cfg_input *req;
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: needed to implement proper handling of user priority,
         * currently fail the command if there is valid priority
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (!rc) {
                req->fid = cpu_to_le16(vf->fw_fid);
                req->dflt_vlan = cpu_to_le16(vlan_tag);
                req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
                rc = hwrm_req_send(bp, req);
                if (!rc)
                        vf->vlan = vlan_tag;
        }
        return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct bnxt *bp = netdev_priv(dev);
        struct hwrm_func_cfg_input *req;
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (!rc) {
                req->fid = cpu_to_le16(vf->fw_fid);
                req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
                                           FUNC_CFG_REQ_ENABLES_MIN_BW);
                req->max_bw = cpu_to_le32(max_tx_rate);
                req->min_bw = cpu_to_le32(min_tx_rate);
                rc = hwrm_req_send(bp, req);
                if (!rc) {
                        vf->min_tx_rate = min_tx_rate;
                        vf->max_tx_rate = max_tx_rate;
                }
        }
        return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        struct hwrm_func_vf_resc_free_input *req;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
        if (rc)
                return rc;

        hwrm_req_hold(bp, req);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req->vf_id = cpu_to_le16(i);
                rc = hwrm_req_send(bp, req);
                if (rc)
                        break;
        }
        hwrm_req_drop(bp, req);
        return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
}

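/* Allocate the per-VF state array, the VF event bitmap and the DMA pages
 * used to receive HWRM requests forwarded by the VFs; each VF is assigned
 * a BNXT_HWRM_REQ_MAX_SIZE slice of a BNXT_PAGE_SIZE page.
 */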
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
                                BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        /* Max 128 VF's */
        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}

static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
        if (rc)
                return rc;

        req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_req_send(bp, req);
}

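/* Re-apply the PF-administered settings (default MAC, default VLAN,
 * bandwidth limits and trust) for a VF while its resources are being
 * reconfigured (reset path).
 */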
static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
        struct hwrm_func_cfg_input *req;
        struct bnxt_vf_info *vf;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        req->fid = cpu_to_le16(vf->fw_fid);

        if (is_valid_ether_addr(vf->mac_addr)) {
                req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
                memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
        }
        if (vf->vlan) {
                req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
                req->dflt_vlan = cpu_to_le16(vf->vlan);
        }
        if (vf->max_tx_rate) {
                req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
                                            FUNC_CFG_REQ_ENABLES_MIN_BW);
                req->max_bw = cpu_to_le32(vf->max_tx_rate);
                req->min_bw = cpu_to_le32(vf->min_tx_rate);
        }
        if (vf->flags & BNXT_VF_TRUST)
                req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

        return hwrm_req_send(bp, req);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        struct hwrm_func_vf_resource_cfg_input *req;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0, min = 1;
        u16 vf_msix = 0;
        u16 vf_rss;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
        if (rc)
                return rc;

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
                vf_ring_grps = 0;
        } else {
                vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        }
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

        req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                min = 0;
                req->min_rsscos_ctx = cpu_to_le16(min);
        }
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                req->min_cmpl_rings = cpu_to_le16(min);
                req->min_tx_rings = cpu_to_le16(min);
                req->min_rx_rings = cpu_to_le16(min);
                req->min_l2_ctxs = cpu_to_le16(min);
                req->min_vnics = cpu_to_le16(min);
                req->min_stat_ctx = cpu_to_le16(min);
                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
                        req->min_hw_ring_grps = cpu_to_le16(min);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;
                vf_rss /= num_vfs;

                req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req->min_tx_rings = cpu_to_le16(vf_tx_rings);
                req->min_rx_rings = cpu_to_le16(vf_rx_rings);
                req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
                req->min_vnics = cpu_to_le16(vf_vnics);
                req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
                req->min_rsscos_ctx = cpu_to_le16(vf_rss);
        }
        req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req->max_tx_rings = cpu_to_le16(vf_tx_rings);
        req->max_rx_rings = cpu_to_le16(vf_rx_rings);
        req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
        req->max_vnics = cpu_to_le16(vf_vnics);
        req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req->max_rsscos_ctx = cpu_to_le16(vf_rss);
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                req->max_msix = cpu_to_le16(vf_msix / num_vfs);

        hwrm_req_hold(bp, req);
        for (i = 0; i < num_vfs; i++) {
                if (reset)
                        __bnxt_set_vf_params(bp, i);

                req->vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = hwrm_req_send(bp, req);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }

        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -=
                        le16_to_cpu(req->min_hw_ring_grps) * n;
                hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -=
                        le16_to_cpu(req->min_rsscos_ctx) * n;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        hw_resc->max_irqs -= vf_msix * n;

                rc = pf->active_vfs;
        }
        hwrm_req_drop(bp, req);
        return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_cfg_input *req;
        int total_vf_tx_rings = 0;
        u16 vf_ring_grps;
        u32 mtu, i;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
        if (rc)
                return rc;

        /* Remaining rings are distributed equally among the VFs for now */
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
                                   FUNC_CFG_REQ_ENABLES_MRU |
                                   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
        req->mru = cpu_to_le16(mtu);
        req->admin_mtu = cpu_to_le16(mtu);

        req->num_rsscos_ctxs = cpu_to_le16(1);
        req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req->num_tx_rings = cpu_to_le16(vf_tx_rings);
        req->num_rx_rings = cpu_to_le16(vf_rx_rings);
        req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req->num_l2_ctxs = cpu_to_le16(4);

        req->num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        hwrm_req_hold(bp, req);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req->fid = cpu_to_le16(pf->first_vf_id + i);
                rc = hwrm_req_send(bp, req);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req->fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        hwrm_req_drop(bp, req);
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;
                rc = pf->active_vfs;
        }
        return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        if (BNXT_NEW_RM(bp))
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        int rc;

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                return rc;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs, reset);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        return rc;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
                            rc);
                *num_vfs = rc;
        }

        bnxt_ulp_sriov_cfg(bp, *num_vfs);
        return 0;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable the requested number of VFs. At a minimum
         * we require 1 RX and 1 TX ring for each VF. In this minimum config,
         * features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
        avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
        if (rc)
                goto err_out2;

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VF's */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        devl_lock(bp->dl);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VF's */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        devl_unlock(bp->dl);

        bnxt_free_vf_resources(bp);

        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
                rtnl_unlock();
                return 0;
        }
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if enabled VFs is same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previous existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}

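/* Forward an encapsulated HWRM response back to the VF that issued the
 * original request, using the response buffer address supplied by the VF.
 */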
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        struct hwrm_fwd_resp_input *req;
        int rc;

        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
        if (!rc) {
                /* Set the new target id */
                req->target_id = cpu_to_le16(vf->fw_fid);
                req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
                req->encap_resp_len = cpu_to_le16(msg_size);
                req->encap_resp_addr = encap_resp_addr;
                req->encap_resp_cmpl_ring = encap_resp_cpr;
                memcpy(req->encap_resp, encap_resp, msg_size);

                rc = hwrm_req_send(bp, req);
        }
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        struct hwrm_reject_fwd_resp_input *req;
        int rc;

        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
        if (!rc) {
                /* Set the new target id */
                req->target_id = cpu_to_le16(vf->fw_fid);
                req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
                memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

                rc = hwrm_req_send(bp, req);
        }
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        struct hwrm_exec_fwd_resp_input *req;
        int rc;

        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
        if (!rc) {
                /* Set the new target id */
                req->target_id = cpu_to_le16(vf->fw_fid);
                req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
                memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

                rc = hwrm_req_send(bp, req);
        }
        if (rc)
                netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow VF to set a valid MAC address, if trust is set to on or
         * if the PF assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                bool trust = bnxt_is_trusted_vf(bp, vf);

                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    (trust || !is_valid_ether_addr(vf->mac_addr) ||
                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow VF to set a valid MAC address, if trust is set to on.
         * Or VF MAC address must first match MAC address in PF's context.
         * Otherwise, it must match the VF MAC address if firmware spec >=
         * 1.2.2
         */
        if (bnxt_is_trusted_vf(bp, vf)) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else {
                /* There are two cases:
                 * 1. If firmware spec < 0x10202, the VF MAC address is not
                 *    forwarded to the PF and so it doesn't have to match.
                 * 2. Allow the VF to modify its own MAC when the PF has not
                 *    assigned a valid MAC address and firmware spec >= 0x10202.
                 */
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->link_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->link_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}

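/* Validate a forwarded VF HWRM request and either execute it on the VF's
 * behalf or reject it, based on the request type.
 */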
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO Validate if VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VF's and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
        struct hwrm_func_vf_cfg_input *req;
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }

        rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
        if (rc)
                goto mac_done;

        req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
        if (!strict)
                hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
        rc = hwrm_req_send(bp, req);
mac_done:
        if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
                return rc;
        }
        return 0;
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_output *resp;
        struct hwrm_func_qcaps_input *req;
        bool inform_pf = false;

        if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
                return;

        req->fid = cpu_to_le16(0xffff);

        resp = hwrm_req_hold(bp, req);
        if (hwrm_req_send(bp, req))
                goto update_vf_mac_exit;

        /* Store MAC address from the firmware.  There are 2 cases:
         * 1. MAC address is valid.  It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero.  The VF will use a random MAC address by
         *    default but the stored zero MAC will allow the VF user to change
         *    the random MAC address using ndo_set_mac_address() if he wants.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
                /* This means we are now using our own MAC address, let
                 * the PF know about this MAC address.
                 */
                if (!is_valid_ether_addr(bp->vf.mac_addr))
                        inform_pf = true;
        }

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
        hwrm_req_drop(bp, req);
        if (inform_pf)
                bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        if (*num_vfs)
                return -EOPNOTSUPP;
        return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
        return 0;
}
#endif