cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qlcnic_sriov_common.c (58046B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 */

#include <linux/types.h>

#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"

#define QLC_BC_COMMAND	0
#define QLC_BC_RESPONSE	1

#define QLC_MBOX_RESP_TIMEOUT		(10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT	(10 * HZ)

#define QLC_BC_MSG		0
#define QLC_BC_CFREE		1
#define QLC_BC_FLR		2
#define QLC_BC_HDR_SZ		16
#define QLC_BC_PAYLOAD_SZ	(1024 - QLC_BC_HDR_SZ)
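/* A back-channel (BC) message is carried in fixed 1024-byte mailbox
 * frames: a 16-byte struct qlcnic_bc_hdr followed by up to
 * QLC_BC_PAYLOAD_SZ (1008) bytes of payload.  Larger commands are
 * split across multiple such fragments (see
 * qlcnic_sriov_get_bc_paysize() and qlcnic_sriov_prepare_bc_hdr()).
 */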

#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF		2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF	512

#define QLC_83XX_VF_RESET_FAIL_THRESH	8
#define QLC_BC_CMD_MAX_RETRY_CNT	5

static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
					struct qlcnic_cmd_args *);

static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_issue_cmd,
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
	.encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
	.encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};

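/* {command opcode, number of request args, number of response args};
 * consumed by qlcnic_sriov_alloc_bc_mbx_args() to size the req/rsp
 * argument arrays for back-channel commands.
 */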
static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};

static inline bool qlcnic_sriov_bc_msg_check(u32 val)
{
	return (val & (1 << QLC_BC_MSG)) ? true : false;
}

static inline bool qlcnic_sriov_channel_free_check(u32 val)
{
	return (val & (1 << QLC_BC_CFREE)) ? true : false;
}

static inline bool qlcnic_sriov_flr_check(u32 val)
{
	return (val & (1 << QLC_BC_FLR)) ? true : false;
}

static inline u8 qlcnic_sriov_target_func_id(u32 val)
{
	return (val >> 4) & 0xff;
}

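/* Derive a VF's PCI function number from the PF's SR-IOV capability,
 * per the PCIe SR-IOV scheme: routing ID = PF devfn + First VF Offset +
 * VF Stride * vf_id.  On a VF, or when no SR-IOV capability is found,
 * this returns 0.
 */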
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}

int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
{
	struct qlcnic_sriov *sriov;
	struct qlcnic_back_channel *bc;
	struct workqueue_struct *wq;
	struct qlcnic_vport *vp;
	struct qlcnic_vf_info *vf;
	int err, i;

	if (!qlcnic_sriov_enable_check(adapter))
		return -EIO;

	sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
	if (!sriov)
		return -ENOMEM;

	adapter->ahw->sriov = sriov;
	sriov->num_vfs = num_vfs;
	bc = &sriov->bc;
	sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
				 GFP_KERNEL);
	if (!sriov->vf_info) {
		err = -ENOMEM;
		goto qlcnic_free_sriov;
	}

	wq = create_singlethread_workqueue("bc-trans");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Cannot create bc-trans workqueue\n");
		goto qlcnic_free_vf_info;
	}

	bc->bc_trans_wq = wq;

	wq = create_singlethread_workqueue("async");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
		goto qlcnic_destroy_trans_wq;
	}

	bc->bc_async_wq =  wq;
	INIT_LIST_HEAD(&bc->async_cmd_list);
	INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
	spin_lock_init(&bc->queue_lock);
	bc->adapter = adapter;

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->adapter = adapter;
		vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
		mutex_init(&vf->send_cmd_lock);
		spin_lock_init(&vf->vlan_list_lock);
		INIT_LIST_HEAD(&vf->rcv_act.wait_list);
		INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
		spin_lock_init(&vf->rcv_act.lock);
		spin_lock_init(&vf->rcv_pend.lock);
		init_completion(&vf->ch_free_cmpl);

		INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);

		if (qlcnic_sriov_pf_check(adapter)) {
			vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
			if (!vp) {
				err = -ENOMEM;
				goto qlcnic_destroy_async_wq;
			}
			sriov->vf_info[i].vp = vp;
			vp->vlan_mode = QLC_GUEST_VLAN_MODE;
			vp->max_tx_bw = MAX_BW;
			vp->min_tx_bw = MIN_BW;
			vp->spoofchk = false;
			eth_random_addr(vp->mac);
			dev_info(&adapter->pdev->dev,
				 "MAC Address %pM is configured for VF %d\n",
				 vp->mac, i);
		}
	}

	return 0;

qlcnic_destroy_async_wq:
	destroy_workqueue(bc->bc_async_wq);

qlcnic_destroy_trans_wq:
	destroy_workqueue(bc->bc_trans_wq);

qlcnic_free_vf_info:
	kfree(sriov->vf_info);

qlcnic_free_sriov:
	kfree(adapter->ahw->sriov);
	return err;
}

void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}

void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}

static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}

void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}

static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}

static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}

int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
				      struct qlcnic_cmd_args *cmd)
{
	adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	return 0;
}

static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	int i, num_vlans, ret;
	u16 *vlans;

	if (sriov->allowed_vlans)
		return 0;

	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
	dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
		 sriov->num_allowed_vlans);

	ret = qlcnic_sriov_alloc_vlans(adapter);
	if (ret)
		return ret;

	if (!sriov->any_vlan)
		return 0;

	num_vlans = sriov->num_allowed_vlans;
	sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
	if (!sriov->allowed_vlans)
		return -ENOMEM;

	vlans = (u16 *)&cmd->rsp.arg[3];
	for (i = 0; i < num_vlans; i++)
		sriov->allowed_vlans[i] = vlans[i];

	return 0;
}

static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(cmd));
	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}

static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter)
{
	int err;

	adapter->flags |= QLCNIC_VLAN_FILTERING;
	adapter->ahw->total_nic_func = 1;
	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}

static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}

int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter);
	if (err)
		return err;

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
	dev_info(&adapter->pdev->dev,
		 "HAL Version: %d Non Privileged SRIOV function\n",
		 ahw->fw_hal_version);
	adapter->nic_ops = &qlcnic_sriov_vf_ops;
	set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
	return;
}

void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops		= &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl		= (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl	= (u32 *)qlcnic_83xx_ext_reg_tbl;
}

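/* Payload size of fragment 'curr_frag' (0-based) of a message with
 * 'real_pay_size' total payload bytes: every fragment except the last
 * carries a full QLC_BC_PAYLOAD_SZ (1008) bytes, the last carries the
 * remainder.  Worked example for real_pay_size = 2000:
 *   curr_frag 0: 2000 / (1 * 1008) = 1 -> 1008 bytes
 *   curr_frag 1: 2000 / (2 * 1008) = 0 -> 2000 % 1008 = 992 bytes
 */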
static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
{
	u32 pay_size;

	pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);

	if (pay_size)
		pay_size = QLC_BC_PAYLOAD_SZ;
	else
		pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;

	return pay_size;
}

int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
{
	struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
	u8 i;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
		if (vf_info[i].pci_func == pci_func)
			return i;
	}

	return -EINVAL;
}

static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
{
	*trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
	if (!*trans)
		return -ENOMEM;

	init_completion(&(*trans)->resp_cmpl);
	return 0;
}

static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
					    u32 size)
{
	*hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;

	return 0;
}

static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
	const struct qlcnic_mailbox_metadata *mbx_tbl;
	int i, size;

	mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->op_type = QLC_BC_CMD;
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
					   (3 << 29));
			mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
			return 0;
		}
	}
	return -EINVAL;
}

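/* Set up the per-fragment headers of a transaction.  For a command
 * (QLC_BC_COMMAND) the req/rsp header arrays are allocated from the
 * payload sizes; for a response the transaction's payload buffers are
 * reused as the command's arg arrays.  Each fragment header repeats
 * seq_id, cmd_op and num_frags; frag_num is 1-based.
 */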
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags  = t_num_frags;
		hdr = trans->req_hdr;
	}  else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		cmd->cmd_op = cmd_op;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}

static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
	if (!trans)
		return;
	kfree(trans->req_hdr);
	kfree(trans->rsp_hdr);
	kfree(trans);
}

static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if (type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}
	if (type == QLC_BC_COMMAND) {
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}
	return ret;
}

static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}

static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
{
	struct completion *cmpl = &trans->resp_cmpl;

	if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
		trans->trans_state = QLC_END;
	else
		trans->trans_state = QLC_ABORT;

	return;
}

static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
					    u8 type)
{
	if (type == QLC_BC_RESPONSE) {
		trans->curr_rsp_frag++;
		if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_END;
	} else {
		trans->curr_req_frag++;
		if (trans->curr_req_frag < trans->req_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_WAIT_FOR_RESP;
	}
}

static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
					       u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct completion *cmpl = &vf->ch_free_cmpl;

	if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
		trans->trans_state = QLC_ABORT;
		return;
	}

	clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
	qlcnic_sriov_handle_multi_frags(trans, type);
}

static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u8 i, max = 2, hdr_size, j;

	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	max = (size / sizeof(u32)) + hdr_size;

	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}

static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
{
	int ret = -EBUSY;
	u32 timeout = 10000;

	do {
		if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
			ret = 0;
			break;
		}
		mdelay(1);
	} while (--timeout);

	return ret;
}

static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u32 pay_size;
	u32 *hdr, *pay;
	int ret;
	u8 pci_func = trans->func_id;

	if (__qlcnic_sriov_issue_bc_post(vf))
		return -EBUSY;

	if (type == QLC_BC_COMMAND) {
		hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
		pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       trans->curr_req_frag);
		pay_size = (pay_size / sizeof(u32));
	} else {
		hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
		pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
		pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
						       trans->curr_rsp_frag);
		pay_size = (pay_size / sizeof(u32));
	}

	ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
				       pci_func, pay_size);
	return ret;
}

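/* Drive one direction of a BC transaction through its state machine:
 * QLC_INIT posts the current fragment and moves to
 * QLC_WAIT_FOR_CHANNEL_FREE; the channel-free event then either loops
 * back to QLC_INIT for the next fragment or advances to
 * QLC_WAIT_FOR_RESP (commands) / QLC_END (responses).  QLC_END and
 * QLC_ABORT terminate the loop with 0 and -EIO respectively; an FLR
 * on the VF or a pending FW reset forces QLC_ABORT.
 */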
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}

static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_bc_trans *trans, int pci_func)
{
	struct qlcnic_vf_info *vf;
	int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return -EIO;

	vf = &adapter->ahw->sriov->vf_info[index];
	trans->vf = vf;
	trans->func_id = pci_func;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
		if (qlcnic_sriov_pf_check(adapter))
			return -EIO;
		if (qlcnic_sriov_vf_check(adapter) &&
		    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
			return -EIO;
	}

	mutex_lock(&vf->send_cmd_lock);
	vf->send_cmd = trans;
	err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
	qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
	mutex_unlock(&vf->send_cmd_lock);
	return err;
}

static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
					  struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
#ifdef CONFIG_QLCNIC_SRIOV
	if (qlcnic_sriov_pf_check(adapter)) {
		qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
		return;
	}
#endif
	cmd->rsp.arg[0] |= (0x9 << 25);
	return;
}

static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter  = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);
	adapter = vf->adapter;

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}

static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}

int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				struct qlcnic_vf_info *vf,
				struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	t_list->count++;
	list_add_tail(&trans->list, &t_list->wait_list);
	if (t_list->count == 1)
		qlcnic_sriov_schedule_bc_cmd(sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
	return 0;
}

static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				     struct qlcnic_vf_info *vf,
				     struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	spin_lock(&t_list->lock);

	__qlcnic_sriov_add_act_list(sriov, vf, trans);

	spin_unlock(&t_list->lock);
	return 0;
}

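/* Continuation fragments (frag_num > 1) of a multi-fragment request
 * land here: locate the pending transaction by seq_id, pull this
 * fragment's payload from the mailbox, and once the final fragment
 * has arrived move the transaction to the active list for processing.
 */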
static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32 *)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);

	return;
}

static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					 trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}

static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_hdr hdr;
	u32 *ptr = (u32 *)&hdr;
	u8 msg_type, i;

	for (i = 2; i < 6; i++)
		ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
	msg_type = hdr.msg_type;

	switch (msg_type) {
	case QLC_BC_COMMAND:
		qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
		break;
	case QLC_BC_RESPONSE:
		qlcnic_sriov_handle_bc_resp(&hdr, vf);
		break;
	}
}

static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_adapter *adapter = vf->adapter;

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_handle_flr(sriov, vf);
	else
		dev_err(&adapter->pdev->dev,
			"Invalid event to VF. VF should not get FLR event\n");
}

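/* Dispatch a BC event word: per the helpers at the top of this file,
 * bit 0 flags a BC message, bit 1 a channel-free ack, bit 2 an FLR,
 * and bits 4-11 carry the target PCI function.
 */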
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
	struct qlcnic_vf_info *vf;
	struct qlcnic_sriov *sriov;
	int index;
	u8 pci_func;

	sriov = adapter->ahw->sriov;
	pci_func = qlcnic_sriov_target_func_id(event);
	index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return;

	vf = &sriov->vf_info[index];
	vf->pci_func = pci_func;

	if (qlcnic_sriov_channel_free_check(event))
		complete(&vf->ch_free_cmpl);

	if (qlcnic_sriov_flr_check(event)) {
		qlcnic_sriov_handle_flr_event(sriov, vf);
		return;
	}

	if (qlcnic_sriov_bc_msg_check(event))
		qlcnic_sriov_handle_msg_event(sriov, vf);
}

int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return 0;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
		return -ENOMEM;

	if (enable)
		cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);

	err = qlcnic_83xx_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to %s bc events, err=%d\n",
			(enable ? "enable" : "disable"), err);
	}

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
				     struct qlcnic_bc_trans *trans)
{
	u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
	u32 state;

	state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	if (state == QLC_83XX_IDC_DEV_READY) {
		msleep(20);
		clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
		trans->trans_state = QLC_INIT;
		if (++adapter->fw_fail_cnt > max)
			return -EIO;
		else
			return 0;
	}

	return -EIO;
}

static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_bc_trans *trans;
	int err;
	u32 rsp_data, opcode, mbx_err_code, rsp;
	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
	u8 func = ahw->pci_func;

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
		goto free_cmd;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)
		goto cleanup_transaction;

retry:
	if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
		rsp = -EIO;
		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
		goto err_out;
	}

	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
	if (err) {
		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
		rsp = QLCNIC_RCODE_TIMEOUT;

		/* After adapter reset PF driver may take some time to
		 * respond to VF's request. Retry request till maximum retries.
		 */
		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
			goto retry;

		goto err_out;
	}

	rsp_data = cmd->rsp.arg[0];
	mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

	if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
		rsp = QLCNIC_RCODE_SUCCESS;
	} else {
		if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
			rsp = QLCNIC_RCODE_SUCCESS;
		} else {
			rsp = mbx_err_code;
			if (!rsp)
				rsp = 1;

			dev_err(dev,
				"MBX command 0x%x failed with err:0x%x for VF %d\n",
				opcode, mbx_err_code, func);
		}
	}

err_out:
	if (rsp == QLCNIC_RCODE_TIMEOUT) {
		ahw->reset_context = 1;
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	}

cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);

free_cmd:
	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
		qlcnic_free_mbx_args(cmd);
		kfree(cmd);
	}

	return rsp;
}


static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
		return qlcnic_sriov_async_issue_cmd(adapter, cmd);
	else
		return __qlcnic_sriov_issue_cmd(adapter, cmd);
}

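/* Issue CHANNEL_INIT or CHANNEL_TERM over the back channel and mirror
 * the result in the VF's QLC_BC_VF_STATE bit; a response code of 2
 * (rsp.arg[0] >> 25) is propagated to the caller as-is.
 */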
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
		return -ENOMEM;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed bc channel %s %d\n", cmd_op ? "term" : "init",
			ret);
		goto out;
	}

	cmd_op = (cmd.rsp.arg[0] & 0xff);
	if (cmd.rsp.arg[0] >> 25 == 2)
		return 2;
	if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
		set_bit(QLC_BC_VF_STATE, &vf->state);
	else
		clear_bit(QLC_BC_VF_STATE, &vf->state);

out:
	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
				  enum qlcnic_mac_type mac_type)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	u16 vlan_id;
	int i;

	vf = &adapter->ahw->sriov->vf_info[0];

	if (!qlcnic_sriov_check_any_vlan(vf)) {
		qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
	} else {
		spin_lock(&vf->vlan_list_lock);
		for (i = 0; i < sriov->num_allowed_vlans; i++) {
			vlan_id = vf->sriov_vlans[i];
			if (vlan_id)
				qlcnic_nic_add_mac(adapter, mac, vlan_id,
						   mac_type);
		}
		spin_unlock(&vf->vlan_list_lock);
		if (qlcnic_84xx_check(adapter))
			qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
	}
}

void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
	struct list_head *head = &bc->async_cmd_list;
	struct qlcnic_async_cmd *entry;

	flush_workqueue(bc->bc_async_wq);
	cancel_work_sync(&bc->vf_async_work);

	spin_lock(&bc->queue_lock);
	while (!list_empty(head)) {
		entry = list_entry(head->next, struct qlcnic_async_cmd,
				   list);
		list_del(&entry->list);
		kfree(entry->cmd);
		kfree(entry);
	}
	spin_unlock(&bc->queue_lock);
}

void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	struct netdev_hw_addr *ha;
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if ((netdev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
	} else {
		qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
		if (!netdev_mc_empty(netdev)) {
			qlcnic_flush_mcast_mac(adapter);
			netdev_for_each_mc_addr(ha, netdev)
				qlcnic_vf_add_mc_list(netdev, ha->addr,
						      QLCNIC_MULTICAST_MAC);
		}
	}

	/* configure unicast MAC address, if there is not sufficient space
	 * to store all the unicast addresses then enable promiscuous mode
	 */
	if (netdev_uc_count(netdev) > ahw->max_uc_count) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if (!netdev_uc_empty(netdev)) {
		netdev_for_each_uc_addr(ha, netdev)
			qlcnic_vf_add_mc_list(netdev, ha->addr,
					      QLCNIC_UNICAST_MAC);
	}

	if (adapter->pdev->is_virtfn) {
		if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
		    !adapter->fdb_mac_learn) {
			qlcnic_alloc_lb_filters_mem(adapter);
			adapter->drv_mac_learn = true;
			adapter->rx_mac_learn = true;
		} else {
			adapter->drv_mac_learn = false;
			adapter->rx_mac_learn = false;
		}
	}

	qlcnic_nic_set_promisc(adapter, mode);
}

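/* Worker for the async (QLC_83XX_MBX_CMD_NO_WAIT) command path:
 * entries queued on bc->async_cmd_list under queue_lock are drained
 * here on the "async" workqueue and issued synchronously through
 * __qlcnic_sriov_issue_cmd(), which also frees NO_WAIT commands.
 */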
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
	struct qlcnic_async_cmd *entry, *tmp;
	struct qlcnic_back_channel *bc;
	struct qlcnic_cmd_args *cmd;
	struct list_head *head;
	LIST_HEAD(del_list);

	bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
	head = &bc->async_cmd_list;

	spin_lock(&bc->queue_lock);
	list_splice_init(head, &del_list);
	spin_unlock(&bc->queue_lock);

	list_for_each_entry_safe(entry, tmp, &del_list, list) {
		list_del(&entry->list);
		cmd = entry->cmd;
		__qlcnic_sriov_issue_cmd(bc->adapter, cmd);
		kfree(entry);
	}

	if (!list_empty(head))
		queue_work(bc->bc_async_wq, &bc->vf_async_work);

	return;
}

static struct qlcnic_async_cmd *
qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
			     struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_async_cmd *entry = NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->cmd = cmd;

	spin_lock(&bc->queue_lock);
	list_add_tail(&entry->list, &bc->async_cmd_list);
	spin_unlock(&bc->queue_lock);

	return entry;
}

static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_async_cmd *entry = NULL;

	entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
	if (!entry) {
		qlcnic_free_mbx_args(cmd);
		kfree(cmd);
		return;
	}

	queue_work(bc->bc_async_wq, &bc->vf_async_work);
}

static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
					struct qlcnic_cmd_args *cmd)
{

	struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;

	if (adapter->need_fw_reset)
		return -EIO;

	qlcnic_sriov_schedule_async_cmd(bc, cmd);

	return 0;
}

   1672static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
   1673{
   1674	int err;
   1675
   1676	adapter->need_fw_reset = 0;
   1677	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
   1678	qlcnic_83xx_enable_mbx_interrupt(adapter);
   1679
   1680	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
   1681	if (err)
   1682		return err;
   1683
   1684	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
   1685	if (err)
   1686		goto err_out_cleanup_bc_intr;
   1687
   1688	err = qlcnic_sriov_vf_init_driver(adapter);
   1689	if (err)
   1690		goto err_out_term_channel;
   1691
   1692	return 0;
   1693
   1694err_out_term_channel:
   1695	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
   1696
   1697err_out_cleanup_bc_intr:
   1698	qlcnic_sriov_cfg_bc_intr(adapter, 0);
   1699	return err;
   1700}
   1701
   1702static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
   1703{
   1704	struct net_device *netdev = adapter->netdev;
   1705
   1706	if (netif_running(netdev)) {
   1707		if (!qlcnic_up(adapter, netdev))
   1708			qlcnic_restore_indev_addr(netdev, NETDEV_UP);
   1709	}
   1710
   1711	netif_device_attach(netdev);
   1712}
   1713
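        /* Quiesce the VF: detach the netdev, stop mailbox work and
         * interrupts, bring the interface down if running, and reset the
         * first ahw->num_msix - 1 entries of the interrupt table.
         */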
   1714static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
   1715{
   1716	struct qlcnic_hardware_context *ahw = adapter->ahw;
   1717	struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
   1718	struct net_device *netdev = adapter->netdev;
   1719	u8 i, max_ints = ahw->num_msix - 1;
   1720
   1721	netif_device_detach(netdev);
   1722	qlcnic_83xx_detach_mailbox_work(adapter);
   1723	qlcnic_83xx_disable_mbx_intr(adapter);
   1724
   1725	if (netif_running(netdev))
   1726		qlcnic_down(adapter, netdev);
   1727
   1728	for (i = 0; i < max_ints; i++) {
   1729		intr_tbl[i].id = i;
   1730		intr_tbl[i].enabled = 0;
   1731		intr_tbl[i].src = 0;
   1732	}
   1733	ahw->reset_context = 0;
   1734}
   1735
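        /* IDC state went back to READY: if we arrived here from
         * NEED_RESET or INIT, reinitialize and reattach the VF, logging
         * the resulting device state if that fails.
         */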
   1736static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
   1737{
   1738	struct qlcnic_hardware_context *ahw = adapter->ahw;
   1739	struct device *dev = &adapter->pdev->dev;
   1740	struct qlc_83xx_idc *idc = &ahw->idc;
   1741	u8 func = ahw->pci_func;
   1742	u32 state;
   1743
   1744	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
   1745	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
   1746		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
   1747			qlcnic_sriov_vf_attach(adapter);
   1748			adapter->fw_fail_cnt = 0;
   1749			dev_info(dev,
   1750				 "%s: Reinitialization of VF 0x%x done after FW reset\n",
   1751				 __func__, func);
   1752		} else {
   1753			dev_err(dev,
   1754				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
   1755				__func__, func);
   1756			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
   1757			dev_info(dev, "Current state 0x%x after FW reset\n",
   1758				 state);
   1759		}
   1760	}
   1761
   1762	return 0;
   1763}
   1764
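        /* Handle a context reset requested while in the READY state. The
         * first two attempts only flag a firmware reset and wait to see
         * whether the firmware is actually hung; beyond
         * QLC_83XX_VF_RESET_FAIL_THRESH resets the VF is detached for
         * good. Otherwise the VF is detached and reinitialized in place.
         */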
   1765static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
   1766{
   1767	struct qlcnic_hardware_context *ahw = adapter->ahw;
   1768	struct qlcnic_mailbox *mbx = ahw->mailbox;
   1769	struct device *dev = &adapter->pdev->dev;
   1770	struct qlc_83xx_idc *idc = &ahw->idc;
   1771	u8 func = ahw->pci_func;
   1772	u32 state;
   1773
   1774	adapter->reset_ctx_cnt++;
   1775
   1776	/* Skip the context reset and check if FW is hung */
   1777	if (adapter->reset_ctx_cnt < 3) {
   1778		adapter->need_fw_reset = 1;
   1779		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
   1780		dev_info(dev,
   1781			 "Resetting context, wait here to check if FW is in failed state\n");
   1782		return 0;
   1783	}
   1784
    1785	/* Check if the number of resets exceeds the threshold.
    1786	 * If it does, just fail the VF.
    1787	 */
   1788	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
   1789		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
   1790		adapter->tx_timeo_cnt = 0;
   1791		adapter->fw_fail_cnt = 0;
   1792		adapter->reset_ctx_cnt = 0;
   1793		qlcnic_sriov_vf_detach(adapter);
   1794		dev_err(dev,
   1795			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
   1796		return -EIO;
   1797	}
   1798
   1799	dev_info(dev, "Resetting context of VF 0x%x\n", func);
   1800	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
   1801		 __func__, adapter->reset_ctx_cnt, func);
   1802	set_bit(__QLCNIC_RESETTING, &adapter->state);
   1803	adapter->need_fw_reset = 1;
   1804	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
   1805	qlcnic_sriov_vf_detach(adapter);
   1806	adapter->need_fw_reset = 0;
   1807
   1808	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
   1809		qlcnic_sriov_vf_attach(adapter);
   1810		adapter->tx_timeo_cnt = 0;
   1811		adapter->reset_ctx_cnt = 0;
   1812		adapter->fw_fail_cnt = 0;
   1813		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
   1814	} else {
   1815		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
   1816			__func__, func);
   1817		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
   1818		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
   1819	}
   1820
   1821	return 0;
   1822}
   1823
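        /* Dispatch for the IDC READY state: recover the device when
         * arriving from a non-ready state, or service a pending context
         * reset request.
         */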
   1824static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
   1825{
   1826	struct qlcnic_hardware_context *ahw = adapter->ahw;
   1827	int ret = 0;
   1828
   1829	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
   1830		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
   1831	else if (ahw->reset_context)
   1832		ret = qlcnic_sriov_vf_handle_context_reset(adapter);
   1833
   1834	clear_bit(__QLCNIC_RESETTING, &adapter->state);
   1835	return ret;
   1836}
   1837
   1838static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
   1839{
   1840	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
   1841
   1842	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
   1843	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
   1844		qlcnic_sriov_vf_detach(adapter);
   1845
   1846	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
   1847	clear_bit(__QLCNIC_RESETTING, &adapter->state);
   1848	return -EIO;
   1849}
   1850
   1851static int
   1852qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
   1853{
   1854	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
   1855	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
   1856
   1857	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
   1858	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
   1859		set_bit(__QLCNIC_RESETTING, &adapter->state);
   1860		adapter->tx_timeo_cnt = 0;
   1861		adapter->reset_ctx_cnt = 0;
   1862		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
   1863		qlcnic_sriov_vf_detach(adapter);
   1864	}
   1865
   1866	return 0;
   1867}
   1868
   1869static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
   1870{
   1871	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
   1872	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
   1873	u8 func = adapter->ahw->pci_func;
   1874
   1875	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
   1876		dev_err(&adapter->pdev->dev,
   1877			"Firmware hang detected by VF 0x%x\n", func);
   1878		set_bit(__QLCNIC_RESETTING, &adapter->state);
   1879		adapter->tx_timeo_cnt = 0;
   1880		adapter->reset_ctx_cnt = 0;
   1881		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
   1882		qlcnic_sriov_vf_detach(adapter);
   1883	}
   1884	return 0;
   1885}
   1886
   1887static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
   1888{
   1889	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
   1890	return 0;
   1891}
   1892
   1893static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
   1894{
   1895	if (adapter->fhash.fnum)
   1896		qlcnic_prune_lb_filters(adapter);
   1897}
   1898
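        /* Delayed work that polls the IDC device state register and
         * dispatches to the per-state handlers above, rescheduling itself
         * every idc->delay while the handlers succeed and the module
         * remains loaded.
         */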
   1899static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
   1900{
   1901	struct qlcnic_adapter *adapter;
   1902	struct qlc_83xx_idc *idc;
   1903	int ret = 0;
   1904
   1905	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
   1906	idc = &adapter->ahw->idc;
   1907	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
   1908
   1909	switch (idc->curr_state) {
   1910	case QLC_83XX_IDC_DEV_READY:
   1911		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
   1912		break;
   1913	case QLC_83XX_IDC_DEV_NEED_RESET:
   1914	case QLC_83XX_IDC_DEV_INIT:
   1915		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
   1916		break;
   1917	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
   1918		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
   1919		break;
   1920	case QLC_83XX_IDC_DEV_FAILED:
   1921		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
   1922		break;
   1923	case QLC_83XX_IDC_DEV_QUISCENT:
   1924		break;
   1925	default:
   1926		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
   1927	}
   1928
   1929	idc->prev_state = idc->curr_state;
   1930	qlcnic_sriov_vf_periodic_tasks(adapter);
   1931
   1932	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
   1933		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
   1934				     idc->delay);
   1935}
   1936
   1937static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
   1938{
   1939	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
   1940		msleep(20);
   1941
   1942	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
   1943	clear_bit(__QLCNIC_RESETTING, &adapter->state);
   1944	cancel_delayed_work_sync(&adapter->fw_work);
   1945}
   1946
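        /* Return 0 if @vlan_id is present in the VF's VLAN table,
         * -EINVAL otherwise.
         */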
   1947static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
   1948				      struct qlcnic_vf_info *vf, u16 vlan_id)
   1949{
   1950	int i, err = -EINVAL;
   1951
   1952	if (!vf->sriov_vlans)
   1953		return err;
   1954
   1955	spin_lock_bh(&vf->vlan_list_lock);
   1956
   1957	for (i = 0; i < sriov->num_allowed_vlans; i++) {
   1958		if (vf->sriov_vlans[i] == vlan_id) {
   1959			err = 0;
   1960			break;
   1961		}
   1962	}
   1963
   1964	spin_unlock_bh(&vf->vlan_list_lock);
   1965	return err;
   1966}
   1967
   1968static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
   1969					   struct qlcnic_vf_info *vf)
   1970{
   1971	int err = 0;
   1972
   1973	spin_lock_bh(&vf->vlan_list_lock);
   1974
   1975	if (vf->num_vlan >= sriov->num_allowed_vlans)
   1976		err = -EINVAL;
   1977
   1978	spin_unlock_bh(&vf->vlan_list_lock);
   1979	return err;
   1980}
   1981
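        /* Validate a guest VLAN add/delete request: guest VLAN mode must
         * be enabled; an add must stay within the allowed VLAN count,
         * must not stack on an existing VLAN on a VF, and must match the
         * allowed-VLAN list when one is enforced; a delete must name a
         * VLAN that is actually configured.
         */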
   1982static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
   1983					  u16 vid, u8 enable)
   1984{
   1985	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
   1986	struct qlcnic_vf_info *vf;
   1987	bool vlan_exist;
   1988	u8 allowed = 0;
   1989	int i;
   1990
    1991	vf = &sriov->vf_info[0];
   1992	vlan_exist = qlcnic_sriov_check_any_vlan(vf);
   1993	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
   1994		return -EINVAL;
   1995
   1996	if (enable) {
   1997		if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
   1998			return -EINVAL;
   1999
   2000		if (qlcnic_sriov_validate_num_vlans(sriov, vf))
   2001			return -EINVAL;
   2002
   2003		if (sriov->any_vlan) {
   2004			for (i = 0; i < sriov->num_allowed_vlans; i++) {
   2005				if (sriov->allowed_vlans[i] == vid)
   2006					allowed = 1;
   2007			}
   2008
   2009			if (!allowed)
   2010				return -EINVAL;
   2011		}
   2012	} else {
   2013		if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
   2014			return -EINVAL;
   2015	}
   2016
   2017	return 0;
   2018}
   2019
   2020static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
   2021					enum qlcnic_vlan_operations opcode)
   2022{
   2023	struct qlcnic_adapter *adapter = vf->adapter;
   2024	struct qlcnic_sriov *sriov;
   2025
   2026	sriov = adapter->ahw->sriov;
   2027
   2028	if (!vf->sriov_vlans)
   2029		return;
   2030
   2031	spin_lock_bh(&vf->vlan_list_lock);
   2032
   2033	switch (opcode) {
   2034	case QLC_VLAN_ADD:
   2035		qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
   2036		break;
   2037	case QLC_VLAN_DELETE:
   2038		qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
   2039		break;
   2040	default:
   2041		netdev_err(adapter->netdev, "Invalid VLAN operation\n");
   2042	}
   2043
   2044	spin_unlock_bh(&vf->vlan_list_lock);
   2046}
   2047
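        /* Configure a guest VLAN over the back channel
         * (QLCNIC_BC_CMD_CFG_GUEST_VLAN); arg[1] packs the enable flag in
         * bit 0 and the VLAN ID in bits 31:16. On success the MAC list is
         * flushed and the multicast list reprogrammed so filters pick up
         * the VLAN change.
         */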
   2048int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
   2049				   u16 vid, u8 enable)
   2050{
   2051	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
   2052	struct net_device *netdev = adapter->netdev;
   2053	struct qlcnic_vf_info *vf;
   2054	struct qlcnic_cmd_args cmd;
   2055	int ret;
   2056
   2057	memset(&cmd, 0, sizeof(cmd));
   2058	if (vid == 0)
   2059		return 0;
   2060
    2061	vf = &sriov->vf_info[0];
   2062	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
   2063	if (ret)
   2064		return ret;
   2065
   2066	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
   2067					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
   2068	if (ret)
   2069		return ret;
   2070
   2071	cmd.req.arg[1] = (enable & 1) | vid << 16;
   2072
   2073	qlcnic_sriov_cleanup_async_list(&sriov->bc);
   2074	ret = qlcnic_issue_cmd(adapter, &cmd);
   2075	if (ret) {
   2076		dev_err(&adapter->pdev->dev,
   2077			"Failed to configure guest VLAN, err=%d\n", ret);
   2078	} else {
   2079		netif_addr_lock_bh(netdev);
   2080		qlcnic_free_mac_list(adapter);
   2081		netif_addr_unlock_bh(netdev);
   2082
   2083		if (enable)
   2084			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
   2085		else
   2086			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
   2087
   2088		netif_addr_lock_bh(netdev);
   2089		qlcnic_set_multi(netdev);
   2090		netif_addr_unlock_bh(netdev);
   2091	}
   2092
   2093	qlcnic_free_mbx_args(&cmd);
   2094	return ret;
   2095}
   2096
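        /* Delete every MAC/VLAN filter on adapter->mac_list from the
         * hardware and free the list entries.
         */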
   2097static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
   2098{
   2099	struct list_head *head = &adapter->mac_list;
   2100	struct qlcnic_mac_vlan_list *cur;
   2101
   2102	while (!list_empty(head)) {
   2103		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
   2104		qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
   2105					  QLCNIC_MAC_DEL);
   2106		list_del(&cur->list);
   2107		kfree(cur);
   2108	}
   2109}
   2111
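        /* PCI shutdown/suspend path for a VF: detach the netdev, cancel
         * the IDC and AEN work, tear down the VF-PF channel and mailbox
         * interrupts, then save PCI state.
         */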
   2112static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
   2113{
   2114	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
   2115	struct net_device *netdev = adapter->netdev;
   2116
   2117	netif_device_detach(netdev);
   2118	qlcnic_cancel_idc_work(adapter);
   2119
   2120	if (netif_running(netdev))
   2121		qlcnic_down(adapter, netdev);
   2122
   2123	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
   2124	qlcnic_sriov_cfg_bc_intr(adapter, 0);
   2125	qlcnic_83xx_disable_mbx_intr(adapter);
   2126	cancel_delayed_work_sync(&adapter->idc_aen_work);
   2127
   2128	return pci_save_state(pdev);
   2129}
   2130
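        /* Resume path: re-enable the mailbox and back channel,
         * re-establish the VF-PF channel, bring the interface back up if
         * it was running, and restart the IDC poll work.
         */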
   2131static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
   2132{
   2133	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
   2134	struct net_device *netdev = adapter->netdev;
   2135	int err;
   2136
   2137	set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
   2138	qlcnic_83xx_enable_mbx_interrupt(adapter);
   2139	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
   2140	if (err)
   2141		return err;
   2142
   2143	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
   2144	if (!err) {
   2145		if (netif_running(netdev)) {
   2146			err = qlcnic_up(adapter, netdev);
   2147			if (!err)
   2148				qlcnic_restore_indev_addr(netdev, NETDEV_UP);
   2149		}
   2150	}
   2151
   2152	netif_device_attach(netdev);
   2153	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
   2154			     idc->delay);
   2155	return err;
   2156}
   2157
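        /* Allocate the per-VF guest VLAN tables, sized by the number of
         * VLANs the PF allows.
         */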
   2158int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
   2159{
   2160	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
   2161	struct qlcnic_vf_info *vf;
   2162	int i;
   2163
   2164	for (i = 0; i < sriov->num_vfs; i++) {
   2165		vf = &sriov->vf_info[i];
   2166		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
   2167					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
   2168		if (!vf->sriov_vlans)
   2169			return -ENOMEM;
   2170	}
   2171
   2172	return 0;
   2173}
   2174
   2175void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
   2176{
   2177	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
   2178	struct qlcnic_vf_info *vf;
   2179	int i;
   2180
   2181	for (i = 0; i < sriov->num_vfs; i++) {
   2182		vf = &sriov->vf_info[i];
   2183		kfree(vf->sriov_vlans);
   2184		vf->sriov_vlans = NULL;
   2185	}
   2186}
   2187
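        /* Record @vlan_id in the first free slot of the VF's VLAN table;
         * callers serialize against each other via vf->vlan_list_lock
         * (see qlcnic_sriov_vlan_operation()).
         */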
   2188void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
   2189			      struct qlcnic_vf_info *vf, u16 vlan_id)
   2190{
   2191	int i;
   2192
   2193	for (i = 0; i < sriov->num_allowed_vlans; i++) {
   2194		if (!vf->sriov_vlans[i]) {
   2195			vf->sriov_vlans[i] = vlan_id;
   2196			vf->num_vlan++;
   2197			return;
   2198		}
   2199	}
   2200}
   2201
   2202void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
   2203			      struct qlcnic_vf_info *vf, u16 vlan_id)
   2204{
   2205	int i;
   2206
   2207	for (i = 0; i < sriov->num_allowed_vlans; i++) {
   2208		if (vf->sriov_vlans[i] == vlan_id) {
   2209			vf->sriov_vlans[i] = 0;
   2210			vf->num_vlan--;
   2211			return;
   2212		}
   2213	}
   2214}
   2215
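        /* Return true if the VF currently has any guest VLAN configured. */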
   2216bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
   2217{
    2218	bool any_vlan = false;
    2219
    2220	spin_lock_bh(&vf->vlan_list_lock);
    2221
    2222	if (vf->num_vlan)
    2223		any_vlan = true;
    2224
    2225	spin_unlock_bh(&vf->vlan_list_lock);
    2226	return any_vlan;
   2227}