cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hclgevf_main.c (95024B)


      1// SPDX-License-Identifier: GPL-2.0+
      2// Copyright (c) 2016-2017 Hisilicon Limited.
      3
      4#include <linux/etherdevice.h>
      5#include <linux/iopoll.h>
      6#include <net/rtnetlink.h>
      7#include "hclgevf_cmd.h"
      8#include "hclgevf_main.h"
      9#include "hclge_mbx.h"
     10#include "hnae3.h"
     11#include "hclgevf_devlink.h"
     12#include "hclge_comm_rss.h"
     13
     14#define HCLGEVF_NAME	"hclgevf"
     15
     16#define HCLGEVF_RESET_MAX_FAIL_CNT	5
     17
     18static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
     19static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
     20				  unsigned long delay);
     21
     22static struct hnae3_ae_algo ae_algovf;
     23
     24static struct workqueue_struct *hclgevf_wq;
     25
     26static const struct pci_device_id ae_algovf_pci_tbl[] = {
     27	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
     28	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
     29	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
     30	/* required last entry */
     31	{0, }
     32};
     33
     34MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
     35
     36static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
     37					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
     38					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
     39					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
     40					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
     41					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
     42					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
     43					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
     44					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
     45					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
     46					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
     47					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
     48					 HCLGE_COMM_CMDQ_INTR_EN_REG,
     49					 HCLGE_COMM_CMDQ_INTR_GEN_REG};
     50
     51static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
     52					   HCLGEVF_RST_ING,
     53					   HCLGEVF_GRO_EN_REG};
     54
     55static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
     56					 HCLGEVF_RING_RX_ADDR_H_REG,
     57					 HCLGEVF_RING_RX_BD_NUM_REG,
     58					 HCLGEVF_RING_RX_BD_LENGTH_REG,
     59					 HCLGEVF_RING_RX_MERGE_EN_REG,
     60					 HCLGEVF_RING_RX_TAIL_REG,
     61					 HCLGEVF_RING_RX_HEAD_REG,
     62					 HCLGEVF_RING_RX_FBD_NUM_REG,
     63					 HCLGEVF_RING_RX_OFFSET_REG,
     64					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
     65					 HCLGEVF_RING_RX_STASH_REG,
     66					 HCLGEVF_RING_RX_BD_ERR_REG,
     67					 HCLGEVF_RING_TX_ADDR_L_REG,
     68					 HCLGEVF_RING_TX_ADDR_H_REG,
     69					 HCLGEVF_RING_TX_BD_NUM_REG,
     70					 HCLGEVF_RING_TX_PRIORITY_REG,
     71					 HCLGEVF_RING_TX_TC_REG,
     72					 HCLGEVF_RING_TX_MERGE_EN_REG,
     73					 HCLGEVF_RING_TX_TAIL_REG,
     74					 HCLGEVF_RING_TX_HEAD_REG,
     75					 HCLGEVF_RING_TX_FBD_NUM_REG,
     76					 HCLGEVF_RING_TX_OFFSET_REG,
     77					 HCLGEVF_RING_TX_EBD_NUM_REG,
     78					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
     79					 HCLGEVF_RING_TX_BD_ERR_REG,
     80					 HCLGEVF_RING_EN_REG};
     81
     82static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
     83					     HCLGEVF_TQP_INTR_GL0_REG,
     84					     HCLGEVF_TQP_INTR_GL1_REG,
     85					     HCLGEVF_TQP_INTR_GL2_REG,
     86					     HCLGEVF_TQP_INTR_RL_REG};
     87
     88/* hclgevf_cmd_send - send command to command queue
     89 * @hw: pointer to the hw struct
     90 * @desc: prefilled descriptor for describing the command
     91 * @num: the number of descriptors to be sent
     92 *
     93 * This is the main send routine for the command queue: it posts the
     94 * descriptors to the queue, waits for completion and cleans the queue.
     95 */
     96int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
     97{
     98	return hclge_comm_cmd_send(&hw->hw, desc, num);
     99}
    100
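/* Editor's note -- not part of the original file: a minimal sketch of the
 * usual hclgevf_cmd_send() call pattern in this driver: set up a descriptor
 * with hclgevf_cmd_setup_basic_desc(), fill the command-specific fields,
 * then send one descriptor on the command queue. The opcode and request
 * struct are the ones used by hclgevf_tqp_enable_cmd_send() later in this
 * file; the helper name itself is purely illustrative.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_cfg_tqp_queue(struct hclgevf_dev *hdev, u16 tqp_id)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclge_desc desc;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);

	/* one descriptor; returns 0 or a firmware/driver error code */
	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
}
#endif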
    101void hclgevf_arq_init(struct hclgevf_dev *hdev)
    102{
    103	struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
    104
    105	spin_lock(&cmdq->crq.lock);
    106	/* initialize the pointers of the mailbox async rx queue */
    107	hdev->arq.hdev = hdev;
    108	hdev->arq.head = 0;
    109	hdev->arq.tail = 0;
    110	atomic_set(&hdev->arq.count, 0);
    111	spin_unlock(&cmdq->crq.lock);
    112}
    113
    114static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
    115{
    116	if (!handle->client)
    117		return container_of(handle, struct hclgevf_dev, nic);
    118	else if (handle->client->type == HNAE3_CLIENT_ROCE)
    119		return container_of(handle, struct hclgevf_dev, roce);
    120	else
    121		return container_of(handle, struct hclgevf_dev, nic);
    122}
    123
    124static void hclgevf_update_stats(struct hnae3_handle *handle,
    125				 struct net_device_stats *net_stats)
    126{
    127	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    128	int status;
    129
    130	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
    131	if (status)
    132		dev_err(&hdev->pdev->dev,
    133			"VF update of TQPS stats fail, status = %d.\n",
    134			status);
    135}
    136
    137static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
    138{
    139	if (strset == ETH_SS_TEST)
    140		return -EOPNOTSUPP;
    141	else if (strset == ETH_SS_STATS)
    142		return hclge_comm_tqps_get_sset_count(handle);
    143
    144	return 0;
    145}
    146
    147static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
    148				u8 *data)
    149{
    150	u8 *p = (char *)data;
    151
    152	if (strset == ETH_SS_STATS)
    153		p = hclge_comm_tqps_get_strings(handle, p);
    154}
    155
    156static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
    157{
    158	hclge_comm_tqps_get_stats(handle, data);
    159}
    160
    161static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
    162				   u8 subcode)
    163{
    164	if (msg) {
    165		memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
    166		msg->code = code;
    167		msg->subcode = subcode;
    168	}
    169}
    170
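/* Editor's note -- not part of the original file: nearly every VF-to-PF
 * request below follows the same two-step pattern: build the message with
 * hclgevf_build_send_msg() and send it with hclgevf_send_mbx_msg(), passing
 * a response buffer and need_resp = true when a synchronous reply is
 * expected (see hclgevf_get_basic_info() just below), or NULL and false for
 * fire-and-forget requests (see hclgevf_request_link_info()). A hedged
 * sketch of the asynchronous variant; the helper name is illustrative.
 */
#if 0	/* illustrative sketch only, not compiled */
static void example_request_link_status(struct hclgevf_dev *hdev)
{
	struct hclge_vf_to_pf_msg send_msg;

	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
	/* no response buffer: the PF answers later via the mailbox task */
	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
#endif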
    171static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
    172{
    173	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
    174	u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
    175	struct hclge_basic_info *basic_info;
    176	struct hclge_vf_to_pf_msg send_msg;
    177	unsigned long caps;
    178	int status;
    179
    180	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
    181	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
    182				      sizeof(resp_msg));
    183	if (status) {
    184		dev_err(&hdev->pdev->dev,
    185			"failed to get basic info from pf, ret = %d", status);
    186		return status;
    187	}
    188
    189	basic_info = (struct hclge_basic_info *)resp_msg;
    190
    191	hdev->hw_tc_map = basic_info->hw_tc_map;
    192	hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
    193	caps = le32_to_cpu(basic_info->pf_caps);
    194	if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
    195		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
    196
    197	return 0;
    198}
    199
    200static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
    201{
    202	struct hnae3_handle *nic = &hdev->nic;
    203	struct hclge_vf_to_pf_msg send_msg;
    204	u8 resp_msg;
    205	int ret;
    206
    207	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
    208			       HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
    209	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
    210				   sizeof(u8));
    211	if (ret) {
    212		dev_err(&hdev->pdev->dev,
    213			"VF request to get port based vlan state failed %d",
    214			ret);
    215		return ret;
    216	}
    217
    218	nic->port_base_vlan_state = resp_msg;
    219
    220	return 0;
    221}
    222
    223static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
    224{
    225#define HCLGEVF_TQPS_RSS_INFO_LEN	6
    226
    227	struct hclge_mbx_vf_queue_info *queue_info;
    228	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
    229	struct hclge_vf_to_pf_msg send_msg;
    230	int status;
    231
    232	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
    233	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
    234				      HCLGEVF_TQPS_RSS_INFO_LEN);
    235	if (status) {
    236		dev_err(&hdev->pdev->dev,
    237			"VF request to get tqp info from PF failed %d",
    238			status);
    239		return status;
    240	}
    241
    242	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
    243	hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
    244	hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
    245	hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
    246
    247	return 0;
    248}
    249
    250static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
    251{
    252#define HCLGEVF_TQPS_DEPTH_INFO_LEN	4
    253
    254	struct hclge_mbx_vf_queue_depth *queue_depth;
    255	u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
    256	struct hclge_vf_to_pf_msg send_msg;
    257	int ret;
    258
    259	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
    260	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
    261				   HCLGEVF_TQPS_DEPTH_INFO_LEN);
    262	if (ret) {
    263		dev_err(&hdev->pdev->dev,
    264			"VF request to get tqp depth info from PF failed %d",
    265			ret);
    266		return ret;
    267	}
    268
    269	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
    270	hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
    271	hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
    272
    273	return 0;
    274}
    275
    276static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
    277{
    278	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    279	struct hclge_vf_to_pf_msg send_msg;
    280	u16 qid_in_pf = 0;
    281	u8 resp_data[2];
    282	int ret;
    283
    284	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
    285	*(__le16 *)send_msg.data = cpu_to_le16(queue_id);
    286	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
    287				   sizeof(resp_data));
    288	if (!ret)
    289		qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
    290
    291	return qid_in_pf;
    292}
    293
    294static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
    295{
    296	struct hclge_vf_to_pf_msg send_msg;
    297	u8 resp_msg[2];
    298	int ret;
    299
    300	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
    301	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
    302				   sizeof(resp_msg));
    303	if (ret) {
    304		dev_err(&hdev->pdev->dev,
    305			"VF request to get the pf port media type failed %d",
    306			ret);
    307		return ret;
    308	}
    309
    310	hdev->hw.mac.media_type = resp_msg[0];
    311	hdev->hw.mac.module_type = resp_msg[1];
    312
    313	return 0;
    314}
    315
    316static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
    317{
    318	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
    319	struct hclge_comm_tqp *tqp;
    320	int i;
    321
    322	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
    323				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
    324	if (!hdev->htqp)
    325		return -ENOMEM;
    326
    327	tqp = hdev->htqp;
    328
    329	for (i = 0; i < hdev->num_tqps; i++) {
    330		tqp->dev = &hdev->pdev->dev;
    331		tqp->index = i;
    332
    333		tqp->q.ae_algo = &ae_algovf;
    334		tqp->q.buf_size = hdev->rx_buf_len;
    335		tqp->q.tx_desc_num = hdev->num_tx_desc;
    336		tqp->q.rx_desc_num = hdev->num_rx_desc;
    337
    338		/* need an extended offset to configure queues >=
    339		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2.
    340		 */
    341		if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
    342			tqp->q.io_base = hdev->hw.hw.io_base +
    343					 HCLGEVF_TQP_REG_OFFSET +
    344					 i * HCLGEVF_TQP_REG_SIZE;
    345		else
    346			tqp->q.io_base = hdev->hw.hw.io_base +
    347					 HCLGEVF_TQP_REG_OFFSET +
    348					 HCLGEVF_TQP_EXT_REG_OFFSET +
    349					 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
    350					 HCLGEVF_TQP_REG_SIZE;
    351
    352		/* when device supports tx push and has device memory,
    353		 * the queue can execute push mode or doorbell mode on
    354		 * device memory.
    355		 */
    356		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
    357			tqp->q.mem_base = hdev->hw.hw.mem_base +
    358					  HCLGEVF_TQP_MEM_OFFSET(hdev, i);
    359
    360		tqp++;
    361	}
    362
    363	return 0;
    364}
    365
    366static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
    367{
    368	struct hnae3_handle *nic = &hdev->nic;
    369	struct hnae3_knic_private_info *kinfo;
    370	u16 new_tqps = hdev->num_tqps;
    371	unsigned int i;
    372	u8 num_tc = 0;
    373
    374	kinfo = &nic->kinfo;
    375	kinfo->num_tx_desc = hdev->num_tx_desc;
    376	kinfo->num_rx_desc = hdev->num_rx_desc;
    377	kinfo->rx_buf_len = hdev->rx_buf_len;
    378	for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
    379		if (hdev->hw_tc_map & BIT(i))
    380			num_tc++;
    381
    382	num_tc = num_tc ? num_tc : 1;
    383	kinfo->tc_info.num_tc = num_tc;
    384	kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc);
    385	new_tqps = kinfo->rss_size * num_tc;
    386	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
    387
    388	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
    389				  sizeof(struct hnae3_queue *), GFP_KERNEL);
    390	if (!kinfo->tqp)
    391		return -ENOMEM;
    392
    393	for (i = 0; i < kinfo->num_tqps; i++) {
    394		hdev->htqp[i].q.handle = &hdev->nic;
    395		hdev->htqp[i].q.tqp_index = i;
    396		kinfo->tqp[i] = &hdev->htqp[i].q;
    397	}
    398
    399	/* after initializing the max rss_size and tqps, adjust the default tqp
    400	 * number and rss size to match the number of vectors actually available
    401	 */
    402	kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
    403	kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc,
    404				kinfo->rss_size);
    405
    406	return 0;
    407}
    408
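/* Editor's note -- not part of the original file: a worked example of the
 * sizing done in hclgevf_knic_setup() above, using hypothetical values.
 * With num_tqps = 16, rss_size_max = 8, hw_tc_map = 0x3 (2 TCs) and
 * num_nic_msix = 9:
 *   rss_size = min(8, 16 / 2)  = 8
 *   num_tqps = min(8 * 2, 16)  = 16
 * then, limited by the vectors actually available:
 *   num_tqps = min(9 - 1, 16)  = 8
 *   rss_size = min(8 / 2, 8)   = 4
 */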
    409static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
    410{
    411	struct hclge_vf_to_pf_msg send_msg;
    412	int status;
    413
    414	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
    415	status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    416	if (status)
    417		dev_err(&hdev->pdev->dev,
    418			"VF failed to fetch link status(%d) from PF", status);
    419}
    420
    421void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
    422{
    423	struct hnae3_handle *rhandle = &hdev->roce;
    424	struct hnae3_handle *handle = &hdev->nic;
    425	struct hnae3_client *rclient;
    426	struct hnae3_client *client;
    427
    428	if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
    429		return;
    430
    431	client = handle->client;
    432	rclient = hdev->roce_client;
    433
    434	link_state =
    435		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
    436	if (link_state != hdev->hw.mac.link) {
    437		hdev->hw.mac.link = link_state;
    438		client->ops->link_status_change(handle, !!link_state);
    439		if (rclient && rclient->ops->link_status_change)
    440			rclient->ops->link_status_change(rhandle, !!link_state);
    441	}
    442
    443	clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
    444}
    445
    446static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
    447{
    448#define HCLGEVF_ADVERTISING	0
    449#define HCLGEVF_SUPPORTED	1
    450
    451	struct hclge_vf_to_pf_msg send_msg;
    452
    453	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
    454	send_msg.data[0] = HCLGEVF_ADVERTISING;
    455	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    456	send_msg.data[0] = HCLGEVF_SUPPORTED;
    457	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    458}
    459
    460static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
    461{
    462	struct hnae3_handle *nic = &hdev->nic;
    463	int ret;
    464
    465	nic->ae_algo = &ae_algovf;
    466	nic->pdev = hdev->pdev;
    467	nic->numa_node_mask = hdev->numa_node_mask;
    468	nic->flags |= HNAE3_SUPPORT_VF;
    469	nic->kinfo.io_base = hdev->hw.hw.io_base;
    470
    471	ret = hclgevf_knic_setup(hdev);
    472	if (ret)
    473		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
    474			ret);
    475	return ret;
    476}
    477
    478static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
    479{
    480	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
    481		dev_warn(&hdev->pdev->dev,
    482			 "vector(vector_id %d) has been freed.\n", vector_id);
    483		return;
    484	}
    485
    486	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
    487	hdev->num_msi_left += 1;
    488	hdev->num_msi_used -= 1;
    489}
    490
    491static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
    492			      struct hnae3_vector_info *vector_info)
    493{
    494	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    495	struct hnae3_vector_info *vector = vector_info;
    496	int alloc = 0;
    497	int i, j;
    498
    499	vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
    500	vector_num = min(hdev->num_msi_left, vector_num);
    501
    502	for (j = 0; j < vector_num; j++) {
    503		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
    504			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
    505				vector->vector = pci_irq_vector(hdev->pdev, i);
    506				vector->io_addr = hdev->hw.hw.io_base +
    507					HCLGEVF_VECTOR_REG_BASE +
    508					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
    509				hdev->vector_status[i] = 0;
    510				hdev->vector_irq[i] = vector->vector;
    511
    512				vector++;
    513				alloc++;
    514
    515				break;
    516			}
    517		}
    518	}
    519	hdev->num_msi_left -= alloc;
    520	hdev->num_msi_used += alloc;
    521
    522	return alloc;
    523}
    524
    525static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
    526{
    527	int i;
    528
    529	for (i = 0; i < hdev->num_msi; i++)
    530		if (vector == hdev->vector_irq[i])
    531			return i;
    532
    533	return -EINVAL;
    534}
    535
    536/* for revision 0x20, the vf shares the same rss config with the pf */
    537static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
    538{
    539#define HCLGEVF_RSS_MBX_RESP_LEN	8
    540	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    541	u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
    542	struct hclge_vf_to_pf_msg send_msg;
    543	u16 msg_num, hash_key_index;
    544	u8 index;
    545	int ret;
    546
    547	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
    548	msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
    549			HCLGEVF_RSS_MBX_RESP_LEN;
    550	for (index = 0; index < msg_num; index++) {
    551		send_msg.data[0] = index;
    552		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
    553					   HCLGEVF_RSS_MBX_RESP_LEN);
    554		if (ret) {
    555			dev_err(&hdev->pdev->dev,
    556				"VF get rss hash key from PF failed, ret=%d",
    557				ret);
    558			return ret;
    559		}
    560
    561		hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
    562		if (index == msg_num - 1)
    563			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
    564			       &resp_msg[0],
    565			       HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
    566		else
    567			memcpy(&rss_cfg->rss_hash_key[hash_key_index],
    568			       &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
    569	}
    570
    571	return 0;
    572}
    573
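/* Editor's note -- not part of the original file: the loop above pulls the
 * RSS hash key from the PF in HCLGEVF_RSS_MBX_RESP_LEN (8 byte) chunks.
 * Assuming a 40-byte HCLGE_COMM_RSS_KEY_SIZE (the usual size in the common
 * RSS code), msg_num = (40 + 8 - 1) / 8 = 5 mailbox round trips; the final
 * memcpy length of HCLGE_COMM_RSS_KEY_SIZE - hash_key_index also copes with
 * key sizes that are not a multiple of the chunk length.
 */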
    574static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
    575			   u8 *hfunc)
    576{
    577	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    578	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    579	int ret;
    580
    581	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
    582		hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
    583	} else {
    584		if (hfunc)
    585			*hfunc = ETH_RSS_HASH_TOP;
    586		if (key) {
    587			ret = hclgevf_get_rss_hash_key(hdev);
    588			if (ret)
    589				return ret;
    590			memcpy(key, rss_cfg->rss_hash_key,
    591			       HCLGE_COMM_RSS_KEY_SIZE);
    592		}
    593	}
    594
    595	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
    596				     hdev->ae_dev->dev_specs.rss_ind_tbl_size);
    597
    598	return 0;
    599}
    600
    601static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
    602			   const u8 *key, const u8 hfunc)
    603{
    604	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    605	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    606	int ret, i;
    607
    608	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
    609		ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
    610						  hfunc);
    611		if (ret)
    612			return ret;
    613	}
    614
    615	/* update the shadow RSS table with user specified qids */
    616	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
    617		rss_cfg->rss_indirection_tbl[i] = indir[i];
    618
    619	/* update the hardware */
    620	return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
    621					      rss_cfg->rss_indirection_tbl);
    622}
    623
    624static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
    625				 struct ethtool_rxnfc *nfc)
    626{
    627	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    628	int ret;
    629
    630	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
    631		return -EOPNOTSUPP;
    632
    633	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
    634				       &hdev->rss_cfg, nfc);
    635	if (ret)
    636		dev_err(&hdev->pdev->dev,
    637			"failed to set rss tuple, ret = %d.\n", ret);
    638
    639	return ret;
    640}
    641
    642static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
    643				 struct ethtool_rxnfc *nfc)
    644{
    645	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    646	u8 tuple_sets;
    647	int ret;
    648
    649	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
    650		return -EOPNOTSUPP;
    651
    652	nfc->data = 0;
    653
    654	ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
    655				       &tuple_sets);
    656	if (ret || !tuple_sets)
    657		return ret;
    658
    659	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
    660
    661	return 0;
    662}
    663
    664static int hclgevf_get_tc_size(struct hnae3_handle *handle)
    665{
    666	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    667	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
    668
    669	return rss_cfg->rss_size;
    670}
    671
    672static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
    673				       int vector_id,
    674				       struct hnae3_ring_chain_node *ring_chain)
    675{
    676	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    677	struct hclge_vf_to_pf_msg send_msg;
    678	struct hnae3_ring_chain_node *node;
    679	int status;
    680	int i = 0;
    681
    682	memset(&send_msg, 0, sizeof(send_msg));
    683	send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
    684		HCLGE_MBX_UNMAP_RING_TO_VECTOR;
    685	send_msg.vector_id = vector_id;
    686
    687	for (node = ring_chain; node; node = node->next) {
    688		send_msg.param[i].ring_type =
    689				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
    690
    691		send_msg.param[i].tqp_index = node->tqp_index;
    692		send_msg.param[i].int_gl_index =
    693					hnae3_get_field(node->int_gl_idx,
    694							HNAE3_RING_GL_IDX_M,
    695							HNAE3_RING_GL_IDX_S);
    696
    697		i++;
    698		if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
    699			send_msg.ring_num = i;
    700
    701			status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
    702						      NULL, 0);
    703			if (status) {
    704				dev_err(&hdev->pdev->dev,
    705					"Map TQP fail, status is %d.\n",
    706					status);
    707				return status;
    708			}
    709			i = 0;
    710		}
    711	}
    712
    713	return 0;
    714}
    715
    716static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
    717				      struct hnae3_ring_chain_node *ring_chain)
    718{
    719	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    720	int vector_id;
    721
    722	vector_id = hclgevf_get_vector_index(hdev, vector);
    723	if (vector_id < 0) {
    724		dev_err(&handle->pdev->dev,
    725			"Get vector index fail. ret =%d\n", vector_id);
    726		return vector_id;
    727	}
    728
    729	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
    730}
    731
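/* Editor's note -- not part of the original file: a minimal sketch of how a
 * caller could describe two RX rings and map them to an interrupt vector
 * through hclgevf_map_ring_to_vector() above, which looks up the vector
 * index and lets hclgevf_bind_ring_to_vector() batch the chain into mailbox
 * messages of at most HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM entries. In the
 * real driver the chain is built by the enet layer; the helper name and
 * field values here are illustrative assumptions.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_map_two_rx_rings(struct hnae3_handle *handle, int vector)
{
	struct hnae3_ring_chain_node ring1 = { 0 };
	struct hnae3_ring_chain_node ring0 = { 0 };

	ring0.tqp_index = 0;
	hnae3_set_bit(ring0.flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_RX);
	ring0.next = &ring1;

	ring1.tqp_index = 1;
	hnae3_set_bit(ring1.flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_RX);
	ring1.next = NULL;

	return hclgevf_map_ring_to_vector(handle, vector, &ring0);
}
#endif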
    732static int hclgevf_unmap_ring_from_vector(
    733				struct hnae3_handle *handle,
    734				int vector,
    735				struct hnae3_ring_chain_node *ring_chain)
    736{
    737	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    738	int ret, vector_id;
    739
    740	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
    741		return 0;
    742
    743	vector_id = hclgevf_get_vector_index(hdev, vector);
    744	if (vector_id < 0) {
    745		dev_err(&handle->pdev->dev,
    746			"Get vector index fail. ret =%d\n", vector_id);
    747		return vector_id;
    748	}
    749
    750	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
    751	if (ret)
    752		dev_err(&handle->pdev->dev,
    753			"Unmap ring from vector fail. vector=%d, ret =%d\n",
    754			vector_id,
    755			ret);
    756
    757	return ret;
    758}
    759
    760static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
    761{
    762	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    763	int vector_id;
    764
    765	vector_id = hclgevf_get_vector_index(hdev, vector);
    766	if (vector_id < 0) {
    767		dev_err(&handle->pdev->dev,
    768			"hclgevf_put_vector get vector index fail. ret =%d\n",
    769			vector_id);
    770		return vector_id;
    771	}
    772
    773	hclgevf_free_vector(hdev, vector_id);
    774
    775	return 0;
    776}
    777
    778static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
    779					bool en_uc_pmc, bool en_mc_pmc,
    780					bool en_bc_pmc)
    781{
    782	struct hnae3_handle *handle = &hdev->nic;
    783	struct hclge_vf_to_pf_msg send_msg;
    784	int ret;
    785
    786	memset(&send_msg, 0, sizeof(send_msg));
    787	send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
    788	send_msg.en_bc = en_bc_pmc ? 1 : 0;
    789	send_msg.en_uc = en_uc_pmc ? 1 : 0;
    790	send_msg.en_mc = en_mc_pmc ? 1 : 0;
    791	send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC,
    792					     &handle->priv_flags) ? 1 : 0;
    793
    794	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
    795	if (ret)
    796		dev_err(&hdev->pdev->dev,
    797			"Set promisc mode fail, status is %d.\n", ret);
    798
    799	return ret;
    800}
    801
    802static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
    803				    bool en_mc_pmc)
    804{
    805	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    806	bool en_bc_pmc;
    807
    808	en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
    809
    810	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
    811					    en_bc_pmc);
    812}
    813
    814static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
    815{
    816	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    817
    818	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
    819	hclgevf_task_schedule(hdev, 0);
    820}
    821
    822static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
    823{
    824	struct hnae3_handle *handle = &hdev->nic;
    825	bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
    826	bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
    827	int ret;
    828
    829	if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
    830		ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
    831		if (!ret)
    832			clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
    833	}
    834}
    835
    836static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
    837				       u16 stream_id, bool enable)
    838{
    839	struct hclgevf_cfg_com_tqp_queue_cmd *req;
    840	struct hclge_desc desc;
    841
    842	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
    843
    844	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
    845	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
    846	req->stream_id = cpu_to_le16(stream_id);
    847	if (enable)
    848		req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
    849
    850	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
    851}
    852
    853static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
    854{
    855	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    856	int ret;
    857	u16 i;
    858
    859	for (i = 0; i < handle->kinfo.num_tqps; i++) {
    860		ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable);
    861		if (ret)
    862			return ret;
    863	}
    864
    865	return 0;
    866}
    867
    868static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
    869{
    870	struct hclge_vf_to_pf_msg send_msg;
    871	u8 host_mac[ETH_ALEN];
    872	int status;
    873
    874	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
    875	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
    876				      ETH_ALEN);
    877	if (status) {
    878		dev_err(&hdev->pdev->dev,
    879			"fail to get VF MAC from host %d", status);
    880		return status;
    881	}
    882
    883	ether_addr_copy(p, host_mac);
    884
    885	return 0;
    886}
    887
    888static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
    889{
    890	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    891	u8 host_mac_addr[ETH_ALEN];
    892
    893	if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
    894		return;
    895
    896	hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
    897	if (hdev->has_pf_mac)
    898		ether_addr_copy(p, host_mac_addr);
    899	else
    900		ether_addr_copy(p, hdev->hw.mac.mac_addr);
    901}
    902
    903static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
    904				bool is_first)
    905{
    906	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    907	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
    908	struct hclge_vf_to_pf_msg send_msg;
    909	u8 *new_mac_addr = (u8 *)p;
    910	int status;
    911
    912	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
    913	send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
    914	ether_addr_copy(send_msg.data, new_mac_addr);
    915	if (is_first && !hdev->has_pf_mac)
    916		eth_zero_addr(&send_msg.data[ETH_ALEN]);
    917	else
    918		ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
    919	status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
    920	if (!status)
    921		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
    922
    923	return status;
    924}
    925
    926static struct hclgevf_mac_addr_node *
    927hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
    928{
    929	struct hclgevf_mac_addr_node *mac_node, *tmp;
    930
    931	list_for_each_entry_safe(mac_node, tmp, list, node)
    932		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
    933			return mac_node;
    934
    935	return NULL;
    936}
    937
    938static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
    939				    enum HCLGEVF_MAC_NODE_STATE state)
    940{
    941	switch (state) {
    942	/* from set_rx_mode or tmp_add_list */
    943	case HCLGEVF_MAC_TO_ADD:
    944		if (mac_node->state == HCLGEVF_MAC_TO_DEL)
    945			mac_node->state = HCLGEVF_MAC_ACTIVE;
    946		break;
    947	/* only from set_rx_mode */
    948	case HCLGEVF_MAC_TO_DEL:
    949		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
    950			list_del(&mac_node->node);
    951			kfree(mac_node);
    952		} else {
    953			mac_node->state = HCLGEVF_MAC_TO_DEL;
    954		}
    955		break;
    956	/* only from tmp_add_list, the mac_node->state won't be
    957	 * HCLGEVF_MAC_ACTIVE
    958	 */
    959	case HCLGEVF_MAC_ACTIVE:
    960		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
    961			mac_node->state = HCLGEVF_MAC_ACTIVE;
    962		break;
    963	}
    964}
    965
    966static int hclgevf_update_mac_list(struct hnae3_handle *handle,
    967				   enum HCLGEVF_MAC_NODE_STATE state,
    968				   enum HCLGEVF_MAC_ADDR_TYPE mac_type,
    969				   const unsigned char *addr)
    970{
    971	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
    972	struct hclgevf_mac_addr_node *mac_node;
    973	struct list_head *list;
    974
    975	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
    976	       &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
    977
    978	spin_lock_bh(&hdev->mac_table.mac_list_lock);
    979
    980	/* if the mac addr is already in the mac list, no need to add a new
    981	 * one into it; just check the mac addr state and convert it to the
    982	 * new state, or remove it, or do nothing.
    983	 */
    984	mac_node = hclgevf_find_mac_node(list, addr);
    985	if (mac_node) {
    986		hclgevf_update_mac_node(mac_node, state);
    987		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
    988		return 0;
    989	}
    990	/* if this address was never added, there is no need to delete it */
    991	if (state == HCLGEVF_MAC_TO_DEL) {
    992		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
    993		return -ENOENT;
    994	}
    995
    996	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
    997	if (!mac_node) {
    998		spin_unlock_bh(&hdev->mac_table.mac_list_lock);
    999		return -ENOMEM;
   1000	}
   1001
   1002	mac_node->state = state;
   1003	ether_addr_copy(mac_node->mac_addr, addr);
   1004	list_add_tail(&mac_node->node, list);
   1005
   1006	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
   1007	return 0;
   1008}
   1009
   1010static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
   1011			       const unsigned char *addr)
   1012{
   1013	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
   1014				       HCLGEVF_MAC_ADDR_UC, addr);
   1015}
   1016
   1017static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
   1018			      const unsigned char *addr)
   1019{
   1020	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
   1021				       HCLGEVF_MAC_ADDR_UC, addr);
   1022}
   1023
   1024static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
   1025			       const unsigned char *addr)
   1026{
   1027	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
   1028				       HCLGEVF_MAC_ADDR_MC, addr);
   1029}
   1030
   1031static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
   1032			      const unsigned char *addr)
   1033{
   1034	return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
   1035				       HCLGEVF_MAC_ADDR_MC, addr);
   1036}
   1037
   1038static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
   1039				    struct hclgevf_mac_addr_node *mac_node,
   1040				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
   1041{
   1042	struct hclge_vf_to_pf_msg send_msg;
   1043	u8 code, subcode;
   1044
   1045	if (mac_type == HCLGEVF_MAC_ADDR_UC) {
   1046		code = HCLGE_MBX_SET_UNICAST;
   1047		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
   1048			subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
   1049		else
   1050			subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
   1051	} else {
   1052		code = HCLGE_MBX_SET_MULTICAST;
   1053		if (mac_node->state == HCLGEVF_MAC_TO_ADD)
   1054			subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
   1055		else
   1056			subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
   1057	}
   1058
   1059	hclgevf_build_send_msg(&send_msg, code, subcode);
   1060	ether_addr_copy(send_msg.data, mac_node->mac_addr);
   1061	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   1062}
   1063
   1064static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
   1065				    struct list_head *list,
   1066				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
   1067{
   1068	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
   1069	struct hclgevf_mac_addr_node *mac_node, *tmp;
   1070	int ret;
   1071
   1072	list_for_each_entry_safe(mac_node, tmp, list, node) {
   1073		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
   1074		if  (ret) {
   1075			hnae3_format_mac_addr(format_mac_addr,
   1076					      mac_node->mac_addr);
   1077			dev_err(&hdev->pdev->dev,
   1078				"failed to configure mac %s, state = %d, ret = %d\n",
   1079				format_mac_addr, mac_node->state, ret);
   1080			return;
   1081		}
   1082		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
   1083			mac_node->state = HCLGEVF_MAC_ACTIVE;
   1084		} else {
   1085			list_del(&mac_node->node);
   1086			kfree(mac_node);
   1087		}
   1088	}
   1089}
   1090
   1091static void hclgevf_sync_from_add_list(struct list_head *add_list,
   1092				       struct list_head *mac_list)
   1093{
   1094	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
   1095
   1096	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
   1097		/* if the mac address from tmp_add_list is not in the
   1098		 * uc/mc_mac_list, it means a TO_DEL request was received
   1099		 * while the mac config request was being sent to the PF.
   1100		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
   1101		 * is removed next time. If the state is TO_ADD, the TO_ADD
   1102		 * request failed, so just remove the mac node.
   1103		 */
   1104		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
   1105		if (new_node) {
   1106			hclgevf_update_mac_node(new_node, mac_node->state);
   1107			list_del(&mac_node->node);
   1108			kfree(mac_node);
   1109		} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
   1110			mac_node->state = HCLGEVF_MAC_TO_DEL;
   1111			list_move_tail(&mac_node->node, mac_list);
   1112		} else {
   1113			list_del(&mac_node->node);
   1114			kfree(mac_node);
   1115		}
   1116	}
   1117}
   1118
   1119static void hclgevf_sync_from_del_list(struct list_head *del_list,
   1120				       struct list_head *mac_list)
   1121{
   1122	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
   1123
   1124	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
   1125		new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
   1126		if (new_node) {
   1127			/* If the mac addr exists in the mac list, it means a
   1128			 * new TO_ADD request was received while the mac addr
   1129			 * config request was being sent to the PF, so just
   1130			 * change the mac state to ACTIVE.
   1131			 */
   1132			new_node->state = HCLGEVF_MAC_ACTIVE;
   1133			list_del(&mac_node->node);
   1134			kfree(mac_node);
   1135		} else {
   1136			list_move_tail(&mac_node->node, mac_list);
   1137		}
   1138	}
   1139}
   1140
   1141static void hclgevf_clear_list(struct list_head *list)
   1142{
   1143	struct hclgevf_mac_addr_node *mac_node, *tmp;
   1144
   1145	list_for_each_entry_safe(mac_node, tmp, list, node) {
   1146		list_del(&mac_node->node);
   1147		kfree(mac_node);
   1148	}
   1149}
   1150
   1151static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
   1152				  enum HCLGEVF_MAC_ADDR_TYPE mac_type)
   1153{
   1154	struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
   1155	struct list_head tmp_add_list, tmp_del_list;
   1156	struct list_head *list;
   1157
   1158	INIT_LIST_HEAD(&tmp_add_list);
   1159	INIT_LIST_HEAD(&tmp_del_list);
   1160
   1161	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
   1162	 * we can add/delete these mac addrs outside the spin lock
   1163	 */
   1164	list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
   1165		&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
   1166
   1167	spin_lock_bh(&hdev->mac_table.mac_list_lock);
   1168
   1169	list_for_each_entry_safe(mac_node, tmp, list, node) {
   1170		switch (mac_node->state) {
   1171		case HCLGEVF_MAC_TO_DEL:
   1172			list_move_tail(&mac_node->node, &tmp_del_list);
   1173			break;
   1174		case HCLGEVF_MAC_TO_ADD:
   1175			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
   1176			if (!new_node)
   1177				goto stop_traverse;
   1178
   1179			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
   1180			new_node->state = mac_node->state;
   1181			list_add_tail(&new_node->node, &tmp_add_list);
   1182			break;
   1183		default:
   1184			break;
   1185		}
   1186	}
   1187
   1188stop_traverse:
   1189	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
   1190
   1191	/* delete first, in order to get max mac table space for adding */
   1192	hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
   1193	hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
   1194
   1195	/* if adding/deleting some mac addresses failed, move them back to
   1196	 * the mac_list and retry next time.
   1197	 */
   1198	spin_lock_bh(&hdev->mac_table.mac_list_lock);
   1199
   1200	hclgevf_sync_from_del_list(&tmp_del_list, list);
   1201	hclgevf_sync_from_add_list(&tmp_add_list, list);
   1202
   1203	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
   1204}
   1205
   1206static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
   1207{
   1208	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
   1209	hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
   1210}
   1211
   1212static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
   1213{
   1214	spin_lock_bh(&hdev->mac_table.mac_list_lock);
   1215
   1216	hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
   1217	hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
   1218
   1219	spin_unlock_bh(&hdev->mac_table.mac_list_lock);
   1220}
   1221
   1222static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
   1223{
   1224	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   1225	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
   1226	struct hclge_vf_to_pf_msg send_msg;
   1227
   1228	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
   1229		return -EOPNOTSUPP;
   1230
   1231	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
   1232			       HCLGE_MBX_ENABLE_VLAN_FILTER);
   1233	send_msg.data[0] = enable ? 1 : 0;
   1234
   1235	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
   1236}
   1237
   1238static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
   1239				   __be16 proto, u16 vlan_id,
   1240				   bool is_kill)
   1241{
   1242	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   1243	struct hclge_mbx_vlan_filter *vlan_filter;
   1244	struct hclge_vf_to_pf_msg send_msg;
   1245	int ret;
   1246
   1247	if (vlan_id > HCLGEVF_MAX_VLAN_ID)
   1248		return -EINVAL;
   1249
   1250	if (proto != htons(ETH_P_8021Q))
   1251		return -EPROTONOSUPPORT;
   1252
   1253	/* When the device is resetting or the reset has failed, the firmware
   1254	 * cannot handle the mailbox. Just record the vlan id and remove it
   1255	 * after the reset has finished.
   1256	 */
   1257	if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
   1258	     test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
   1259		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
   1260		return -EBUSY;
   1261	}
   1262
   1263	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
   1264			       HCLGE_MBX_VLAN_FILTER);
   1265	vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
   1266	vlan_filter->is_kill = is_kill;
   1267	vlan_filter->vlan_id = cpu_to_le16(vlan_id);
   1268	vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
   1269
   1270	/* when removing the hw vlan filter fails, record the vlan id and
   1271	 * try to remove it from hw later, to stay consistent with the
   1272	 * stack.
   1273	 */
   1274	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
   1275	if (is_kill && ret)
   1276		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
   1277
   1278	return ret;
   1279}
   1280
   1281static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
   1282{
   1283#define HCLGEVF_MAX_SYNC_COUNT	60
   1284	struct hnae3_handle *handle = &hdev->nic;
   1285	int ret, sync_cnt = 0;
   1286	u16 vlan_id;
   1287
   1288	vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
   1289	while (vlan_id != VLAN_N_VID) {
   1290		ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
   1291					      vlan_id, true);
   1292		if (ret)
   1293			return;
   1294
   1295		clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
   1296		sync_cnt++;
   1297		if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
   1298			return;
   1299
   1300		vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
   1301	}
   1302}
   1303
   1304static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
   1305{
   1306	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   1307	struct hclge_vf_to_pf_msg send_msg;
   1308
   1309	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
   1310			       HCLGE_MBX_VLAN_RX_OFF_CFG);
   1311	send_msg.data[0] = enable ? 1 : 0;
   1312	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   1313}
   1314
   1315static int hclgevf_reset_tqp(struct hnae3_handle *handle)
   1316{
   1317#define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
   1318	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   1319	struct hclge_vf_to_pf_msg send_msg;
   1320	u8 return_status = 0;
   1321	int ret;
   1322	u16 i;
   1323
   1324	/* disable the vf queues before sending the queue reset msg to the PF */
   1325	ret = hclgevf_tqp_enable(handle, false);
   1326	if (ret) {
   1327		dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n",
   1328			ret);
   1329		return ret;
   1330	}
   1331
   1332	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
   1333
   1334	ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status,
   1335				   sizeof(return_status));
   1336	if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE)
   1337		return ret;
   1338
   1339	for (i = 1; i < handle->kinfo.num_tqps; i++) {
   1340		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
   1341		*(__le16 *)send_msg.data = cpu_to_le16(i);
   1342		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
   1343		if (ret)
   1344			return ret;
   1345	}
   1346
   1347	return 0;
   1348}
   1349
   1350static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
   1351{
   1352	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   1353	struct hclge_mbx_mtu_info *mtu_info;
   1354	struct hclge_vf_to_pf_msg send_msg;
   1355
   1356	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
   1357	mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
   1358	mtu_info->mtu = cpu_to_le32(new_mtu);
   1359
   1360	return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
   1361}
   1362
   1363static int hclgevf_notify_client(struct hclgevf_dev *hdev,
   1364				 enum hnae3_reset_notify_type type)
   1365{
   1366	struct hnae3_client *client = hdev->nic_client;
   1367	struct hnae3_handle *handle = &hdev->nic;
   1368	int ret;
   1369
   1370	if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
   1371	    !client)
   1372		return 0;
   1373
   1374	if (!client->ops->reset_notify)
   1375		return -EOPNOTSUPP;
   1376
   1377	ret = client->ops->reset_notify(handle, type);
   1378	if (ret)
   1379		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
   1380			type, ret);
   1381
   1382	return ret;
   1383}
   1384
   1385static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
   1386				      enum hnae3_reset_notify_type type)
   1387{
   1388	struct hnae3_client *client = hdev->roce_client;
   1389	struct hnae3_handle *handle = &hdev->roce;
   1390	int ret;
   1391
   1392	if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
   1393		return 0;
   1394
   1395	if (!client->ops->reset_notify)
   1396		return -EOPNOTSUPP;
   1397
   1398	ret = client->ops->reset_notify(handle, type);
   1399	if (ret)
   1400		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
   1401			type, ret);
   1402	return ret;
   1403}
   1404
   1405static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
   1406{
   1407#define HCLGEVF_RESET_WAIT_US	20000
   1408#define HCLGEVF_RESET_WAIT_CNT	2000
   1409#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
   1410	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
   1411
   1412	u32 val;
   1413	int ret;
   1414
   1415	if (hdev->reset_type == HNAE3_VF_RESET)
   1416		ret = readl_poll_timeout(hdev->hw.hw.io_base +
   1417					 HCLGEVF_VF_RST_ING, val,
   1418					 !(val & HCLGEVF_VF_RST_ING_BIT),
   1419					 HCLGEVF_RESET_WAIT_US,
   1420					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
   1421	else
   1422		ret = readl_poll_timeout(hdev->hw.hw.io_base +
   1423					 HCLGEVF_RST_ING, val,
   1424					 !(val & HCLGEVF_RST_ING_BITS),
   1425					 HCLGEVF_RESET_WAIT_US,
   1426					 HCLGEVF_RESET_WAIT_TIMEOUT_US);
   1427
   1428	/* hardware completion status should be available by this time */
   1429	if (ret) {
   1430		dev_err(&hdev->pdev->dev,
   1431			"couldn't get reset done status from h/w, timeout!\n");
   1432		return ret;
   1433	}
   1434
   1435	/* wait a bit more to let the reset of the stack complete. This can
   1436	 * happen when the reset was asserted by the PF. Yes, this also means
   1437	 * we might end up waiting a bit longer even for a VF reset.
   1438	 */
   1439	msleep(5000);
   1440
   1441	return 0;
   1442}
   1443
   1444static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
   1445{
   1446	u32 reg_val;
   1447
   1448	reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
   1449	if (enable)
   1450		reg_val |= HCLGEVF_NIC_SW_RST_RDY;
   1451	else
   1452		reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
   1453
   1454	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
   1455			  reg_val);
   1456}
   1457
   1458static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
   1459{
   1460	int ret;
   1461
   1462	/* uninitialize the nic client */
   1463	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
   1464	if (ret)
   1465		return ret;
   1466
   1467	/* re-initialize the hclge device */
   1468	ret = hclgevf_reset_hdev(hdev);
   1469	if (ret) {
   1470		dev_err(&hdev->pdev->dev,
   1471			"hclge device re-init failed, VF is disabled!\n");
   1472		return ret;
   1473	}
   1474
   1475	/* bring up the nic client again */
   1476	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
   1477	if (ret)
   1478		return ret;
   1479
   1480	/* clear handshake status with IMP */
   1481	hclgevf_reset_handshake(hdev, false);
   1482
   1483	/* bring up the nic to enable TX/RX again */
   1484	return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
   1485}
   1486
   1487static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
   1488{
   1489#define HCLGEVF_RESET_SYNC_TIME 100
   1490
   1491	if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
   1492		struct hclge_vf_to_pf_msg send_msg;
   1493		int ret;
   1494
   1495		hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
   1496		ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
   1497		if (ret) {
   1498			dev_err(&hdev->pdev->dev,
   1499				"failed to assert VF reset, ret = %d\n", ret);
   1500			return ret;
   1501		}
   1502		hdev->rst_stats.vf_func_rst_cnt++;
   1503	}
   1504
   1505	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
   1506	/* inform hardware that preparatory work is done */
   1507	msleep(HCLGEVF_RESET_SYNC_TIME);
   1508	hclgevf_reset_handshake(hdev, true);
   1509	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
   1510		 hdev->reset_type);
   1511
   1512	return 0;
   1513}
   1514
   1515static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
   1516{
   1517	dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
   1518		 hdev->rst_stats.vf_func_rst_cnt);
   1519	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
   1520		 hdev->rst_stats.flr_rst_cnt);
   1521	dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
   1522		 hdev->rst_stats.vf_rst_cnt);
   1523	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
   1524		 hdev->rst_stats.rst_done_cnt);
   1525	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
   1526		 hdev->rst_stats.hw_rst_done_cnt);
   1527	dev_info(&hdev->pdev->dev, "reset count: %u\n",
   1528		 hdev->rst_stats.rst_cnt);
   1529	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
   1530		 hdev->rst_stats.rst_fail_cnt);
   1531	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
   1532		 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
   1533	dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
   1534		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
   1535	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
   1536		 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
   1537	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
   1538		 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
   1539	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
   1540}
   1541
   1542static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
   1543{
   1544	/* recover the handshake status with IMP when the reset fails */
   1545	hclgevf_reset_handshake(hdev, true);
   1546	hdev->rst_stats.rst_fail_cnt++;
   1547	dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
   1548		hdev->rst_stats.rst_fail_cnt);
   1549
   1550	if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
   1551		set_bit(hdev->reset_type, &hdev->reset_pending);
   1552
   1553	if (hclgevf_is_reset_pending(hdev)) {
   1554		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
   1555		hclgevf_reset_task_schedule(hdev);
   1556	} else {
   1557		set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
   1558		hclgevf_dump_rst_info(hdev);
   1559	}
   1560}
   1561
   1562static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
   1563{
   1564	int ret;
   1565
   1566	hdev->rst_stats.rst_cnt++;
   1567
   1568	/* perform reset of the stack & ae device for a client */
   1569	ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
   1570	if (ret)
   1571		return ret;
   1572
   1573	rtnl_lock();
   1574	/* bring down the nic to stop any ongoing TX/RX */
   1575	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
   1576	rtnl_unlock();
   1577	if (ret)
   1578		return ret;
   1579
   1580	return hclgevf_reset_prepare_wait(hdev);
   1581}
   1582
   1583static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
   1584{
   1585	int ret;
   1586
   1587	hdev->rst_stats.hw_rst_done_cnt++;
   1588	ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
   1589	if (ret)
   1590		return ret;
   1591
   1592	rtnl_lock();
   1593	/* now, re-initialize the nic client and ae device */
   1594	ret = hclgevf_reset_stack(hdev);
   1595	rtnl_unlock();
   1596	if (ret) {
   1597		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
   1598		return ret;
   1599	}
   1600
   1601	ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
   1602	/* ignore the RoCE notify error only once the reset has already failed
   1603	 * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
   1604	 */
   1605	if (ret &&
   1606	    hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
   1607		return ret;
   1608
   1609	ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
   1610	if (ret)
   1611		return ret;
   1612
   1613	hdev->last_reset_time = jiffies;
   1614	hdev->rst_stats.rst_done_cnt++;
   1615	hdev->rst_stats.rst_fail_cnt = 0;
   1616	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
   1617
   1618	return 0;
   1619}
   1620
   1621static void hclgevf_reset(struct hclgevf_dev *hdev)
   1622{
   1623	if (hclgevf_reset_prepare(hdev))
   1624		goto err_reset;
   1625
   1626	/* check if VF could successfully fetch the hardware reset completion
   1627	 * status from the hardware
   1628	 */
   1629	if (hclgevf_reset_wait(hdev)) {
   1630		/* can't do much in this situation, will disable VF */
   1631		dev_err(&hdev->pdev->dev,
   1632			"failed to fetch H/W reset completion status\n");
   1633		goto err_reset;
   1634	}
   1635
   1636	if (hclgevf_reset_rebuild(hdev))
   1637		goto err_reset;
   1638
   1639	return;
   1640
   1641err_reset:
   1642	hclgevf_reset_err_handle(hdev);
   1643}
   1644
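       /* Pick the highest-severity pending reset from @addr and clear the
        * request bits for any lower-severity resets that it also covers.
        */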
   1645static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
   1646						     unsigned long *addr)
   1647{
   1648	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
   1649
   1650	/* return the highest priority reset level amongst all */
   1651	if (test_bit(HNAE3_VF_RESET, addr)) {
   1652		rst_level = HNAE3_VF_RESET;
   1653		clear_bit(HNAE3_VF_RESET, addr);
   1654		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
   1655		clear_bit(HNAE3_VF_FUNC_RESET, addr);
   1656	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
   1657		rst_level = HNAE3_VF_FULL_RESET;
   1658		clear_bit(HNAE3_VF_FULL_RESET, addr);
   1659		clear_bit(HNAE3_VF_FUNC_RESET, addr);
   1660	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
   1661		rst_level = HNAE3_VF_PF_FUNC_RESET;
   1662		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
   1663		clear_bit(HNAE3_VF_FUNC_RESET, addr);
   1664	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
   1665		rst_level = HNAE3_VF_FUNC_RESET;
   1666		clear_bit(HNAE3_VF_FUNC_RESET, addr);
   1667	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
   1668		rst_level = HNAE3_FLR_RESET;
   1669		clear_bit(HNAE3_FLR_RESET, addr);
   1670	}
   1671
   1672	return rst_level;
   1673}
   1674
   1675static void hclgevf_reset_event(struct pci_dev *pdev,
   1676				struct hnae3_handle *handle)
   1677{
   1678	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
   1679	struct hclgevf_dev *hdev = ae_dev->priv;
   1680
   1681	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
   1682
   1683	if (hdev->default_reset_request)
   1684		hdev->reset_level =
   1685			hclgevf_get_reset_level(hdev,
   1686						&hdev->default_reset_request);
   1687	else
   1688		hdev->reset_level = HNAE3_VF_FUNC_RESET;
   1689
   1690	/* reset of this VF requested */
   1691	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
   1692	hclgevf_reset_task_schedule(hdev);
   1693
   1694	hdev->last_reset_time = jiffies;
   1695}
   1696
   1697static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
   1698					  enum hnae3_reset_type rst_type)
   1699{
   1700	struct hclgevf_dev *hdev = ae_dev->priv;
   1701
   1702	set_bit(rst_type, &hdev->default_reset_request);
   1703}
   1704
   1705static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
   1706{
   1707	writel(en ? 1 : 0, vector->addr);
   1708}
   1709
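       /* Called through the .reset_prepare hook of the hnae3 framework. The
        * prepare step is retried up to HCLGEVF_RESET_RETRY_CNT times; after a
        * successful prepare the reset semaphore and the RST_HANDLING bit stay
        * held until hclgevf_reset_done() releases them.
        */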
   1710static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
   1711					  enum hnae3_reset_type rst_type)
   1712{
   1713#define HCLGEVF_RESET_RETRY_WAIT_MS	500
   1714#define HCLGEVF_RESET_RETRY_CNT		5
   1715
   1716	struct hclgevf_dev *hdev = ae_dev->priv;
   1717	int retry_cnt = 0;
   1718	int ret;
   1719
   1720	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
   1721		down(&hdev->reset_sem);
   1722		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
   1723		hdev->reset_type = rst_type;
   1724		ret = hclgevf_reset_prepare(hdev);
   1725		if (!ret && !hdev->reset_pending)
   1726			break;
   1727
   1728		dev_err(&hdev->pdev->dev,
   1729			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
   1730			ret, hdev->reset_pending, retry_cnt);
   1731		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
   1732		up(&hdev->reset_sem);
   1733		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
   1734	}
   1735
   1736	/* disable the misc vector before the reset is done */
   1737	hclgevf_enable_vector(&hdev->misc_vector, false);
   1738
   1739	if (hdev->reset_type == HNAE3_FLR_RESET)
   1740		hdev->rst_stats.flr_rst_cnt++;
   1741}
   1742
   1743static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
   1744{
   1745	struct hclgevf_dev *hdev = ae_dev->priv;
   1746	int ret;
   1747
   1748	hclgevf_enable_vector(&hdev->misc_vector, true);
   1749
   1750	ret = hclgevf_reset_rebuild(hdev);
   1751	if (ret)
   1752		dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
   1753			 ret);
   1754
   1755	hdev->reset_type = HNAE3_NONE_RESET;
   1756	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
   1757	up(&hdev->reset_sem);
   1758}
   1759
   1760static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
   1761{
   1762	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   1763
   1764	return hdev->fw_version;
   1765}
   1766
   1767static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
   1768{
   1769	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
   1770
   1771	vector->vector_irq = pci_irq_vector(hdev->pdev,
   1772					    HCLGEVF_MISC_VECTOR_NUM);
   1773	vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
   1774	/* vector status always valid for Vector 0 */
   1775	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
   1776	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
   1777
   1778	hdev->num_msi_left -= 1;
   1779	hdev->num_msi_used += 1;
   1780}
   1781
   1782void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
   1783{
   1784	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
   1785	    test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
   1786	    !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
   1787			      &hdev->state))
   1788		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
   1789}
   1790
   1791void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
   1792{
   1793	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
   1794	    !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
   1795			      &hdev->state))
   1796		mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
   1797}
   1798
   1799static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
   1800				  unsigned long delay)
   1801{
   1802	if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
   1803	    !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
   1804		mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
   1805}
   1806
   1807static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
   1808{
   1809#define	HCLGEVF_MAX_RESET_ATTEMPTS_CNT	3
   1810
   1811	if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
   1812		return;
   1813
   1814	down(&hdev->reset_sem);
   1815	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
   1816
   1817	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
   1818			       &hdev->reset_state)) {
   1819		/* PF has intimated that it is about to reset the hardware.
   1820		 * We now have to poll & check if hardware has actually
   1821		 * completed the reset sequence. On hardware reset completion,
   1822		 * VF needs to reset the client and ae device.
   1823		 */
   1824		hdev->reset_attempts = 0;
   1825
   1826		hdev->last_reset_time = jiffies;
   1827		hdev->reset_type =
   1828			hclgevf_get_reset_level(hdev, &hdev->reset_pending);
   1829		if (hdev->reset_type != HNAE3_NONE_RESET)
   1830			hclgevf_reset(hdev);
   1831	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
   1832				      &hdev->reset_state)) {
   1833		/* We could be here when either of the below happens:
   1834		 * 1. reset was initiated due to a watchdog timeout caused by
   1835		 *    a. IMP was reset earlier and our TX got choked down,
   1836		 *       which made the watchdog react and induce a VF reset.
   1837		 *       This also means our cmdq would be unreliable.
   1838		 *    b. a problem in TX due to some other lower layer (e.g. the
   1839		 *       link layer not functioning properly).
   1840		 * 2. VF reset might have been initiated due to some config
   1841		 *    change.
   1842		 *
   1843		 * NOTE: There is no clearer way to detect the above cases than
   1844		 * to react to the PF's response to this reset request. PF will
   1845		 * ack cases 1b and 2, but we will get no indication of 1a from
   1846		 * PF as the cmdq would be in an unreliable state, i.e. mailbox
   1847		 * communication between PF and VF would be broken.
   1848		 *
   1849		 * If we never get into the pending state, it means either:
   1850		 * 1. PF is not receiving our request, which could be due to an
   1851		 *    IMP reset
   1852		 * 2. PF is in a bad state
   1853		 * We cannot do much for 2, but as a first check we can try to
   1854		 * reset our PCIe + stack and see if it alleviates the problem.
   1855		 */
   1856		if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
   1857			/* prepare for full reset of stack + pcie interface */
   1858			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
   1859
   1860			/* "defer" the reset by scheduling the reset task again */
   1861			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
   1862		} else {
   1863			hdev->reset_attempts++;
   1864
   1865			set_bit(hdev->reset_level, &hdev->reset_pending);
   1866			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
   1867		}
   1868		hclgevf_reset_task_schedule(hdev);
   1869	}
   1870
   1871	hdev->reset_type = HNAE3_NONE_RESET;
   1872	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
   1873	up(&hdev->reset_sem);
   1874}
   1875
   1876static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
   1877{
   1878	if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
   1879		return;
   1880
   1881	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
   1882		return;
   1883
   1884	hclgevf_mbx_async_handler(hdev);
   1885
   1886	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
   1887}
   1888
   1889static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
   1890{
   1891	struct hclge_vf_to_pf_msg send_msg;
   1892	int ret;
   1893
   1894	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
   1895		return;
   1896
   1897	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
   1898	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   1899	if (ret)
   1900		dev_err(&hdev->pdev->dev,
   1901			"VF sends keep alive cmd failed(=%d)\n", ret);
   1902}
   1903
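       /* Periodic housekeeping: runs at most about once per second. It
        * periodically sends the keep-alive mailbox message, updates TQP
        * statistics, refreshes link status and link mode, syncs VLAN filters,
        * the MAC table and promiscuous mode, then reschedules itself.
        */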
   1904static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
   1905{
   1906	unsigned long delta = round_jiffies_relative(HZ);
   1907	struct hnae3_handle *handle = &hdev->nic;
   1908
   1909	if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
   1910		return;
   1911
   1912	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
   1913		delta = jiffies - hdev->last_serv_processed;
   1914
   1915		if (delta < round_jiffies_relative(HZ)) {
   1916			delta = round_jiffies_relative(HZ) - delta;
   1917			goto out;
   1918		}
   1919	}
   1920
   1921	hdev->serv_processed_cnt++;
   1922	if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
   1923		hclgevf_keep_alive(hdev);
   1924
   1925	if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
   1926		hdev->last_serv_processed = jiffies;
   1927		goto out;
   1928	}
   1929
   1930	if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
   1931		hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
   1932
   1933	/* VF does not need to request link status when this bit is set, because
   1934	 * PF will push its link status to the VFs whenever the link status changes.
   1935	 */
   1936	if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state))
   1937		hclgevf_request_link_info(hdev);
   1938
   1939	hclgevf_update_link_mode(hdev);
   1940
   1941	hclgevf_sync_vlan_filter(hdev);
   1942
   1943	hclgevf_sync_mac_table(hdev);
   1944
   1945	hclgevf_sync_promisc_mode(hdev);
   1946
   1947	hdev->last_serv_processed = jiffies;
   1948
   1949out:
   1950	hclgevf_task_schedule(hdev, delta);
   1951}
   1952
   1953static void hclgevf_service_task(struct work_struct *work)
   1954{
   1955	struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
   1956						service_task.work);
   1957
   1958	hclgevf_reset_service_task(hdev);
   1959	hclgevf_mailbox_service_task(hdev);
   1960	hclgevf_periodic_service_task(hdev);
   1961
   1962	/* Handle reset and mbx again in case periodical task delays the
   1963	 * handling by calling hclgevf_task_schedule() in
   1964	 * hclgevf_periodic_service_task()
   1965	 */
   1966	hclgevf_reset_service_task(hdev);
   1967	hclgevf_mailbox_service_task(hdev);
   1968}
   1969
   1970static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
   1971{
   1972	hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
   1973}
   1974
   1975static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
   1976						      u32 *clearval)
   1977{
   1978	u32 val, cmdq_stat_reg, rst_ing_reg;
   1979
   1980	/* fetch the events from their corresponding regs */
   1981	cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
   1982					 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
   1983	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
   1984		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
   1985		dev_info(&hdev->pdev->dev,
   1986			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
   1987		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
   1988		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
   1989		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
   1990		*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
   1991		hdev->rst_stats.vf_rst_cnt++;
   1992		/* set up the VF hardware reset status; the PF will clear
   1993		 * this status once PF initialization is done.
   1994		 */
   1995		val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
   1996		hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
   1997				  val | HCLGEVF_VF_RST_ING_BIT);
   1998		return HCLGEVF_VECTOR0_EVENT_RST;
   1999	}
   2000
   2001	/* check for vector0 mailbox(=CMDQ RX) event source */
   2002	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
   2003		/* For revision 0x21, an interrupt is cleared by writing 0 to
   2004		 * its bit in the clear register; writing 1 keeps the old
   2005		 * value.
   2006		 * For revision 0x20, the clear register is a read & write
   2007		 * register, so we should just write 0 to the bit we are
   2008		 * handling, and keep the other bits as in cmdq_stat_reg.
   2009		 */
   2010		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
   2011			*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
   2012		else
   2013			*clearval = cmdq_stat_reg &
   2014				    ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
   2015
   2016		return HCLGEVF_VECTOR0_EVENT_MBX;
   2017	}
   2018
   2019	/* print other vector0 event source */
   2020	dev_info(&hdev->pdev->dev,
   2021		 "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
   2022		 cmdq_stat_reg);
   2023
   2024	return HCLGEVF_VECTOR0_EVENT_OTHER;
   2025}
   2026
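       /* Misc (vector 0) interrupt handler: mask the vector, decode the event
        * source (PF-initiated reset or mailbox message), clear it, schedule or
        * run the matching handler, then unmask the vector again.
        */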
   2027static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
   2028{
   2029	enum hclgevf_evt_cause event_cause;
   2030	struct hclgevf_dev *hdev = data;
   2031	u32 clearval;
   2032
   2033	hclgevf_enable_vector(&hdev->misc_vector, false);
   2034	event_cause = hclgevf_check_evt_cause(hdev, &clearval);
   2035	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
   2036		hclgevf_clear_event_cause(hdev, clearval);
   2037
   2038	switch (event_cause) {
   2039	case HCLGEVF_VECTOR0_EVENT_RST:
   2040		hclgevf_reset_task_schedule(hdev);
   2041		break;
   2042	case HCLGEVF_VECTOR0_EVENT_MBX:
   2043		hclgevf_mbx_handler(hdev);
   2044		break;
   2045	default:
   2046		break;
   2047	}
   2048
   2049	hclgevf_enable_vector(&hdev->misc_vector, true);
   2050
   2051	return IRQ_HANDLED;
   2052}
   2053
   2054static int hclgevf_configure(struct hclgevf_dev *hdev)
   2055{
   2056	int ret;
   2057
   2058	hdev->gro_en = true;
   2059
   2060	ret = hclgevf_get_basic_info(hdev);
   2061	if (ret)
   2062		return ret;
   2063
   2064	/* get current port based vlan state from PF */
   2065	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
   2066	if (ret)
   2067		return ret;
   2068
   2069	/* get queue configuration from PF */
   2070	ret = hclgevf_get_queue_info(hdev);
   2071	if (ret)
   2072		return ret;
   2073
   2074	/* get queue depth info from PF */
   2075	ret = hclgevf_get_queue_depth(hdev);
   2076	if (ret)
   2077		return ret;
   2078
   2079	return hclgevf_get_pf_media_type(hdev);
   2080}
   2081
   2082static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
   2083{
   2084	struct pci_dev *pdev = ae_dev->pdev;
   2085	struct hclgevf_dev *hdev;
   2086
   2087	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
   2088	if (!hdev)
   2089		return -ENOMEM;
   2090
   2091	hdev->pdev = pdev;
   2092	hdev->ae_dev = ae_dev;
   2093	ae_dev->priv = hdev;
   2094
   2095	return 0;
   2096}
   2097
   2098static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
   2099{
   2100	struct hnae3_handle *roce = &hdev->roce;
   2101	struct hnae3_handle *nic = &hdev->nic;
   2102
   2103	roce->rinfo.num_vectors = hdev->num_roce_msix;
   2104
   2105	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
   2106	    hdev->num_msi_left == 0)
   2107		return -EINVAL;
   2108
   2109	roce->rinfo.base_vector = hdev->roce_base_msix_offset;
   2110
   2111	roce->rinfo.netdev = nic->kinfo.netdev;
   2112	roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
   2113	roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
   2114
   2115	roce->pdev = nic->pdev;
   2116	roce->ae_algo = nic->ae_algo;
   2117	roce->numa_node_mask = nic->numa_node_mask;
   2118
   2119	return 0;
   2120}
   2121
   2122static int hclgevf_config_gro(struct hclgevf_dev *hdev)
   2123{
   2124	struct hclgevf_cfg_gro_status_cmd *req;
   2125	struct hclge_desc desc;
   2126	int ret;
   2127
   2128	if (!hnae3_dev_gro_supported(hdev))
   2129		return 0;
   2130
   2131	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
   2132				     false);
   2133	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
   2134
   2135	req->gro_en = hdev->gro_en ? 1 : 0;
   2136
   2137	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
   2138	if (ret)
   2139		dev_err(&hdev->pdev->dev,
   2140			"VF GRO hardware config cmd failed, ret = %d.\n", ret);
   2141
   2142	return ret;
   2143}
   2144
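       /* Program the cached RSS configuration into hardware: hash key and
        * input tuples (V2+ hardware only), indirection table and TC mode.
        */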
   2145static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
   2146{
   2147	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
   2148	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
   2149	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
   2150	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
   2151	int ret;
   2152
   2153	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
   2154		ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
   2155						  rss_cfg->rss_algo,
   2156						  rss_cfg->rss_hash_key);
   2157		if (ret)
   2158			return ret;
   2159
   2160		ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
   2161						     false, rss_cfg);
   2162		if (ret)
   2163			return ret;
   2164	}
   2165
   2166	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
   2167					     rss_cfg->rss_indirection_tbl);
   2168	if (ret)
   2169		return ret;
   2170
   2171	hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
   2172				   tc_offset, tc_valid, tc_size);
   2173
   2174	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
   2175					  tc_valid, tc_size);
   2176}
   2177
   2178static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
   2179{
   2180	struct hnae3_handle *nic = &hdev->nic;
   2181	int ret;
   2182
   2183	ret = hclgevf_en_hw_strip_rxvtag(nic, true);
   2184	if (ret) {
   2185		dev_err(&hdev->pdev->dev,
   2186			"failed to enable rx vlan offload, ret = %d\n", ret);
   2187		return ret;
   2188	}
   2189
   2190	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
   2191				       false);
   2192}
   2193
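       /* Busy-wait (bounded by HCLGEVF_FLUSH_LINK_TIMEOUT iterations) until an
        * in-flight link update in the service task has finished, so a stale
        * link state is not reported after the timer task has been stopped.
        */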
   2194static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
   2195{
   2196#define HCLGEVF_FLUSH_LINK_TIMEOUT	100000
   2197
   2198	unsigned long last = hdev->serv_processed_cnt;
   2199	int i = 0;
   2200
   2201	while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
   2202	       i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
   2203	       last == hdev->serv_processed_cnt)
   2204		usleep_range(1, 1);
   2205}
   2206
   2207static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
   2208{
   2209	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   2210
   2211	if (enable) {
   2212		hclgevf_task_schedule(hdev, 0);
   2213	} else {
   2214		set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
   2215
   2216		/* flush memory to make sure DOWN is seen by service task */
   2217		smp_mb__before_atomic();
   2218		hclgevf_flush_link_update(hdev);
   2219	}
   2220}
   2221
   2222static int hclgevf_ae_start(struct hnae3_handle *handle)
   2223{
   2224	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   2225
   2226	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
   2227	clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
   2228
   2229	hclge_comm_reset_tqp_stats(handle);
   2230
   2231	hclgevf_request_link_info(hdev);
   2232
   2233	hclgevf_update_link_mode(hdev);
   2234
   2235	return 0;
   2236}
   2237
   2238static void hclgevf_ae_stop(struct hnae3_handle *handle)
   2239{
   2240	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   2241
   2242	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
   2243
   2244	if (hdev->reset_type != HNAE3_VF_RESET)
   2245		hclgevf_reset_tqp(handle);
   2246
   2247	hclge_comm_reset_tqp_stats(handle);
   2248	hclgevf_update_link_status(hdev, 0);
   2249}
   2250
   2251static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
   2252{
   2253#define HCLGEVF_STATE_ALIVE	1
   2254#define HCLGEVF_STATE_NOT_ALIVE	0
   2255
   2256	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   2257	struct hclge_vf_to_pf_msg send_msg;
   2258
   2259	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
   2260	send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
   2261				HCLGEVF_STATE_NOT_ALIVE;
   2262	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   2263}
   2264
   2265static int hclgevf_client_start(struct hnae3_handle *handle)
   2266{
   2267	return hclgevf_set_alive(handle, true);
   2268}
   2269
   2270static void hclgevf_client_stop(struct hnae3_handle *handle)
   2271{
   2272	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   2273	int ret;
   2274
   2275	ret = hclgevf_set_alive(handle, false);
   2276	if (ret)
   2277		dev_warn(&hdev->pdev->dev,
   2278			 "%s failed %d\n", __func__, ret);
   2279}
   2280
   2281static void hclgevf_state_init(struct hclgevf_dev *hdev)
   2282{
   2283	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
   2284	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
   2285	clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
   2286
   2287	INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
   2288
   2289	mutex_init(&hdev->mbx_resp.mbx_mutex);
   2290	sema_init(&hdev->reset_sem, 1);
   2291
   2292	spin_lock_init(&hdev->mac_table.mac_list_lock);
   2293	INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
   2294	INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
   2295
   2296	/* bring the device down */
   2297	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
   2298}
   2299
   2300static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
   2301{
   2302	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
   2303	set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
   2304
   2305	if (hdev->service_task.work.func)
   2306		cancel_delayed_work_sync(&hdev->service_task);
   2307
   2308	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
   2309}
   2310
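       /* Allocate MSI/MSI-X vectors and the per-vector bookkeeping arrays.
        * RoCE-capable devices require MSI-X and at least
        * roce_base_msix_offset + 1 vectors; others accept MSI or MSI-X with at
        * least HNAE3_MIN_VECTOR_NUM vectors.
        */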
   2311static int hclgevf_init_msi(struct hclgevf_dev *hdev)
   2312{
   2313	struct pci_dev *pdev = hdev->pdev;
   2314	int vectors;
   2315	int i;
   2316
   2317	if (hnae3_dev_roce_supported(hdev))
   2318		vectors = pci_alloc_irq_vectors(pdev,
   2319						hdev->roce_base_msix_offset + 1,
   2320						hdev->num_msi,
   2321						PCI_IRQ_MSIX);
   2322	else
   2323		vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
   2324						hdev->num_msi,
   2325						PCI_IRQ_MSI | PCI_IRQ_MSIX);
   2326
   2327	if (vectors < 0) {
   2328		dev_err(&pdev->dev,
   2329			"failed(%d) to allocate MSI/MSI-X vectors\n",
   2330			vectors);
   2331		return vectors;
   2332	}
   2333	if (vectors < hdev->num_msi)
   2334		dev_warn(&hdev->pdev->dev,
   2335			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
   2336			 hdev->num_msi, vectors);
   2337
   2338	hdev->num_msi = vectors;
   2339	hdev->num_msi_left = vectors;
   2340
   2341	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
   2342					   sizeof(u16), GFP_KERNEL);
   2343	if (!hdev->vector_status) {
   2344		pci_free_irq_vectors(pdev);
   2345		return -ENOMEM;
   2346	}
   2347
   2348	for (i = 0; i < hdev->num_msi; i++)
   2349		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
   2350
   2351	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
   2352					sizeof(int), GFP_KERNEL);
   2353	if (!hdev->vector_irq) {
   2354		devm_kfree(&pdev->dev, hdev->vector_status);
   2355		pci_free_irq_vectors(pdev);
   2356		return -ENOMEM;
   2357	}
   2358
   2359	return 0;
   2360}
   2361
   2362static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
   2363{
   2364	struct pci_dev *pdev = hdev->pdev;
   2365
   2366	devm_kfree(&pdev->dev, hdev->vector_status);
   2367	devm_kfree(&pdev->dev, hdev->vector_irq);
   2368	pci_free_irq_vectors(pdev);
   2369}
   2370
   2371static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
   2372{
   2373	int ret;
   2374
   2375	hclgevf_get_misc_vector(hdev);
   2376
   2377	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
   2378		 HCLGEVF_NAME, pci_name(hdev->pdev));
   2379	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
   2380			  0, hdev->misc_vector.name, hdev);
   2381	if (ret) {
   2382		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
   2383			hdev->misc_vector.vector_irq);
   2384		return ret;
   2385	}
   2386
   2387	hclgevf_clear_event_cause(hdev, 0);
   2388
   2389	/* enable misc. vector(vector 0) */
   2390	hclgevf_enable_vector(&hdev->misc_vector, true);
   2391
   2392	return ret;
   2393}
   2394
   2395static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
   2396{
   2397	/* disable misc vector(vector 0) */
   2398	hclgevf_enable_vector(&hdev->misc_vector, false);
   2399	synchronize_irq(hdev->misc_vector.vector_irq);
   2400	free_irq(hdev->misc_vector.vector_irq, hdev);
   2401	hclgevf_free_vector(hdev, 0);
   2402}
   2403
   2404static void hclgevf_info_show(struct hclgevf_dev *hdev)
   2405{
   2406	struct device *dev = &hdev->pdev->dev;
   2407
   2408	dev_info(dev, "VF info begin:\n");
   2409
   2410	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
   2411	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
   2412	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
   2413	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
   2414	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
   2415	dev_info(dev, "PF media type of this VF: %u\n",
   2416		 hdev->hw.mac.media_type);
   2417
   2418	dev_info(dev, "VF info end.\n");
   2419}
   2420
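       /* Register the NIC client instance. If a reset starts while the
        * instance is being initialized (RST_HANDLING set or rst_cnt changed),
        * the registration is undone and -EBUSY is returned.
        */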
   2421static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
   2422					    struct hnae3_client *client)
   2423{
   2424	struct hclgevf_dev *hdev = ae_dev->priv;
   2425	int rst_cnt = hdev->rst_stats.rst_cnt;
   2426	int ret;
   2427
   2428	ret = client->ops->init_instance(&hdev->nic);
   2429	if (ret)
   2430		return ret;
   2431
   2432	set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
   2433	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
   2434	    rst_cnt != hdev->rst_stats.rst_cnt) {
   2435		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
   2436
   2437		client->ops->uninit_instance(&hdev->nic, 0);
   2438		return -EBUSY;
   2439	}
   2440
   2441	hnae3_set_client_init_flag(client, ae_dev, 1);
   2442
   2443	if (netif_msg_drv(&hdev->nic))
   2444		hclgevf_info_show(hdev);
   2445
   2446	return 0;
   2447}
   2448
   2449static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
   2450					     struct hnae3_client *client)
   2451{
   2452	struct hclgevf_dev *hdev = ae_dev->priv;
   2453	int ret;
   2454
   2455	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
   2456	    !hdev->nic_client)
   2457		return 0;
   2458
   2459	ret = hclgevf_init_roce_base_info(hdev);
   2460	if (ret)
   2461		return ret;
   2462
   2463	ret = client->ops->init_instance(&hdev->roce);
   2464	if (ret)
   2465		return ret;
   2466
   2467	set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
   2468	hnae3_set_client_init_flag(client, ae_dev, 1);
   2469
   2470	return 0;
   2471}
   2472
   2473static int hclgevf_init_client_instance(struct hnae3_client *client,
   2474					struct hnae3_ae_dev *ae_dev)
   2475{
   2476	struct hclgevf_dev *hdev = ae_dev->priv;
   2477	int ret;
   2478
   2479	switch (client->type) {
   2480	case HNAE3_CLIENT_KNIC:
   2481		hdev->nic_client = client;
   2482		hdev->nic.client = client;
   2483
   2484		ret = hclgevf_init_nic_client_instance(ae_dev, client);
   2485		if (ret)
   2486			goto clear_nic;
   2487
   2488		ret = hclgevf_init_roce_client_instance(ae_dev,
   2489							hdev->roce_client);
   2490		if (ret)
   2491			goto clear_roce;
   2492
   2493		break;
   2494	case HNAE3_CLIENT_ROCE:
   2495		if (hnae3_dev_roce_supported(hdev)) {
   2496			hdev->roce_client = client;
   2497			hdev->roce.client = client;
   2498		}
   2499
   2500		ret = hclgevf_init_roce_client_instance(ae_dev, client);
   2501		if (ret)
   2502			goto clear_roce;
   2503
   2504		break;
   2505	default:
   2506		return -EINVAL;
   2507	}
   2508
   2509	return 0;
   2510
   2511clear_nic:
   2512	hdev->nic_client = NULL;
   2513	hdev->nic.client = NULL;
   2514	return ret;
   2515clear_roce:
   2516	hdev->roce_client = NULL;
   2517	hdev->roce.client = NULL;
   2518	return ret;
   2519}
   2520
   2521static void hclgevf_uninit_client_instance(struct hnae3_client *client,
   2522					   struct hnae3_ae_dev *ae_dev)
   2523{
   2524	struct hclgevf_dev *hdev = ae_dev->priv;
   2525
   2526	/* un-init roce, if it exists */
   2527	if (hdev->roce_client) {
   2528		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
   2529			msleep(HCLGEVF_WAIT_RESET_DONE);
   2530		clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
   2531
   2532		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
   2533		hdev->roce_client = NULL;
   2534		hdev->roce.client = NULL;
   2535	}
   2536
   2537	/* un-init nic/unic, if this was not called by roce client */
   2538	if (client->ops->uninit_instance && hdev->nic_client &&
   2539	    client->type != HNAE3_CLIENT_ROCE) {
   2540		while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
   2541			msleep(HCLGEVF_WAIT_RESET_DONE);
   2542		clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
   2543
   2544		client->ops->uninit_instance(&hdev->nic, 0);
   2545		hdev->nic_client = NULL;
   2546		hdev->nic.client = NULL;
   2547	}
   2548}
   2549
   2550static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
   2551{
   2552	struct pci_dev *pdev = hdev->pdev;
   2553	struct hclgevf_hw *hw = &hdev->hw;
   2554
   2555	/* if the device does not have device memory, return directly */
   2556	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
   2557		return 0;
   2558
   2559	hw->hw.mem_base =
   2560		devm_ioremap_wc(&pdev->dev,
   2561				pci_resource_start(pdev, HCLGEVF_MEM_BAR),
   2562				pci_resource_len(pdev, HCLGEVF_MEM_BAR));
   2563	if (!hw->hw.mem_base) {
   2564		dev_err(&pdev->dev, "failed to map device memory\n");
   2565		return -EFAULT;
   2566	}
   2567
   2568	return 0;
   2569}
   2570
   2571static int hclgevf_pci_init(struct hclgevf_dev *hdev)
   2572{
   2573	struct pci_dev *pdev = hdev->pdev;
   2574	struct hclgevf_hw *hw;
   2575	int ret;
   2576
   2577	ret = pci_enable_device(pdev);
   2578	if (ret) {
   2579		dev_err(&pdev->dev, "failed to enable PCI device\n");
   2580		return ret;
   2581	}
   2582
   2583	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   2584	if (ret) {
   2585		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
   2586		goto err_disable_device;
   2587	}
   2588
   2589	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
   2590	if (ret) {
   2591		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
   2592		goto err_disable_device;
   2593	}
   2594
   2595	pci_set_master(pdev);
   2596	hw = &hdev->hw;
   2597	hw->hw.io_base = pci_iomap(pdev, 2, 0);
   2598	if (!hw->hw.io_base) {
   2599		dev_err(&pdev->dev, "can't map configuration register space\n");
   2600		ret = -ENOMEM;
   2601		goto err_clr_master;
   2602	}
   2603
   2604	ret = hclgevf_dev_mem_map(hdev);
   2605	if (ret)
   2606		goto err_unmap_io_base;
   2607
   2608	return 0;
   2609
   2610err_unmap_io_base:
   2611	pci_iounmap(pdev, hdev->hw.hw.io_base);
   2612err_clr_master:
   2613	pci_clear_master(pdev);
   2614	pci_release_regions(pdev);
   2615err_disable_device:
   2616	pci_disable_device(pdev);
   2617
   2618	return ret;
   2619}
   2620
   2621static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
   2622{
   2623	struct pci_dev *pdev = hdev->pdev;
   2624
   2625	if (hdev->hw.hw.mem_base)
   2626		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
   2627
   2628	pci_iounmap(pdev, hdev->hw.hw.io_base);
   2629	pci_clear_master(pdev);
   2630	pci_release_regions(pdev);
   2631	pci_disable_device(pdev);
   2632}
   2633
   2634static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
   2635{
   2636	struct hclgevf_query_res_cmd *req;
   2637	struct hclge_desc desc;
   2638	int ret;
   2639
   2640	hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
   2641	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
   2642	if (ret) {
   2643		dev_err(&hdev->pdev->dev,
   2644			"query vf resource failed, ret = %d.\n", ret);
   2645		return ret;
   2646	}
   2647
   2648	req = (struct hclgevf_query_res_cmd *)desc.data;
   2649
   2650	if (hnae3_dev_roce_supported(hdev)) {
   2651		hdev->roce_base_msix_offset =
   2652		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
   2653				HCLGEVF_MSIX_OFT_ROCEE_M,
   2654				HCLGEVF_MSIX_OFT_ROCEE_S);
   2655		hdev->num_roce_msix =
   2656		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
   2657				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
   2658
   2659		/* the NIC's MSI-X vector count is always equal to the RoCE's. */
   2660		hdev->num_nic_msix = hdev->num_roce_msix;
   2661
   2662		/* VF should have NIC vectors and RoCE vectors; NIC vectors
   2663		 * are queued before RoCE vectors. The offset is fixed to 64.
   2664		 */
   2665		hdev->num_msi = hdev->num_roce_msix +
   2666				hdev->roce_base_msix_offset;
   2667	} else {
   2668		hdev->num_msi =
   2669		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
   2670				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
   2671
   2672		hdev->num_nic_msix = hdev->num_msi;
   2673	}
   2674
   2675	if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
   2676		dev_err(&hdev->pdev->dev,
   2677			"Just %u msi resources, not enough for vf(min:2).\n",
   2678			hdev->num_nic_msix);
   2679		return -EINVAL;
   2680	}
   2681
   2682	return 0;
   2683}
   2684
   2685static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
   2686{
   2687#define HCLGEVF_MAX_NON_TSO_BD_NUM			8U
   2688
   2689	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
   2690
   2691	ae_dev->dev_specs.max_non_tso_bd_num =
   2692					HCLGEVF_MAX_NON_TSO_BD_NUM;
   2693	ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
   2694	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
   2695	ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
   2696	ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
   2697}
   2698
   2699static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
   2700				    struct hclge_desc *desc)
   2701{
   2702	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
   2703	struct hclgevf_dev_specs_0_cmd *req0;
   2704	struct hclgevf_dev_specs_1_cmd *req1;
   2705
   2706	req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
   2707	req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
   2708
   2709	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
   2710	ae_dev->dev_specs.rss_ind_tbl_size =
   2711					le16_to_cpu(req0->rss_ind_tbl_size);
   2712	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
   2713	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
   2714	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
   2715	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
   2716}
   2717
   2718static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
   2719{
   2720	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
   2721
   2722	if (!dev_specs->max_non_tso_bd_num)
   2723		dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
   2724	if (!dev_specs->rss_ind_tbl_size)
   2725		dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
   2726	if (!dev_specs->rss_key_size)
   2727		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
   2728	if (!dev_specs->max_int_gl)
   2729		dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
   2730	if (!dev_specs->max_frm_size)
   2731		dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
   2732}
   2733
   2734static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
   2735{
   2736	struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
   2737	int ret;
   2738	int i;
   2739
   2740	/* set default specifications as devices lower than version V3 do not
   2741	 * support querying specifications from firmware.
   2742	 */
   2743	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
   2744		hclgevf_set_default_dev_specs(hdev);
   2745		return 0;
   2746	}
   2747
   2748	for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
   2749		hclgevf_cmd_setup_basic_desc(&desc[i],
   2750					     HCLGE_OPC_QUERY_DEV_SPECS, true);
   2751		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
   2752	}
   2753	hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
   2754
   2755	ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
   2756	if (ret)
   2757		return ret;
   2758
   2759	hclgevf_parse_dev_specs(hdev, desc);
   2760	hclgevf_check_dev_specs(hdev);
   2761
   2762	return 0;
   2763}
   2764
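       /* Re-initialize interrupt resources after a reset: a full VF reset
        * tears down and re-creates the MSI/MSI-X vectors and the misc IRQ,
        * while other reset types keep the existing ones.
        */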
   2765static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
   2766{
   2767	struct pci_dev *pdev = hdev->pdev;
   2768	int ret = 0;
   2769
   2770	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
   2771	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
   2772		hclgevf_misc_irq_uninit(hdev);
   2773		hclgevf_uninit_msi(hdev);
   2774		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
   2775	}
   2776
   2777	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
   2778		pci_set_master(pdev);
   2779		ret = hclgevf_init_msi(hdev);
   2780		if (ret) {
   2781			dev_err(&pdev->dev,
   2782				"failed(%d) to init MSI/MSI-X\n", ret);
   2783			return ret;
   2784		}
   2785
   2786		ret = hclgevf_misc_irq_init(hdev);
   2787		if (ret) {
   2788			hclgevf_uninit_msi(hdev);
   2789			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
   2790				ret);
   2791			return ret;
   2792		}
   2793
   2794		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
   2795	}
   2796
   2797	return ret;
   2798}
   2799
   2800static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
   2801{
   2802	struct hclge_vf_to_pf_msg send_msg;
   2803
   2804	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
   2805			       HCLGE_MBX_VPORT_LIST_CLEAR);
   2806	return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   2807}
   2808
   2809static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev)
   2810{
   2811	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
   2812		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1);
   2813}
   2814
   2815static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev)
   2816{
   2817	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
   2818		hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0);
   2819}
   2820
   2821static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
   2822{
   2823	struct pci_dev *pdev = hdev->pdev;
   2824	int ret;
   2825
   2826	ret = hclgevf_pci_reset(hdev);
   2827	if (ret) {
   2828		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
   2829		return ret;
   2830	}
   2831
   2832	hclgevf_arq_init(hdev);
   2833	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
   2834				  &hdev->fw_version, false,
   2835				  hdev->reset_pending);
   2836	if (ret) {
   2837		dev_err(&pdev->dev, "cmd failed %d\n", ret);
   2838		return ret;
   2839	}
   2840
   2841	ret = hclgevf_rss_init_hw(hdev);
   2842	if (ret) {
   2843		dev_err(&hdev->pdev->dev,
   2844			"failed(%d) to initialize RSS\n", ret);
   2845		return ret;
   2846	}
   2847
   2848	ret = hclgevf_config_gro(hdev);
   2849	if (ret)
   2850		return ret;
   2851
   2852	ret = hclgevf_init_vlan_config(hdev);
   2853	if (ret) {
   2854		dev_err(&hdev->pdev->dev,
   2855			"failed(%d) to initialize VLAN config\n", ret);
   2856		return ret;
   2857	}
   2858
   2859	/* get current port based vlan state from PF */
   2860	ret = hclgevf_get_port_base_vlan_filter_state(hdev);
   2861	if (ret)
   2862		return ret;
   2863
   2864	set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
   2865
   2866	hclgevf_init_rxd_adv_layout(hdev);
   2867
   2868	dev_info(&hdev->pdev->dev, "Reset done\n");
   2869
   2870	return 0;
   2871}
   2872
   2873static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
   2874{
   2875	struct pci_dev *pdev = hdev->pdev;
   2876	int ret;
   2877
   2878	ret = hclgevf_pci_init(hdev);
   2879	if (ret)
   2880		return ret;
   2881
   2882	ret = hclgevf_devlink_init(hdev);
   2883	if (ret)
   2884		goto err_devlink_init;
   2885
   2886	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
   2887	if (ret)
   2888		goto err_cmd_queue_init;
   2889
   2890	hclgevf_arq_init(hdev);
   2891	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
   2892				  &hdev->fw_version, false,
   2893				  hdev->reset_pending);
   2894	if (ret)
   2895		goto err_cmd_init;
   2896
   2897	/* Get vf resource */
   2898	ret = hclgevf_query_vf_resource(hdev);
   2899	if (ret)
   2900		goto err_cmd_init;
   2901
   2902	ret = hclgevf_query_dev_specs(hdev);
   2903	if (ret) {
   2904		dev_err(&pdev->dev,
   2905			"failed to query dev specifications, ret = %d\n", ret);
   2906		goto err_cmd_init;
   2907	}
   2908
   2909	ret = hclgevf_init_msi(hdev);
   2910	if (ret) {
   2911		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
   2912		goto err_cmd_init;
   2913	}
   2914
   2915	hclgevf_state_init(hdev);
   2916	hdev->reset_level = HNAE3_VF_FUNC_RESET;
   2917	hdev->reset_type = HNAE3_NONE_RESET;
   2918
   2919	ret = hclgevf_misc_irq_init(hdev);
   2920	if (ret)
   2921		goto err_misc_irq_init;
   2922
   2923	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
   2924
   2925	ret = hclgevf_configure(hdev);
   2926	if (ret) {
   2927		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
   2928		goto err_config;
   2929	}
   2930
   2931	ret = hclgevf_alloc_tqps(hdev);
   2932	if (ret) {
   2933		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
   2934		goto err_config;
   2935	}
   2936
   2937	ret = hclgevf_set_handle_info(hdev);
   2938	if (ret)
   2939		goto err_config;
   2940
   2941	ret = hclgevf_config_gro(hdev);
   2942	if (ret)
   2943		goto err_config;
   2944
   2945	/* Initialize RSS for this VF */
   2946	ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
   2947				      &hdev->rss_cfg);
   2948	if (ret) {
   2949		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
   2950		goto err_config;
   2951	}
   2952
   2953	ret = hclgevf_rss_init_hw(hdev);
   2954	if (ret) {
   2955		dev_err(&hdev->pdev->dev,
   2956			"failed(%d) to initialize RSS\n", ret);
   2957		goto err_config;
   2958	}
   2959
   2960	/* ensure the VF tbl list is empty before init */
   2961	ret = hclgevf_clear_vport_list(hdev);
   2962	if (ret) {
   2963		dev_err(&pdev->dev,
   2964			"failed to clear tbl list configuration, ret = %d.\n",
   2965			ret);
   2966		goto err_config;
   2967	}
   2968
   2969	ret = hclgevf_init_vlan_config(hdev);
   2970	if (ret) {
   2971		dev_err(&hdev->pdev->dev,
   2972			"failed(%d) to initialize VLAN config\n", ret);
   2973		goto err_config;
   2974	}
   2975
   2976	hclgevf_init_rxd_adv_layout(hdev);
   2977
   2978	set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
   2979
   2980	hdev->last_reset_time = jiffies;
   2981	dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
   2982		 HCLGEVF_DRIVER_NAME);
   2983
   2984	hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
   2985
   2986	return 0;
   2987
   2988err_config:
   2989	hclgevf_misc_irq_uninit(hdev);
   2990err_misc_irq_init:
   2991	hclgevf_state_uninit(hdev);
   2992	hclgevf_uninit_msi(hdev);
   2993err_cmd_init:
   2994	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
   2995err_cmd_queue_init:
   2996	hclgevf_devlink_uninit(hdev);
   2997err_devlink_init:
   2998	hclgevf_pci_uninit(hdev);
   2999	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
   3000	return ret;
   3001}
   3002
   3003static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
   3004{
   3005	struct hclge_vf_to_pf_msg send_msg;
   3006
   3007	hclgevf_state_uninit(hdev);
   3008	hclgevf_uninit_rxd_adv_layout(hdev);
   3009
   3010	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
   3011	hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   3012
   3013	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
   3014		hclgevf_misc_irq_uninit(hdev);
   3015		hclgevf_uninit_msi(hdev);
   3016	}
   3017
   3018	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
   3019	hclgevf_devlink_uninit(hdev);
   3020	hclgevf_pci_uninit(hdev);
   3021	hclgevf_uninit_mac_list(hdev);
   3022}
   3023
   3024static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
   3025{
   3026	struct pci_dev *pdev = ae_dev->pdev;
   3027	int ret;
   3028
   3029	ret = hclgevf_alloc_hdev(ae_dev);
   3030	if (ret) {
   3031		dev_err(&pdev->dev, "hclge device allocation failed\n");
   3032		return ret;
   3033	}
   3034
   3035	ret = hclgevf_init_hdev(ae_dev->priv);
   3036	if (ret) {
   3037		dev_err(&pdev->dev, "hclge device initialization failed\n");
   3038		return ret;
   3039	}
   3040
   3041	return 0;
   3042}
   3043
   3044static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
   3045{
   3046	struct hclgevf_dev *hdev = ae_dev->priv;
   3047
   3048	hclgevf_uninit_hdev(hdev);
   3049	ae_dev->priv = NULL;
   3050}
   3051
   3052static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
   3053{
   3054	struct hnae3_handle *nic = &hdev->nic;
   3055	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
   3056
   3057	return min_t(u32, hdev->rss_size_max,
   3058		     hdev->num_tqps / kinfo->tc_info.num_tc);
   3059}
   3060
   3061/**
   3062 * hclgevf_get_channels - Get the current channels enabled and max supported.
   3063 * @handle: hardware information for network interface
   3064 * @ch: ethtool channels structure
   3065 *
   3066 * We don't support separate tx and rx queues as channels. The other count
   3067 * represents how many queues are being used for control. max_combined counts
   3068 * how many queue pairs we can support. They may not be mapped 1 to 1 with
   3069 * q_vectors since we support a lot more queue pairs than q_vectors.
   3070 **/
   3071static void hclgevf_get_channels(struct hnae3_handle *handle,
   3072				 struct ethtool_channels *ch)
   3073{
   3074	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3075
   3076	ch->max_combined = hclgevf_get_max_channels(hdev);
   3077	ch->other_count = 0;
   3078	ch->max_other = 0;
   3079	ch->combined_count = handle->kinfo.rss_size;
   3080}
   3081
   3082static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
   3083					  u16 *alloc_tqps, u16 *max_rss_size)
   3084{
   3085	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3086
   3087	*alloc_tqps = hdev->num_tqps;
   3088	*max_rss_size = hdev->rss_size_max;
   3089}
   3090
   3091static void hclgevf_update_rss_size(struct hnae3_handle *handle,
   3092				    u32 new_tqps_num)
   3093{
   3094	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
   3095	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3096	u16 max_rss_size;
   3097
   3098	kinfo->req_rss_size = new_tqps_num;
   3099
   3100	max_rss_size = min_t(u16, hdev->rss_size_max,
   3101			     hdev->num_tqps / kinfo->tc_info.num_tc);
   3102
   3103	/* Use the user's configuration when it is not larger than
   3104	 * max_rss_size, otherwise, use the maximum specification value.
   3105	 */
   3106	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
   3107	    kinfo->req_rss_size <= max_rss_size)
   3108		kinfo->rss_size = kinfo->req_rss_size;
   3109	else if (kinfo->rss_size > max_rss_size ||
   3110		 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
   3111		kinfo->rss_size = max_rss_size;
   3112
   3113	kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size;
   3114}
   3115
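       /* Apply a new channel (queue pair) count: recompute rss_size, reprogram
        * the RSS TC mode and, unless the user has configured the RSS table via
        * ethtool, rebuild the indirection table round-robin over the new
        * rss_size.
        */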
   3116static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
   3117				bool rxfh_configured)
   3118{
   3119	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3120	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
   3121	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
   3122	u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
   3123	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
   3124	u16 cur_rss_size = kinfo->rss_size;
   3125	u16 cur_tqps = kinfo->num_tqps;
   3126	u32 *rss_indir;
   3127	unsigned int i;
   3128	int ret;
   3129
   3130	hclgevf_update_rss_size(handle, new_tqps_num);
   3131
   3132	hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
   3133				   tc_offset, tc_valid, tc_size);
   3134	ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
   3135					 tc_valid, tc_size);
   3136	if (ret)
   3137		return ret;
   3138
   3139	/* RSS indirection table has been configured by user */
   3140	if (rxfh_configured)
   3141		goto out;
   3142
   3143	/* Reinitializes the rss indirect table according to the new RSS size */
   3144	rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
   3145			    sizeof(u32), GFP_KERNEL);
   3146	if (!rss_indir)
   3147		return -ENOMEM;
   3148
   3149	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
   3150		rss_indir[i] = i % kinfo->rss_size;
   3151
   3152	hdev->rss_cfg.rss_size = kinfo->rss_size;
   3153
   3154	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
   3155	if (ret)
   3156		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
   3157			ret);
   3158
   3159	kfree(rss_indir);
   3160
   3161out:
   3162	if (!ret)
   3163		dev_info(&hdev->pdev->dev,
   3164			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
   3165			 cur_rss_size, kinfo->rss_size,
   3166			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
   3167
   3168	return ret;
   3169}
   3170
   3171static int hclgevf_get_status(struct hnae3_handle *handle)
   3172{
   3173	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3174
   3175	return hdev->hw.mac.link;
   3176}
   3177
   3178static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
   3179					    u8 *auto_neg, u32 *speed,
   3180					    u8 *duplex)
   3181{
   3182	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3183
   3184	if (speed)
   3185		*speed = hdev->hw.mac.speed;
   3186	if (duplex)
   3187		*duplex = hdev->hw.mac.duplex;
   3188	if (auto_neg)
   3189		*auto_neg = AUTONEG_DISABLE;
   3190}
   3191
   3192void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
   3193				 u8 duplex)
   3194{
   3195	hdev->hw.mac.speed = speed;
   3196	hdev->hw.mac.duplex = duplex;
   3197}
   3198
   3199static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
   3200{
   3201	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3202	bool gro_en_old = hdev->gro_en;
   3203	int ret;
   3204
   3205	hdev->gro_en = enable;
   3206	ret = hclgevf_config_gro(hdev);
   3207	if (ret)
   3208		hdev->gro_en = gro_en_old;
   3209
   3210	return ret;
   3211}
   3212
   3213static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
   3214				   u8 *module_type)
   3215{
   3216	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3217
   3218	if (media_type)
   3219		*media_type = hdev->hw.mac.media_type;
   3220
   3221	if (module_type)
   3222		*module_type = hdev->hw.mac.module_type;
   3223}
   3224
   3225static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
   3226{
   3227	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3228
   3229	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
   3230}
   3231
   3232static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
   3233{
   3234	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3235
   3236	return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
   3237}
   3238
   3239static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
   3240{
   3241	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3242
   3243	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
   3244}
   3245
   3246static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
   3247{
   3248	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3249
   3250	return hdev->rst_stats.hw_rst_done_cnt;
   3251}
   3252
   3253static void hclgevf_get_link_mode(struct hnae3_handle *handle,
   3254				  unsigned long *supported,
   3255				  unsigned long *advertising)
   3256{
   3257	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3258
   3259	*supported = hdev->hw.mac.supported;
   3260	*advertising = hdev->hw.mac.advertising;
   3261}
   3262
   3263#define MAX_SEPARATE_NUM	4
   3264#define SEPARATOR_VALUE		0xFDFCFBFA
   3265#define REG_NUM_PER_LINE	4
   3266#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
   3267
   3268static int hclgevf_get_regs_len(struct hnae3_handle *handle)
   3269{
   3270	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
   3271	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3272
   3273	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
   3274	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
   3275	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
   3276	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
   3277
   3278	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
   3279		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
   3280}
   3281
   3282static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
   3283			     void *data)
   3284{
   3285	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
   3286	int i, j, reg_um, separator_num;
   3287	u32 *reg = data;
   3288
   3289	*version = hdev->fw_version;
   3290
   3291	/* fetch per-VF register values from the VF PCIe register space */
   3292	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
   3293	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
   3294	for (i = 0; i < reg_um; i++)
   3295		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
   3296	for (i = 0; i < separator_num; i++)
   3297		*reg++ = SEPARATOR_VALUE;
   3298
   3299	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
   3300	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
   3301	for (i = 0; i < reg_um; i++)
   3302		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
   3303	for (i = 0; i < separator_num; i++)
   3304		*reg++ = SEPARATOR_VALUE;
   3305
   3306	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
   3307	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
   3308	for (j = 0; j < hdev->num_tqps; j++) {
   3309		for (i = 0; i < reg_um; i++)
   3310			*reg++ = hclgevf_read_dev(&hdev->hw,
   3311						  ring_reg_addr_list[i] +
   3312						  HCLGEVF_TQP_REG_SIZE * j);
   3313		for (i = 0; i < separator_num; i++)
   3314			*reg++ = SEPARATOR_VALUE;
   3315	}
   3316
   3317	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
   3318	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
   3319	for (j = 0; j < hdev->num_msi_used - 1; j++) {
   3320		for (i = 0; i < reg_um; i++)
   3321			*reg++ = hclgevf_read_dev(&hdev->hw,
   3322						  tqp_intr_reg_addr_list[i] +
   3323						  4 * j);
   3324		for (i = 0; i < separator_num; i++)
   3325			*reg++ = SEPARATOR_VALUE;
   3326	}
   3327}
   3328
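/* hclgevf_update_port_base_vlan_info - apply a port based VLAN change:
 * pause the client, send the new configuration to the PF over the
 * mailbox, record the resulting state in nic->port_base_vlan_state and
 * resume the client. Skipped while a reset is being handled or has failed.
 */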
   3329void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
   3330				struct hclge_mbx_port_base_vlan *port_base_vlan)
   3331{
   3332	struct hnae3_handle *nic = &hdev->nic;
   3333	struct hclge_vf_to_pf_msg send_msg;
   3334	int ret;
   3335
   3336	rtnl_lock();
   3337
   3338	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
   3339	    test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
    3340		dev_warn(&hdev->pdev->dev,
    3341			 "device is resetting, skip updating port based VLAN info\n");
   3342		rtnl_unlock();
   3343		return;
   3344	}
   3345
   3346	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
   3347	if (ret) {
   3348		rtnl_unlock();
   3349		return;
   3350	}
   3351
    3352	/* send msg to PF and wait for the PF to update the port based VLAN info */
   3353	hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
   3354			       HCLGE_MBX_PORT_BASE_VLAN_CFG);
   3355	memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
   3356	ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
   3357	if (!ret) {
   3358		if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
   3359			nic->port_base_vlan_state = state;
   3360		else
   3361			nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
   3362	}
   3363
   3364	hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
   3365	rtnl_unlock();
   3366}
   3367
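/* hnae3 operations implemented by the VF driver; exposed to the hnae3
 * framework through ae_algovf below.
 */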
   3368static const struct hnae3_ae_ops hclgevf_ops = {
   3369	.init_ae_dev = hclgevf_init_ae_dev,
   3370	.uninit_ae_dev = hclgevf_uninit_ae_dev,
   3371	.reset_prepare = hclgevf_reset_prepare_general,
   3372	.reset_done = hclgevf_reset_done,
   3373	.init_client_instance = hclgevf_init_client_instance,
   3374	.uninit_client_instance = hclgevf_uninit_client_instance,
   3375	.start = hclgevf_ae_start,
   3376	.stop = hclgevf_ae_stop,
   3377	.client_start = hclgevf_client_start,
   3378	.client_stop = hclgevf_client_stop,
   3379	.map_ring_to_vector = hclgevf_map_ring_to_vector,
   3380	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
   3381	.get_vector = hclgevf_get_vector,
   3382	.put_vector = hclgevf_put_vector,
   3383	.reset_queue = hclgevf_reset_tqp,
   3384	.get_mac_addr = hclgevf_get_mac_addr,
   3385	.set_mac_addr = hclgevf_set_mac_addr,
   3386	.add_uc_addr = hclgevf_add_uc_addr,
   3387	.rm_uc_addr = hclgevf_rm_uc_addr,
   3388	.add_mc_addr = hclgevf_add_mc_addr,
   3389	.rm_mc_addr = hclgevf_rm_mc_addr,
   3390	.get_stats = hclgevf_get_stats,
   3391	.update_stats = hclgevf_update_stats,
   3392	.get_strings = hclgevf_get_strings,
   3393	.get_sset_count = hclgevf_get_sset_count,
   3394	.get_rss_key_size = hclge_comm_get_rss_key_size,
   3395	.get_rss = hclgevf_get_rss,
   3396	.set_rss = hclgevf_set_rss,
   3397	.get_rss_tuple = hclgevf_get_rss_tuple,
   3398	.set_rss_tuple = hclgevf_set_rss_tuple,
   3399	.get_tc_size = hclgevf_get_tc_size,
   3400	.get_fw_version = hclgevf_get_fw_version,
   3401	.set_vlan_filter = hclgevf_set_vlan_filter,
   3402	.enable_vlan_filter = hclgevf_enable_vlan_filter,
   3403	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
   3404	.reset_event = hclgevf_reset_event,
   3405	.set_default_reset_request = hclgevf_set_def_reset_request,
   3406	.set_channels = hclgevf_set_channels,
   3407	.get_channels = hclgevf_get_channels,
   3408	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
   3409	.get_regs_len = hclgevf_get_regs_len,
   3410	.get_regs = hclgevf_get_regs,
   3411	.get_status = hclgevf_get_status,
   3412	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
   3413	.get_media_type = hclgevf_get_media_type,
   3414	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
   3415	.ae_dev_resetting = hclgevf_ae_dev_resetting,
   3416	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
   3417	.set_gro_en = hclgevf_gro_en,
   3418	.set_mtu = hclgevf_set_mtu,
   3419	.get_global_queue_id = hclgevf_get_qid_global,
   3420	.set_timer_task = hclgevf_set_timer_task,
   3421	.get_link_mode = hclgevf_get_link_mode,
   3422	.set_promisc_mode = hclgevf_set_promisc_mode,
   3423	.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
   3424	.get_cmdq_stat = hclgevf_get_cmdq_stat,
   3425};
   3426
   3427static struct hnae3_ae_algo ae_algovf = {
   3428	.ops = &hclgevf_ops,
   3429	.pdev_id_table = ae_algovf_pci_tbl,
   3430};
   3431
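/* module entry: create the unbound driver workqueue and register the VF
 * algorithm with the hnae3 framework.
 */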
   3432static int hclgevf_init(void)
   3433{
   3434	pr_info("%s is initializing\n", HCLGEVF_NAME);
   3435
   3436	hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
   3437	if (!hclgevf_wq) {
   3438		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
   3439		return -ENOMEM;
   3440	}
   3441
   3442	hnae3_register_ae_algo(&ae_algovf);
   3443
   3444	return 0;
   3445}
   3446
   3447static void hclgevf_exit(void)
   3448{
   3449	hnae3_unregister_ae_algo(&ae_algovf);
   3450	destroy_workqueue(hclgevf_wq);
   3451}
   3452module_init(hclgevf_init);
   3453module_exit(hclgevf_exit);
   3454
   3455MODULE_LICENSE("GPL");
   3456MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
   3457MODULE_DESCRIPTION("HCLGEVF Driver");
   3458MODULE_VERSION(HCLGEVF_MOD_VERSION);