cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hclge_comm_cmd.c (17984B)


// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.

#include "hnae3.h"
#include "hclge_comm_cmd.h"

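/* Program the base address, depth and head/tail registers of a command
 * queue ring (CSQ or CRQ) into the hardware.
 */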
static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
				       struct hclge_comm_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	u32 reg_val;

	if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
		reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
				     lower_32_bits(dma));
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
				     upper_32_bits(dma));
		reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
		hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
	}
}

void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
	hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
}

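/* Reinitialize a descriptor's flags so it can be resent, setting or clearing
 * the write-back (read) flag as requested.
 */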
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}

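/* Set capabilities that are not reported by the firmware but that the device
 * is known to support.
 */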
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
					      bool is_pf)
{
	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
		set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
	}
}

void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
				     enum hclge_opcode_type opcode,
				     bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
				 HCLGE_COMM_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}

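/* Report the optional features the driver supports to the firmware (link
 * event reporting, NCSI error reporting, PHY handling in the IMP, extended
 * MAC statistics, RX ring head syncing); with en == false all of them are
 * cleared.
 */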
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
				      struct hclge_comm_hw *hw, bool en)
{
	struct hclge_comm_firmware_compat_cmd *req;
	struct hclge_desc desc;
	u32 compat = 0;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);

	if (en) {
		req = (struct hclge_comm_firmware_compat_cmd *)desc.data;

		hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
		if (hclge_comm_dev_phy_imp_supported(ae_dev))
			hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
		hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);

		req->compat = cpu_to_le32(compat);
	}

	return hclge_comm_cmd_send(hw, &desc, 1);
}

void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size  = ring->desc_num * sizeof(struct hclge_desc);

	if (!ring->desc)
		return;

	dma_free_coherent(&ring->pdev->dev, size,
			  ring->desc, ring->desc_dma_addr);
	ring->desc = NULL;
}

static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
{
	int size  = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(&ring->pdev->dev,
					size, &ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static __le32 hclge_comm_build_api_caps(void)
{
	u32 api_caps = 0;

	hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);

	return cpu_to_le32(api_caps);
}

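/* Mapping from firmware (IMP) capability bits to the driver's HNAE3
 * capability bits, for the PF and the VF respectively.
 */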
static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
	{HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
	{HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
	{HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
	 HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
	{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};

static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
	{HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
	{HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
	{HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
	{HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
	{HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
	{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
	{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
	{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
	{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};

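/* Translate the capability bits reported by the firmware into the driver's
 * HNAE3 capability bits, using the PF or VF map above.
 */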
static void
hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
			    struct hclge_comm_query_version_cmd *cmd)
{
	const struct hclge_comm_caps_bit_map *caps_map =
				is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
	u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
				ARRAY_SIZE(hclge_vf_cmd_caps);
	u32 caps, i;

	caps = __le32_to_cpu(cmd->caps[0]);
	for (i = 0; i < size; i++)
		if (hnae3_get_bit(caps, caps_map[i].imp_bit))
			set_bit(caps_map[i].local_bit, ae_dev->caps);
}

int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
{
	struct hclge_comm_cmq_ring *ring =
		(ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
						     &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;

	ret = hclge_comm_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
			ret);

	return ret;
}

int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
						struct hclge_comm_hw *hw,
						u32 *fw_version, bool is_pf)
{
	struct hclge_comm_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_comm_query_version_cmd *)desc.data;
	resp->api_caps = hclge_comm_build_api_caps();

	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;

	*fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
					 HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= ae_dev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclge_comm_set_default_capability(ae_dev, is_pf);

	hclge_comm_parse_capability(ae_dev, is_pf, resp);

	return ret;
}

static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
				   HCLGE_OPC_STATS_32_BIT,
				   HCLGE_OPC_STATS_MAC,
				   HCLGE_OPC_STATS_MAC_ALL,
				   HCLGE_OPC_QUERY_32_BIT_REG,
				   HCLGE_OPC_QUERY_64_BIT_REG,
				   HCLGE_QUERY_CLEAR_MPF_RAS_INT,
				   HCLGE_QUERY_CLEAR_PF_RAS_INT,
				   HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
				   HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
				   HCLGE_QUERY_ALL_ERR_INFO };

static bool hclge_comm_is_special_opcode(u16 opcode)
{
	/* these commands have several descriptors,
	 * and use the first one to save opcode and return value
	 */
	u32 i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
		if (spec_opcode[i] == opcode)
			return true;

	return false;
}

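/* Number of free descriptors in the ring; one slot is always kept unused so
 * that a full ring can be distinguished from an empty one.
 */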
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

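/* Copy the caller's descriptors into the CSQ ring, advancing next_to_use and
 * wrapping around at the end of the ring.
 */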
static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
				     struct hclge_desc *desc, int num)
{
	struct hclge_desc *desc_to_use;
	int handle = 0;

	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}
}

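/* Check that the head pointer reported by the hardware lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */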
static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
					      int head)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

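/* Reclaim the CSQ descriptors that the firmware has already processed.
 * Returns the number of cleaned descriptors, or -EIO if the hardware head
 * pointer is invalid.
 */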
static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
			 head, csq->next_to_use, csq->next_to_clean);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "IMP firmware watchdog reset soon expected!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;
	return clean;
}

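/* True once the hardware head pointer has caught up with the software tail,
 * i.e. all submitted descriptors have been processed.
 */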
static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
{
	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
	return head == hw->cmq.csq.next_to_use;
}

static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
				     bool *is_completed)
{
	u32 timeout = 0;

	do {
		if (hclge_comm_cmd_csq_done(hw)) {
			*is_completed = true;
			break;
		}
		udelay(1);
		timeout++;
	} while (timeout < hw->cmq.tx_timeout);
}

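/* Map the return code written back by the firmware to a standard errno. */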
static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
{
	struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
		{ HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
		{ HCLGE_COMM_CMD_NO_AUTH, -EPERM },
		{ HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
		{ HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
		{ HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
		{ HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
		{ HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
		{ HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
		{ HCLGE_COMM_CMD_TIMEOUT, -ETIME },
		{ HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
		{ HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
		{ HCLGE_COMM_CMD_INVALID, -EBADR },
	};
	u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
	u32 i;

	for (i = 0; i < errcode_count; i++)
		if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
			return hclge_comm_cmd_errcode[i].common_errno;

	return -EIO;
}

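/* Copy the descriptors written back by the firmware out of the CSQ, starting
 * at ring index ntc, and convert the firmware return value to an errno.
 */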
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc, int num,
				       int ntc)
{
	u16 opcode, desc_ret;
	int handle;

	opcode = le16_to_cpu(desc[0].opcode);
	for (handle = 0; handle < num; handle++) {
		desc[handle] = hw->cmq.csq.desc[ntc];
		ntc++;
		if (ntc >= hw->cmq.csq.desc_num)
			ntc = 0;
	}
	if (likely(!hclge_comm_is_special_opcode(opcode)))
		desc_ret = le16_to_cpu(desc[num - 1].retval);
	else
		desc_ret = le16_to_cpu(desc[0].retval);

	hw->cmq.last_status = desc_ret;

	return hclge_comm_cmd_convert_err_code(desc_ret);
}

static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
				       struct hclge_desc *desc,
				       int num, int ntc)
{
	bool is_completed = false;
	int handle, ret;

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		hclge_comm_wait_for_resp(hw, &is_completed);

	if (!is_completed)
		ret = -EBADE;
	else
		ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);

	/* Clean the command send queue */
	handle = hclge_comm_cmd_csq_clean(hw);
	if (handle < 0)
		ret = handle;
	else if (handle != num)
		dev_warn(&hw->cmq.csq.pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	return ret;
}

/**
 * hclge_comm_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it copies the
 * descriptors to the queue, waits for completion and cleans the queue.
 **/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
			int num)
{
	struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
	int ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may
		 * differ, so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean =
			hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used for the hardware to write back.
	 */
	ntc = hw->cmq.csq.next_to_use;

	hclge_comm_cmd_copy_desc(hw, desc, num);

	/* Write to hardware */
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
			     hw->cmq.csq.next_to_use);

	ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return ret;
}
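
/* Typical usage (illustrative sketch): a caller prepares one or more
 * descriptors with hclge_comm_cmd_setup_basic_desc() and passes them to
 * hclge_comm_cmd_send(), e.g.:
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_comm_cmd_send(hw, &desc, 1);
 *
 * hclge_comm_cmd_query_version_and_capability() above follows this pattern.
 */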

static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
	hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}

void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
			   struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;

	hclge_comm_firmware_compat_config(ae_dev, hw, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Wait to ensure that the firmware completes any commands that may
	 * be left over.
	 */
	msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);
	hclge_comm_cmd_uninit_regs(hw);
	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	hclge_comm_free_cmd_desc(&cmdq->csq);
	hclge_comm_free_cmd_desc(&cmdq->crq);
}

int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	/* Set up the locks for the command queues */
	spin_lock_init(&cmdq->csq.lock);
	spin_lock_init(&cmdq->crq.lock);

	cmdq->csq.pdev = pdev;
	cmdq->crq.pdev = pdev;

	/* Set up the number of queue entries used by the command queues */
	cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
	cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;

	/* Set up the Tx write back timeout */
	cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;

	/* Set up the queue rings */
	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
	if (ret) {
		dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
	if (ret) {
		dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_comm_free_cmd_desc(&hw->cmq.csq);
	return ret;
}

int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
			u32 *fw_version, bool is_pf,
			unsigned long reset_pending)
{
	struct hclge_comm_cmq *cmdq = &hw->cmq;
	int ret;

	spin_lock_bh(&cmdq->csq.lock);
	spin_lock(&cmdq->crq.lock);

	cmdq->csq.next_to_clean = 0;
	cmdq->csq.next_to_use = 0;
	cmdq->crq.next_to_clean = 0;
	cmdq->crq.next_to_use = 0;

	hclge_comm_cmd_init_regs(hw);

	spin_unlock(&cmdq->crq.lock);
	spin_unlock_bh(&cmdq->csq.lock);

	clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	/* Check if there is a new reset pending, because a higher level reset
	 * may happen while a lower level reset is being processed.
	 */
	if (reset_pending) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* Get the firmware version and device capabilities */
	ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
							  fw_version, is_pf);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n",
			ret);
		goto err_cmd_init;
	}

	dev_info(&ae_dev->pdev->dev,
		 "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
		return 0;

	/* Ask the firmware to enable some optional features; the driver can
	 * work without them.
	 */
	ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
	if (ret)
		dev_warn(&ae_dev->pdev->dev,
			 "Firmware compatible features not enabled(%d).\n",
			 ret);
	return 0;

err_cmd_init:
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);

	return ret;
}