cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hclge_tm.c (51067B)


      1// SPDX-License-Identifier: GPL-2.0+
      2// Copyright (c) 2016-2017 Hisilicon Limited.
      3
      4#include <linux/etherdevice.h>
      5
      6#include "hclge_cmd.h"
      7#include "hclge_main.h"
      8#include "hclge_tm.h"
      9
     10enum hclge_shaper_level {
     11	HCLGE_SHAPER_LVL_PRI	= 0,
     12	HCLGE_SHAPER_LVL_PG	= 1,
     13	HCLGE_SHAPER_LVL_PORT	= 2,
     14	HCLGE_SHAPER_LVL_QSET	= 3,
     15	HCLGE_SHAPER_LVL_CNT	= 4,
     16	HCLGE_SHAPER_LVL_VF	= 0,
     17	HCLGE_SHAPER_LVL_PF	= 1,
     18};
     19
     20#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
     21#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3
     22
     23#define HCLGE_SHAPER_BS_U_DEF	5
     24#define HCLGE_SHAPER_BS_S_DEF	20
     25
     26/* hclge_shaper_para_calc: calculate ir parameter for the shaper
     27 * @ir: rate to be configured, in Mbps
     28 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
     29 * @ir_para: parameters of the IR shaper
     30 * @max_tm_rate: max tm rate available for configuration
     31 *
     32 * the formula:
     33 *
     34 *		IR_b * (2 ^ IR_u) * 8
     35 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
     36 *		Tick * (2 ^ IR_s)
     37 *
     38 * @return: 0: calculation successful, negative: fail
     39 */
     40static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
     41				  struct hclge_shaper_ir_para *ir_para,
     42				  u32 max_tm_rate)
     43{
     44#define DEFAULT_SHAPER_IR_B	126
     45#define DIVISOR_CLK		(1000 * 8)
     46#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
     47
     48	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
     49		6 * 256,        /* Priority level */
     50		6 * 32,         /* Priority group level */
     51		6 * 8,          /* Port level */
     52		6 * 256         /* Qset level */
     53	};
     54	u8 ir_u_calc = 0;
     55	u8 ir_s_calc = 0;
     56	u32 ir_calc;
     57	u32 tick;
     58
     59	/* Calc tick */
     60	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
     61	    ir > max_tm_rate)
     62		return -EINVAL;
     63
     64	tick = tick_array[shaper_level];
     65
     66	/**
     67	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
     68	 * the formula is changed to:
     69	 *		126 * 1 * 8
     70	 * ir_calc = ---------------- * 1000
     71	 *		tick * 1
     72	 */
     73	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
     74
     75	if (ir_calc == ir) {
     76		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
     77		ir_para->ir_u = 0;
     78		ir_para->ir_s = 0;
     79
     80		return 0;
     81	} else if (ir_calc > ir) {
     82		/* Increasing the denominator to select ir_s value */
     83		while (ir_calc >= ir && ir) {
     84			ir_s_calc++;
     85			ir_calc = DEFAULT_DIVISOR_IR_B /
     86				  (tick * (1 << ir_s_calc));
     87		}
     88
     89		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
     90				(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
     91	} else {
     92		/* Increasing the numerator to select ir_u value */
     93		u32 numerator;
     94
     95		while (ir_calc < ir) {
     96			ir_u_calc++;
     97			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
     98			ir_calc = (numerator + (tick >> 1)) / tick;
     99		}
    100
    101		if (ir_calc == ir) {
    102			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
    103		} else {
    104			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
    105			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
    106					denominator;
    107		}
    108	}
    109
    110	ir_para->ir_u = ir_u_calc;
    111	ir_para->ir_s = ir_s_calc;
    112
    113	return 0;
    114}
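
/* A worked example of the search above, replayed in userspace: for the
 * priority level (tick = 6 * 256) and a 5000 Mbps target, the default
 * speed (~656 Mbps with ir_b = 126) is too low, so the numerator grows
 * until it overshoots and ir_b is recomputed one step back. Standalone
 * sketch, not driver code; the constants are copied from the function
 * above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t tick = 6 * 256;	/* HCLGE_SHAPER_LVL_PRI */
	const uint32_t ir = 5000;	/* target rate, Mbps */
	uint32_t numerator, denominator, ir_b, ir_calc;
	uint8_t ir_u = 0;

	/* speed with ir_b = 126, ir_u = 0, ir_s = 0: 656 Mbps here */
	ir_calc = (126 * 8000 + (tick >> 1) - 1) / tick;

	/* 656 < 5000: increase the numerator to select ir_u */
	while (ir_calc < ir) {
		ir_u++;
		numerator = 126 * 8000 * (1 << ir_u);
		ir_calc = (numerator + (tick >> 1)) / tick;
	}

	/* overshoot (5250 != 5000): round ir_b against the previous ir_u */
	denominator = 8000 * (1 << --ir_u);
	ir_b = (ir * tick + (denominator >> 1)) / denominator;

	/* prints ir_b=240 ir_u=2: 240 * 2^2 * 8 / 1536 * 1000 = 5000 Mbps */
	printf("ir_b=%u ir_u=%u\n", ir_b, ir_u);
	return 0;
}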
    115
    116static const u16 hclge_pfc_tx_stats_offset[] = {
    117	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
    118	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
    119	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
    120	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
    121	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
    122	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
    123	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
    124	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
    125};
    126
    127static const u16 hclge_pfc_rx_stats_offset[] = {
    128	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
    129	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
    130	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
    131	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
    132	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
    133	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
    134	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
    135	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
    136};
    137
    138static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
    139{
    140	const u16 *offset;
    141	int i;
    142
    143	if (tx)
    144		offset = hclge_pfc_tx_stats_offset;
    145	else
    146		offset = hclge_pfc_rx_stats_offset;
    147
    148	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
    149		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
    150}
    151
    152void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
    153{
    154	hclge_pfc_stats_get(hdev, false, stats);
    155}
    156
    157void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
    158{
    159	hclge_pfc_stats_get(hdev, true, stats);
    160}
    161
    162int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
    163{
    164	struct hclge_desc desc;
    165
    166	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
    167
    168	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
    169		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
    170
    171	return hclge_cmd_send(&hdev->hw, &desc, 1);
    172}
    173
    174static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
    175				  u8 pfc_bitmap)
    176{
    177	struct hclge_desc desc;
    178	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
    179
    180	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
    181
    182	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
    183	pfc->pri_en_bitmap = pfc_bitmap;
    184
    185	return hclge_cmd_send(&hdev->hw, &desc, 1);
    186}
    187
    188static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
    189				 u8 pause_trans_gap, u16 pause_trans_time)
    190{
    191	struct hclge_cfg_pause_param_cmd *pause_param;
    192	struct hclge_desc desc;
    193
    194	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
    195
    196	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
    197
    198	ether_addr_copy(pause_param->mac_addr, addr);
    199	ether_addr_copy(pause_param->mac_addr_extra, addr);
    200	pause_param->pause_trans_gap = pause_trans_gap;
    201	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
    202
    203	return hclge_cmd_send(&hdev->hw, &desc, 1);
    204}
    205
    206int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
    207{
    208	struct hclge_cfg_pause_param_cmd *pause_param;
    209	struct hclge_desc desc;
    210	u16 trans_time;
    211	u8 trans_gap;
    212	int ret;
    213
    214	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
    215
    216	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
    217
    218	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    219	if (ret)
    220		return ret;
    221
    222	trans_gap = pause_param->pause_trans_gap;
    223	trans_time = le16_to_cpu(pause_param->pause_trans_time);
    224
    225	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
    226}
    227
    228static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
    229{
    230	u8 tc;
    231
    232	tc = hdev->tm_info.prio_tc[pri_id];
    233
    234	if (tc >= hdev->tm_info.num_tc)
    235		return -EINVAL;
    236
    237	/**
    238	 * the register for priority has four bytes, the first byte holds
    239	 * priority0 and priority1: the higher 4 bits stand for priority1
    240	 * while the lower 4 bits stand for priority0, as below:
    241	 * first byte:	| pri_1 | pri_0 |
    242	 * second byte:	| pri_3 | pri_2 |
    243	 * third byte:	| pri_5 | pri_4 |
    244	 * fourth byte:	| pri_7 | pri_6 |
    245	 */
    246	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
    247
    248	return 0;
    249}
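
/* An illustration of the nibble packing above with a hypothetical
 * prio_tc map: two user priorities share each byte, the odd priority
 * in the high nibble. Standalone sketch, not driver code.
 */
#include <stdio.h>

int main(void)
{
	unsigned char prio_tc[8] = {0, 1, 2, 3, 0, 1, 2, 3}; /* sample map */
	unsigned char pri[4] = {0};
	int p;

	for (p = 0; p < 8; p++)
		pri[p >> 1] |= prio_tc[p] << ((p & 1) * 4);

	/* prints "10 32 10 32": | pri1 | pri0 |, | pri3 | pri2 |, ... */
	for (p = 0; p < 4; p++)
		printf("%02x ", pri[p]);
	printf("\n");
	return 0;
}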
    250
    251static int hclge_up_to_tc_map(struct hclge_dev *hdev)
    252{
    253	struct hclge_desc desc;
    254	u8 *pri = (u8 *)desc.data;
    255	u8 pri_id;
    256	int ret;
    257
    258	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
    259
    260	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
    261		ret = hclge_fill_pri_array(hdev, pri, pri_id);
    262		if (ret)
    263			return ret;
    264	}
    265
    266	return hclge_cmd_send(&hdev->hw, &desc, 1);
    267}
    268
    269static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
    270				      u8 pg_id, u8 pri_bit_map)
    271{
    272	struct hclge_pg_to_pri_link_cmd *map;
    273	struct hclge_desc desc;
    274
    275	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
    276
    277	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
    278
    279	map->pg_id = pg_id;
    280	map->pri_bit_map = pri_bit_map;
    281
    282	return hclge_cmd_send(&hdev->hw, &desc, 1);
    283}
    284
    285static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
    286				      bool link_vld)
    287{
    288	struct hclge_qs_to_pri_link_cmd *map;
    289	struct hclge_desc desc;
    290
    291	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
    292
    293	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
    294
    295	map->qs_id = cpu_to_le16(qs_id);
    296	map->priority = pri;
    297	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
    298
    299	return hclge_cmd_send(&hdev->hw, &desc, 1);
    300}
    301
    302static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
    303				    u16 q_id, u16 qs_id)
    304{
    305	struct hclge_nq_to_qs_link_cmd *map;
    306	struct hclge_desc desc;
    307	u16 qs_id_l;
    308	u16 qs_id_h;
    309
    310	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
    311
    312	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
    313
    314	map->nq_id = cpu_to_le16(q_id);
    315
    316	/* convert qs_id to the following format to support qset_id >= 1024
    317	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
    318	 *            /         / \         \
    319	 *           /         /   \         \
    320	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
    321	 *          | qs_id_h | vld | qs_id_l |
    322	 */
    323	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
    324				  HCLGE_TM_QS_ID_L_S);
    325	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
    326				  HCLGE_TM_QS_ID_H_S);
    327	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
    328			qs_id_l);
    329	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
    330			qs_id_h);
    331	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
    332
    333	return hclge_cmd_send(&hdev->hw, &desc, 1);
    334}
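
/* The qset_id rewrite above, shown on a concrete value. The bit
 * positions (low field in bits 9..0, valid bit 10, high field moved up
 * to bits 15..11) are taken from the diagram in the comment; the
 * authoritative masks are the HCLGE_TM_QS_ID_* macros in hclge_tm.h.
 * Standalone sketch, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t qs_id = 1234;			/* example qset id >= 1024 */
	uint16_t lo = qs_id & 0x3ff;		/* bits 9..0: 210 */
	uint16_t hi = (qs_id >> 10) & 0x1f;	/* bits 14..10: 1 */
	uint16_t out = (hi << 11) | lo;		/* high part above vld bit */

	out |= 1 << 10;				/* link valid */

	printf("0x%04x\n", out);		/* prints 0x0cd2 */
	return 0;
}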
    335
    336static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
    337				  u8 dwrr)
    338{
    339	struct hclge_pg_weight_cmd *weight;
    340	struct hclge_desc desc;
    341
    342	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
    343
    344	weight = (struct hclge_pg_weight_cmd *)desc.data;
    345
    346	weight->pg_id = pg_id;
    347	weight->dwrr = dwrr;
    348
    349	return hclge_cmd_send(&hdev->hw, &desc, 1);
    350}
    351
    352static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
    353				   u8 dwrr)
    354{
    355	struct hclge_priority_weight_cmd *weight;
    356	struct hclge_desc desc;
    357
    358	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
    359
    360	weight = (struct hclge_priority_weight_cmd *)desc.data;
    361
    362	weight->pri_id = pri_id;
    363	weight->dwrr = dwrr;
    364
    365	return hclge_cmd_send(&hdev->hw, &desc, 1);
    366}
    367
    368static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
    369				  u8 dwrr)
    370{
    371	struct hclge_qs_weight_cmd *weight;
    372	struct hclge_desc desc;
    373
    374	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
    375
    376	weight = (struct hclge_qs_weight_cmd *)desc.data;
    377
    378	weight->qs_id = cpu_to_le16(qs_id);
    379	weight->dwrr = dwrr;
    380
    381	return hclge_cmd_send(&hdev->hw, &desc, 1);
    382}
    383
    384static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
    385				      u8 bs_b, u8 bs_s)
    386{
    387	u32 shapping_para = 0;
    388
    389	hclge_tm_set_field(shapping_para, IR_B, ir_b);
    390	hclge_tm_set_field(shapping_para, IR_U, ir_u);
    391	hclge_tm_set_field(shapping_para, IR_S, ir_s);
    392	hclge_tm_set_field(shapping_para, BS_B, bs_b);
    393	hclge_tm_set_field(shapping_para, BS_S, bs_s);
    394
    395	return shapping_para;
    396}
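
/* hclge_tm_set_field() packs the five shaper fields into one 32-bit
 * register word. A sketch of that packing with assumed bit positions
 * (IR_B at bit 0, IR_U at 8, IR_S at 12, BS_B at 16, BS_S at 21); the
 * authoritative offsets are the HCLGE_TM_SHAP_* macros in hclge_tm.h.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t w = 0;

	w |= 126u << 0;		/* IR_B */
	w |= 0u << 8;		/* IR_U */
	w |= 0u << 12;		/* IR_S */
	w |= 5u << 16;		/* BS_B, passed HCLGE_SHAPER_BS_U_DEF */
	w |= 20u << 21;		/* BS_S, passed HCLGE_SHAPER_BS_S_DEF */

	printf("0x%08x\n", w);	/* prints 0x0285007e */
	return 0;
}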
    397
    398static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
    399				    enum hclge_shap_bucket bucket, u8 pg_id,
    400				    u32 shapping_para, u32 rate)
    401{
    402	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
    403	enum hclge_opcode_type opcode;
    404	struct hclge_desc desc;
    405
    406	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
    407		 HCLGE_OPC_TM_PG_C_SHAPPING;
    408	hclge_cmd_setup_basic_desc(&desc, opcode, false);
    409
    410	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
    411
    412	shap_cfg_cmd->pg_id = pg_id;
    413
    414	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
    415
    416	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
    417
    418	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
    419
    420	return hclge_cmd_send(&hdev->hw, &desc, 1);
    421}
    422
    423int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
    424{
    425	struct hclge_port_shapping_cmd *shap_cfg_cmd;
    426	struct hclge_shaper_ir_para ir_para;
    427	struct hclge_desc desc;
    428	u32 shapping_para;
    429	int ret;
    430
    431	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
    432				     &ir_para,
    433				     hdev->ae_dev->dev_specs.max_tm_rate);
    434	if (ret)
    435		return ret;
    436
    437	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
    438	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
    439
    440	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
    441						   ir_para.ir_s,
    442						   HCLGE_SHAPER_BS_U_DEF,
    443						   HCLGE_SHAPER_BS_S_DEF);
    444
    445	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
    446
    447	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
    448
    449	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
    450
    451	return hclge_cmd_send(&hdev->hw, &desc, 1);
    452}
    453
    454static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
    455				     enum hclge_shap_bucket bucket, u8 pri_id,
    456				     u32 shapping_para, u32 rate)
    457{
    458	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
    459	enum hclge_opcode_type opcode;
    460	struct hclge_desc desc;
    461
    462	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
    463		 HCLGE_OPC_TM_PRI_C_SHAPPING;
    464
    465	hclge_cmd_setup_basic_desc(&desc, opcode, false);
    466
    467	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
    468
    469	shap_cfg_cmd->pri_id = pri_id;
    470
    471	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
    472
    473	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
    474
    475	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
    476
    477	return hclge_cmd_send(&hdev->hw, &desc, 1);
    478}
    479
    480static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
    481{
    482	struct hclge_desc desc;
    483
    484	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
    485
    486	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
    487		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
    488	else
    489		desc.data[1] = 0;
    490
    491	desc.data[0] = cpu_to_le32(pg_id);
    492
    493	return hclge_cmd_send(&hdev->hw, &desc, 1);
    494}
    495
    496static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
    497{
    498	struct hclge_desc desc;
    499
    500	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
    501
    502	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
    503		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
    504	else
    505		desc.data[1] = 0;
    506
    507	desc.data[0] = cpu_to_le32(pri_id);
    508
    509	return hclge_cmd_send(&hdev->hw, &desc, 1);
    510}
    511
    512static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
    513{
    514	struct hclge_desc desc;
    515
    516	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
    517
    518	if (mode == HCLGE_SCH_MODE_DWRR)
    519		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
    520	else
    521		desc.data[1] = 0;
    522
    523	desc.data[0] = cpu_to_le32(qs_id);
    524
    525	return hclge_cmd_send(&hdev->hw, &desc, 1);
    526}
    527
    528static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
    529			      u32 bit_map)
    530{
    531	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
    532	struct hclge_desc desc;
    533
    534	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
    535				   false);
    536
    537	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
    538
    539	bp_to_qs_map_cmd->tc_id = tc;
    540	bp_to_qs_map_cmd->qs_group_id = grp_id;
    541	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
    542
    543	return hclge_cmd_send(&hdev->hw, &desc, 1);
    544}
    545
    546int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
    547{
    548	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    549	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
    550	struct hclge_shaper_ir_para ir_para;
    551	struct hclge_dev *hdev = vport->back;
    552	struct hclge_desc desc;
    553	u32 shaper_para;
    554	int ret, i;
    555
    556	if (!max_tx_rate)
    557		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
    558
    559	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
    560				     &ir_para,
    561				     hdev->ae_dev->dev_specs.max_tm_rate);
    562	if (ret)
    563		return ret;
    564
    565	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
    566						 ir_para.ir_s,
    567						 HCLGE_SHAPER_BS_U_DEF,
    568						 HCLGE_SHAPER_BS_S_DEF);
    569
    570	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
    571		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
    572					   false);
    573
    574		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
    575		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
    576		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
    577
    578		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
    579		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
    580
    581		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
    582		if (ret) {
    583			dev_err(&hdev->pdev->dev,
    584				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
    585				vport->vport_id, shap_cfg_cmd->qs_id,
    586				max_tx_rate, ret);
    587			return ret;
    588		}
    589	}
    590
    591	return 0;
    592}
    593
    594static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
    595{
    596	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    597	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
    598	struct hclge_dev *hdev = vport->back;
    599	u16 max_rss_size = 0;
    600	int i;
    601
    602	if (!tc_info->mqprio_active)
    603		return vport->alloc_tqps / tc_info->num_tc;
    604
    605	for (i = 0; i < HNAE3_MAX_TC; i++) {
    606		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
    607			continue;
    608		if (max_rss_size < tc_info->tqp_count[i])
    609			max_rss_size = tc_info->tqp_count[i];
    610	}
    611
    612	return max_rss_size;
    613}
    614
    615static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
    616{
    617	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    618	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
    619	struct hclge_dev *hdev = vport->back;
    620	int sum = 0;
    621	int i;
    622
    623	if (!tc_info->mqprio_active)
    624		return kinfo->rss_size * tc_info->num_tc;
    625
    626	for (i = 0; i < HNAE3_MAX_TC; i++) {
    627		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
    628			sum += tc_info->tqp_count[i];
    629	}
    630
    631	return sum;
    632}
    633
    634static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
    635{
    636	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    637	struct hclge_dev *hdev = vport->back;
    638	u16 vport_max_rss_size;
    639	u16 max_rss_size;
    640
    641	/* TC configuration is shared by PF/VF on one port; only allow
    642	 * one tc per VF for simplicity. A VF's vport_id is non-zero.
    643	 */
    644	if (vport->vport_id) {
    645		kinfo->tc_info.max_tc = 1;
    646		kinfo->tc_info.num_tc = 1;
    647		vport->qs_offset = HNAE3_MAX_TC +
    648				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
    649		vport_max_rss_size = hdev->vf_rss_size_max;
    650	} else {
    651		kinfo->tc_info.max_tc = hdev->tc_max;
    652		kinfo->tc_info.num_tc =
    653			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
    654		vport->qs_offset = 0;
    655		vport_max_rss_size = hdev->pf_rss_size_max;
    656	}
    657
    658	max_rss_size = min_t(u16, vport_max_rss_size,
    659			     hclge_vport_get_max_rss_size(vport));
    660
    661	/* Set to user value, no larger than max_rss_size. */
    662	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
    663	    kinfo->req_rss_size <= max_rss_size) {
    664		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
    665			 kinfo->rss_size, kinfo->req_rss_size);
    666		kinfo->rss_size = kinfo->req_rss_size;
    667	} else if (kinfo->rss_size > max_rss_size ||
    668		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
    669		/* Set to the maximum specification value (max_rss_size). */
    670		kinfo->rss_size = max_rss_size;
    671	}
    672}
    673
    674static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
    675{
    676	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    677	struct hclge_dev *hdev = vport->back;
    678	u8 i;
    679
    680	hclge_tm_update_kinfo_rss_size(vport);
    681	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
    682	vport->dwrr = 100;  /* 100 percent as init */
    683	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
    684
    685	if (vport->vport_id == PF_VPORT_ID)
    686		hdev->rss_cfg.rss_size = kinfo->rss_size;
    687
    688	/* when mqprio is enabled, tc_info has already been updated. */
    689	if (kinfo->tc_info.mqprio_active)
    690		return;
    691
    692	for (i = 0; i < HNAE3_MAX_TC; i++) {
    693		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
    694			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
    695			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
    696		} else {
    697			/* Set to the default queue if the TC is disabled */
    698			kinfo->tc_info.tqp_offset[i] = 0;
    699			kinfo->tc_info.tqp_count[i] = 1;
    700		}
    701	}
    702
    703	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
    704	       sizeof_field(struct hnae3_tc_info, prio_tc));
    705}
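
/* The layout produced above, for a hypothetical vport with rss_size = 8
 * and 4 enabled TCs: each enabled TC gets a contiguous rss_size-wide
 * block of queues. Standalone sketch with made-up values.
 */
#include <stdio.h>

int main(void)
{
	const int rss_size = 8, num_tc = 4;
	int i;

	/* prints tc0: 0..7, tc1: 8..15, tc2: 16..23, tc3: 24..31 */
	for (i = 0; i < num_tc; i++)
		printf("tc%d: %d..%d\n", i, i * rss_size,
		       i * rss_size + rss_size - 1);
	return 0;
}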
    706
    707static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
    708{
    709	struct hclge_vport *vport = hdev->vport;
    710	u32 i;
    711
    712	for (i = 0; i < hdev->num_alloc_vport; i++) {
    713		hclge_tm_vport_tc_info_update(vport);
    714
    715		vport++;
    716	}
    717}
    718
    719static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
    720{
    721	u8 i, tc_sch_mode;
    722	u32 bw_limit;
    723
    724	for (i = 0; i < hdev->tc_max; i++) {
    725		if (i < hdev->tm_info.num_tc) {
    726			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
    727			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
    728		} else {
    729			tc_sch_mode = HCLGE_SCH_MODE_SP;
    730			bw_limit = 0;
    731		}
    732
    733		hdev->tm_info.tc_info[i].tc_id = i;
    734		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
    735		hdev->tm_info.tc_info[i].pgid = 0;
    736		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
    737	}
    738
    739	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
    740		hdev->tm_info.prio_tc[i] =
    741			(i >= hdev->tm_info.num_tc) ? 0 : i;
    742}
    743
    744static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
    745{
    746#define BW_PERCENT	100
    747
    748	u8 i;
    749
    750	for (i = 0; i < hdev->tm_info.num_pg; i++) {
    751		int k;
    752
    753		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
    754
    755		hdev->tm_info.pg_info[i].pg_id = i;
    756		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
    757
    758		hdev->tm_info.pg_info[i].bw_limit =
    759					hdev->ae_dev->dev_specs.max_tm_rate;
    760
    761		if (i != 0)
    762			continue;
    763
    764		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
    765		for (k = 0; k < hdev->tm_info.num_tc; k++)
    766			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
    767		for (; k < HNAE3_MAX_TC; k++)
    768			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
    769	}
    770}
    771
    772static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
    773{
    774	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
    775		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
    776			dev_warn(&hdev->pdev->dev,
    777				 "Only 1 tc used, but last mode is FC_PFC\n");
    778
    779		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
    780	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
    781		/* fc_mode_last_time record the last fc_mode when
    782		 * DCB is enabled, so that fc_mode can be set to
    783		 * the correct value when DCB is disabled.
    784		 */
    785		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
    786		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
    787	}
    788}
    789
    790static void hclge_update_fc_mode(struct hclge_dev *hdev)
    791{
    792	if (!hdev->tm_info.pfc_en) {
    793		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
    794		return;
    795	}
    796
    797	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
    798		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
    799		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
    800	}
    801}
    802
    803void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
    804{
    805	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
    806		hclge_update_fc_mode(hdev);
    807	else
    808		hclge_update_fc_mode_by_dcb_flag(hdev);
    809}
    810
    811static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
    812{
    813	hclge_tm_pg_info_init(hdev);
    814
    815	hclge_tm_tc_info_init(hdev);
    816
    817	hclge_tm_vport_info_update(hdev);
    818
    819	hclge_tm_pfc_info_update(hdev);
    820}
    821
    822static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
    823{
    824	int ret;
    825	u32 i;
    826
    827	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
    828		return 0;
    829
    830	for (i = 0; i < hdev->tm_info.num_pg; i++) {
    831		/* Cfg mapping */
    832		ret = hclge_tm_pg_to_pri_map_cfg(
    833			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
    834		if (ret)
    835			return ret;
    836	}
    837
    838	return 0;
    839}
    840
    841static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
    842{
    843	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
    844	struct hclge_shaper_ir_para ir_para;
    845	u32 shaper_para;
    846	int ret;
    847	u32 i;
    848
    849	/* Cfg pg schd */
    850	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
    851		return 0;
    852
    853	/* Pg to pri */
    854	for (i = 0; i < hdev->tm_info.num_pg; i++) {
    855		u32 rate = hdev->tm_info.pg_info[i].bw_limit;
    856
    857		/* Calc shaper para */
    858		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
    859					     &ir_para, max_tm_rate);
    860		if (ret)
    861			return ret;
    862
    863		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
    864							 HCLGE_SHAPER_BS_U_DEF,
    865							 HCLGE_SHAPER_BS_S_DEF);
    866		ret = hclge_tm_pg_shapping_cfg(hdev,
    867					       HCLGE_TM_SHAP_C_BUCKET, i,
    868					       shaper_para, rate);
    869		if (ret)
    870			return ret;
    871
    872		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
    873							 ir_para.ir_u,
    874							 ir_para.ir_s,
    875							 HCLGE_SHAPER_BS_U_DEF,
    876							 HCLGE_SHAPER_BS_S_DEF);
    877		ret = hclge_tm_pg_shapping_cfg(hdev,
    878					       HCLGE_TM_SHAP_P_BUCKET, i,
    879					       shaper_para, rate);
    880		if (ret)
    881			return ret;
    882	}
    883
    884	return 0;
    885}
    886
    887static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
    888{
    889	int ret;
    890	u32 i;
    891
    892	/* cfg pg schd */
    893	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
    894		return 0;
    895
    896	/* pg to prio */
    897	for (i = 0; i < hdev->tm_info.num_pg; i++) {
    898		/* Cfg dwrr */
    899		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
    900		if (ret)
    901			return ret;
    902	}
    903
    904	return 0;
    905}
    906
    907static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
    908				   struct hclge_vport *vport)
    909{
    910	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
    911	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
    912	struct hnae3_queue **tqp = kinfo->tqp;
    913	u32 i, j;
    914	int ret;
    915
    916	for (i = 0; i < tc_info->num_tc; i++) {
    917		for (j = 0; j < tc_info->tqp_count[i]; j++) {
    918			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
    919
    920			ret = hclge_tm_q_to_qs_map_cfg(hdev,
    921						       hclge_get_queue_id(q),
    922						       vport->qs_offset + i);
    923			if (ret)
    924				return ret;
    925		}
    926	}
    927
    928	return 0;
    929}
    930
    931static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
    932{
    933	struct hclge_vport *vport = hdev->vport;
    934	u16 i, k;
    935	int ret;
    936
    937	/* Cfg qs -> pri mapping, one by one mapping */
    938	for (k = 0; k < hdev->num_alloc_vport; k++) {
    939		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
    940
    941		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
    942			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
    943			bool link_vld = i < kinfo->tc_info.num_tc;
    944
    945			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
    946							 vport[k].qs_offset + i,
    947							 pri, link_vld);
    948			if (ret)
    949				return ret;
    950		}
    951	}
    952
    953	return 0;
    954}
    955
    956static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
    957{
    958	struct hclge_vport *vport = hdev->vport;
    959	u16 i, k;
    960	int ret;
    961
    962	/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
    963	for (k = 0; k < hdev->num_alloc_vport; k++)
    964		for (i = 0; i < HNAE3_MAX_TC; i++) {
    965			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
    966							 vport[k].qs_offset + i,
    967							 k, true);
    968			if (ret)
    969				return ret;
    970		}
    971
    972	return 0;
    973}
    974
    975static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
    976{
    977	struct hclge_vport *vport = hdev->vport;
    978	int ret;
    979	u32 i;
    980
    981	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
    982		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
    983	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
    984		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
    985	else
    986		return -EINVAL;
    987
    988	if (ret)
    989		return ret;
    990
    991	/* Cfg q -> qs mapping */
    992	for (i = 0; i < hdev->num_alloc_vport; i++) {
    993		ret = hclge_vport_q_to_qs_map(hdev, vport);
    994		if (ret)
    995			return ret;
    996
    997		vport++;
    998	}
    999
   1000	return 0;
   1001}
   1002
   1003static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
   1004{
   1005	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
   1006	struct hclge_shaper_ir_para ir_para;
   1007	u32 shaper_para_c, shaper_para_p;
   1008	int ret;
   1009	u32 i;
   1010
   1011	for (i = 0; i < hdev->tc_max; i++) {
   1012		u32 rate = hdev->tm_info.tc_info[i].bw_limit;
   1013
   1014		if (rate) {
   1015			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
   1016						     &ir_para, max_tm_rate);
   1017			if (ret)
   1018				return ret;
   1019
   1020			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
   1021								   HCLGE_SHAPER_BS_U_DEF,
   1022								   HCLGE_SHAPER_BS_S_DEF);
   1023			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
   1024								   ir_para.ir_u,
   1025								   ir_para.ir_s,
   1026								   HCLGE_SHAPER_BS_U_DEF,
   1027								   HCLGE_SHAPER_BS_S_DEF);
   1028		} else {
   1029			shaper_para_c = 0;
   1030			shaper_para_p = 0;
   1031		}
   1032
   1033		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
   1034						shaper_para_c, rate);
   1035		if (ret)
   1036			return ret;
   1037
   1038		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
   1039						shaper_para_p, rate);
   1040		if (ret)
   1041			return ret;
   1042	}
   1043
   1044	return 0;
   1045}
   1046
   1047static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
   1048{
   1049	struct hclge_dev *hdev = vport->back;
   1050	struct hclge_shaper_ir_para ir_para;
   1051	u32 shaper_para;
   1052	int ret;
   1053
   1054	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
   1055				     &ir_para,
   1056				     hdev->ae_dev->dev_specs.max_tm_rate);
   1057	if (ret)
   1058		return ret;
   1059
   1060	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
   1061						 HCLGE_SHAPER_BS_U_DEF,
   1062						 HCLGE_SHAPER_BS_S_DEF);
   1063	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
   1064					vport->vport_id, shaper_para,
   1065					vport->bw_limit);
   1066	if (ret)
   1067		return ret;
   1068
   1069	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
   1070						 ir_para.ir_s,
   1071						 HCLGE_SHAPER_BS_U_DEF,
   1072						 HCLGE_SHAPER_BS_S_DEF);
   1073	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
   1074					vport->vport_id, shaper_para,
   1075					vport->bw_limit);
   1076	if (ret)
   1077		return ret;
   1078
   1079	return 0;
   1080}
   1081
   1082static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
   1083{
   1084	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
   1085	struct hclge_dev *hdev = vport->back;
   1086	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
   1087	struct hclge_shaper_ir_para ir_para;
   1088	u32 i;
   1089	int ret;
   1090
   1091	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
   1092		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
   1093					     HCLGE_SHAPER_LVL_QSET,
   1094					     &ir_para, max_tm_rate);
   1095		if (ret)
   1096			return ret;
   1097	}
   1098
   1099	return 0;
   1100}
   1101
   1102static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
   1103{
   1104	struct hclge_vport *vport = hdev->vport;
   1105	int ret;
   1106	u32 i;
   1107
   1108	/* Need config vport shaper */
   1109	for (i = 0; i < hdev->num_alloc_vport; i++) {
   1110		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
   1111		if (ret)
   1112			return ret;
   1113
   1114		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
   1115		if (ret)
   1116			return ret;
   1117
   1118		vport++;
   1119	}
   1120
   1121	return 0;
   1122}
   1123
   1124static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
   1125{
   1126	int ret;
   1127
   1128	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
   1129		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
   1130		if (ret)
   1131			return ret;
   1132	} else {
   1133		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
   1134		if (ret)
   1135			return ret;
   1136	}
   1137
   1138	return 0;
   1139}
   1140
   1141static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
   1142{
   1143	struct hclge_vport *vport = hdev->vport;
   1144	struct hclge_pg_info *pg_info;
   1145	u8 dwrr;
   1146	int ret;
   1147	u32 i, k;
   1148
   1149	for (i = 0; i < hdev->tc_max; i++) {
   1150		pg_info =
   1151			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
   1152		dwrr = pg_info->tc_dwrr[i];
   1153
   1154		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
   1155		if (ret)
   1156			return ret;
   1157
   1158		for (k = 0; k < hdev->num_alloc_vport; k++) {
   1159			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
   1160
   1161			if (i >= kinfo->tc_info.max_tc)
   1162				continue;
   1163
   1164			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
   1165			ret = hclge_tm_qs_weight_cfg(
   1166				hdev, vport[k].qs_offset + i,
   1167				dwrr);
   1168			if (ret)
   1169				return ret;
   1170		}
   1171	}
   1172
   1173	return 0;
   1174}
   1175
   1176static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
   1177{
   1178#define DEFAULT_TC_OFFSET	14
   1179
   1180	struct hclge_ets_tc_weight_cmd *ets_weight;
   1181	struct hclge_desc desc;
   1182	unsigned int i;
   1183
   1184	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
   1185	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
   1186
   1187	for (i = 0; i < HNAE3_MAX_TC; i++) {
   1188		struct hclge_pg_info *pg_info;
   1189
   1190		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
   1191		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
   1192	}
   1193
   1194	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
   1195
   1196	return hclge_cmd_send(&hdev->hw, &desc, 1);
   1197}
   1198
   1199static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
   1200{
   1201	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
   1202	struct hclge_dev *hdev = vport->back;
   1203	int ret;
   1204	u8 i;
   1205
   1206	/* Vf dwrr */
   1207	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
   1208	if (ret)
   1209		return ret;
   1210
   1211	/* Qset dwrr */
   1212	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
   1213		ret = hclge_tm_qs_weight_cfg(
   1214			hdev, vport->qs_offset + i,
   1215			hdev->tm_info.pg_info[0].tc_dwrr[i]);
   1216		if (ret)
   1217			return ret;
   1218	}
   1219
   1220	return 0;
   1221}
   1222
   1223static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
   1224{
   1225	struct hclge_vport *vport = hdev->vport;
   1226	int ret;
   1227	u32 i;
   1228
   1229	for (i = 0; i < hdev->num_alloc_vport; i++) {
   1230		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
   1231		if (ret)
   1232			return ret;
   1233
   1234		vport++;
   1235	}
   1236
   1237	return 0;
   1238}
   1239
   1240static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
   1241{
   1242	int ret;
   1243
   1244	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
   1245		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
   1246		if (ret)
   1247			return ret;
   1248
   1249		if (!hnae3_dev_dcb_supported(hdev))
   1250			return 0;
   1251
   1252		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
   1253		if (ret == -EOPNOTSUPP) {
   1254			dev_warn(&hdev->pdev->dev,
   1255				 "fw %08x doesn't support ets tc weight cmd\n",
   1256				 hdev->fw_version);
   1257			ret = 0;
   1258		}
   1259
   1260		return ret;
   1261	} else {
   1262		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
   1263		if (ret)
   1264			return ret;
   1265	}
   1266
   1267	return 0;
   1268}
   1269
   1270static int hclge_tm_map_cfg(struct hclge_dev *hdev)
   1271{
   1272	int ret;
   1273
   1274	ret = hclge_up_to_tc_map(hdev);
   1275	if (ret)
   1276		return ret;
   1277
   1278	ret = hclge_tm_pg_to_pri_map(hdev);
   1279	if (ret)
   1280		return ret;
   1281
   1282	return hclge_tm_pri_q_qs_cfg(hdev);
   1283}
   1284
   1285static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
   1286{
   1287	int ret;
   1288
   1289	ret = hclge_tm_port_shaper_cfg(hdev);
   1290	if (ret)
   1291		return ret;
   1292
   1293	ret = hclge_tm_pg_shaper_cfg(hdev);
   1294	if (ret)
   1295		return ret;
   1296
   1297	return hclge_tm_pri_shaper_cfg(hdev);
   1298}
   1299
   1300int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
   1301{
   1302	int ret;
   1303
   1304	ret = hclge_tm_pg_dwrr_cfg(hdev);
   1305	if (ret)
   1306		return ret;
   1307
   1308	return hclge_tm_pri_dwrr_cfg(hdev);
   1309}
   1310
   1311static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
   1312{
   1313	int ret;
   1314	u8 i;
   1315
   1316	/* Only configured in TC-based scheduler mode */
   1317	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
   1318		return 0;
   1319
   1320	for (i = 0; i < hdev->tm_info.num_pg; i++) {
   1321		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
   1322		if (ret)
   1323			return ret;
   1324	}
   1325
   1326	return 0;
   1327}
   1328
   1329static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
   1330{
   1331	struct hclge_vport *vport = hdev->vport;
   1332	int ret;
   1333	u8 mode;
   1334	u16 i;
   1335
   1336	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
   1337	if (ret)
   1338		return ret;
   1339
   1340	for (i = 0; i < hdev->num_alloc_vport; i++) {
   1341		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
   1342
   1343		if (pri_id >= kinfo->tc_info.max_tc)
   1344			continue;
   1345
   1346		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
   1347		       HCLGE_SCH_MODE_SP;
   1348		ret = hclge_tm_qs_schd_mode_cfg(hdev,
   1349						vport[i].qs_offset + pri_id,
   1350						mode);
   1351		if (ret)
   1352			return ret;
   1353	}
   1354
   1355	return 0;
   1356}
   1357
   1358static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
   1359{
   1360	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
   1361	struct hclge_dev *hdev = vport->back;
   1362	int ret;
   1363	u8 i;
   1364
   1365	if (vport->vport_id >= HNAE3_MAX_TC)
   1366		return -EINVAL;
   1367
   1368	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
   1369	if (ret)
   1370		return ret;
   1371
   1372	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
   1373		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
   1374
   1375		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
   1376						sch_mode);
   1377		if (ret)
   1378			return ret;
   1379	}
   1380
   1381	return 0;
   1382}
   1383
   1384static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
   1385{
   1386	struct hclge_vport *vport = hdev->vport;
   1387	int ret;
   1388	u8 i;
   1389
   1390	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
   1391		for (i = 0; i < hdev->tc_max; i++) {
   1392			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
   1393			if (ret)
   1394				return ret;
   1395		}
   1396	} else {
   1397		for (i = 0; i < hdev->num_alloc_vport; i++) {
   1398			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
   1399			if (ret)
   1400				return ret;
   1401
   1402			vport++;
   1403		}
   1404	}
   1405
   1406	return 0;
   1407}
   1408
   1409static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
   1410{
   1411	int ret;
   1412
   1413	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
   1414	if (ret)
   1415		return ret;
   1416
   1417	return hclge_tm_lvl34_schd_mode_cfg(hdev);
   1418}
   1419
   1420int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
   1421{
   1422	int ret;
   1423
   1424	/* Cfg tm mapping  */
   1425	ret = hclge_tm_map_cfg(hdev);
   1426	if (ret)
   1427		return ret;
   1428
   1429	/* Cfg tm shaper */
   1430	ret = hclge_tm_shaper_cfg(hdev);
   1431	if (ret)
   1432		return ret;
   1433
   1434	/* Cfg dwrr */
   1435	ret = hclge_tm_dwrr_cfg(hdev);
   1436	if (ret)
   1437		return ret;
   1438
   1439	/* Cfg schd mode for each level schd */
   1440	return hclge_tm_schd_mode_hw(hdev);
   1441}
   1442
   1443static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
   1444{
   1445	struct hclge_mac *mac = &hdev->hw.mac;
   1446
   1447	return hclge_pause_param_cfg(hdev, mac->mac_addr,
   1448				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
   1449				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
   1450}
   1451
   1452static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
   1453{
   1454	u8 enable_bitmap = 0;
   1455
   1456	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
   1457		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
   1458				HCLGE_RX_MAC_PAUSE_EN_MSK;
   1459
   1460	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
   1461				      hdev->tm_info.pfc_en);
   1462}
   1463
   1464/* the queues used for backpressure are divided into several groups;
   1465 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
   1466 */
   1467static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
   1468{
   1469	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
   1470	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
   1471	u8 grp_num = HCLGE_BP_GRP_NUM;
   1472	int i;
   1473
   1474	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
   1475		grp_num = HCLGE_BP_EXT_GRP_NUM;
   1476		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
   1477		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
   1478	}
   1479
   1480	for (i = 0; i < grp_num; i++) {
   1481		u32 qs_bitmap = 0;
   1482		int k, ret;
   1483
   1484		for (k = 0; k < hdev->num_alloc_vport; k++) {
   1485			struct hclge_vport *vport = &hdev->vport[k];
   1486			u16 qs_id = vport->qs_offset + tc;
   1487			u8 grp, sub_grp;
   1488
   1489			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
   1490			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
   1491						  HCLGE_BP_SUB_GRP_ID_S);
   1492			if (i == grp)
   1493				qs_bitmap |= (1 << sub_grp);
   1494		}
   1495
   1496		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
   1497		if (ret)
   1498			return ret;
   1499	}
   1500
   1501	return 0;
   1502}
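
/* How the grouping above assembles a bitmap: with the default field
 * widths a qset id splits into grp = qs_id >> 5 and sub_grp = qs_id & 31
 * (assumed to match HCLGE_BP_GRP_ID_S and HCLGE_BP_SUB_GRP_ID_M), so
 * each group covers 32 qsets. Standalone sketch with made-up qset ids.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t qs_ids[] = {3, 8, 33, 40};	/* hypothetical qsets, one tc */
	uint32_t bitmap[2] = {0};
	int k;

	for (k = 0; k < 4; k++) {
		uint8_t grp = qs_ids[k] >> 5;	/* which u32 bitmap */
		uint8_t sub = qs_ids[k] & 0x1f;	/* bit within the bitmap */

		bitmap[grp] |= 1u << sub;
	}

	/* prints grp0: 0x00000108, grp1: 0x00000102 */
	for (k = 0; k < 2; k++)
		printf("grp%d: 0x%08x\n", k, bitmap[k]);
	return 0;
}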
   1503
   1504static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
   1505{
   1506	bool tx_en, rx_en;
   1507
   1508	switch (hdev->tm_info.fc_mode) {
   1509	case HCLGE_FC_NONE:
   1510		tx_en = false;
   1511		rx_en = false;
   1512		break;
   1513	case HCLGE_FC_RX_PAUSE:
   1514		tx_en = false;
   1515		rx_en = true;
   1516		break;
   1517	case HCLGE_FC_TX_PAUSE:
   1518		tx_en = true;
   1519		rx_en = false;
   1520		break;
   1521	case HCLGE_FC_FULL:
   1522		tx_en = true;
   1523		rx_en = true;
   1524		break;
   1525	case HCLGE_FC_PFC:
   1526		tx_en = false;
   1527		rx_en = false;
   1528		break;
   1529	default:
   1530		tx_en = true;
   1531		rx_en = true;
   1532	}
   1533
   1534	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
   1535}
   1536
   1537static int hclge_tm_bp_setup(struct hclge_dev *hdev)
   1538{
   1539	int ret;
   1540	int i;
   1541
   1542	for (i = 0; i < hdev->tm_info.num_tc; i++) {
   1543		ret = hclge_bp_setup_hw(hdev, i);
   1544		if (ret)
   1545			return ret;
   1546	}
   1547
   1548	return 0;
   1549}
   1550
   1551int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
   1552{
   1553	int ret;
   1554
   1555	ret = hclge_pause_param_setup_hw(hdev);
   1556	if (ret)
   1557		return ret;
   1558
   1559	ret = hclge_mac_pause_setup_hw(hdev);
   1560	if (ret)
   1561		return ret;
   1562
   1563	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
   1564	if (!hnae3_dev_dcb_supported(hdev))
   1565		return 0;
   1566
   1567	/* GE MAC does not support PFC; when the driver is initializing and
   1568	 * the MAC is in GE mode, ignore the error here, otherwise
   1569	 * initialization will fail.
   1570	 */
   1571	ret = hclge_pfc_setup_hw(hdev);
   1572	if (init && ret == -EOPNOTSUPP)
   1573		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
   1574	else if (ret) {
   1575		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
   1576			ret);
   1577		return ret;
   1578	}
   1579
   1580	return hclge_tm_bp_setup(hdev);
   1581}
   1582
   1583void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
   1584{
   1585	struct hclge_vport *vport = hdev->vport;
   1586	struct hnae3_knic_private_info *kinfo;
   1587	u32 i, k;
   1588
   1589	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
   1590		hdev->tm_info.prio_tc[i] = prio_tc[i];
   1591
   1592		for (k = 0;  k < hdev->num_alloc_vport; k++) {
   1593			kinfo = &vport[k].nic.kinfo;
   1594			kinfo->tc_info.prio_tc[i] = prio_tc[i];
   1595		}
   1596	}
   1597}
   1598
   1599void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
   1600{
   1601	u8 bit_map = 0;
   1602	u8 i;
   1603
   1604	hdev->tm_info.num_tc = num_tc;
   1605
   1606	for (i = 0; i < hdev->tm_info.num_tc; i++)
   1607		bit_map |= BIT(i);
   1608
   1609	if (!bit_map) {
   1610		bit_map = 1;
   1611		hdev->tm_info.num_tc = 1;
   1612	}
   1613
   1614	hdev->hw_tc_map = bit_map;
   1615
   1616	hclge_tm_schd_info_init(hdev);
   1617}
   1618
   1619int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
   1620{
   1621	int ret;
   1622
   1623	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
   1624	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
   1625		return -ENOTSUPP;
   1626
   1627	ret = hclge_tm_schd_setup_hw(hdev);
   1628	if (ret)
   1629		return ret;
   1630
   1631	ret = hclge_pause_setup_hw(hdev, init);
   1632	if (ret)
   1633		return ret;
   1634
   1635	return 0;
   1636}
   1637
   1638int hclge_tm_schd_init(struct hclge_dev *hdev)
   1639{
   1640	/* fc_mode is HCLGE_FC_FULL on reset */
   1641	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
   1642	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
   1643
   1644	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
   1645	    hdev->tm_info.num_pg != 1)
   1646		return -EINVAL;
   1647
   1648	hclge_tm_schd_info_init(hdev);
   1649
   1650	return hclge_tm_init_hw(hdev, true);
   1651}
   1652
   1653int hclge_tm_vport_map_update(struct hclge_dev *hdev)
   1654{
   1655	struct hclge_vport *vport = hdev->vport;
   1656	int ret;
   1657
   1658	hclge_tm_vport_tc_info_update(vport);
   1659
   1660	ret = hclge_vport_q_to_qs_map(hdev, vport);
   1661	if (ret)
   1662		return ret;
   1663
   1664	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
   1665		return 0;
   1666
   1667	return hclge_tm_bp_setup(hdev);
   1668}
   1669
   1670int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
   1671{
   1672	struct hclge_tm_nodes_cmd *nodes;
   1673	struct hclge_desc desc;
   1674	int ret;
   1675
   1676	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
   1677		/* Each PF has 8 qsets and each VF has 1 qset */
   1678		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
   1679		return 0;
   1680	}
   1681
   1682	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
   1683	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
   1684	if (ret) {
   1685		dev_err(&hdev->pdev->dev,
   1686			"failed to get qset num, ret = %d\n", ret);
   1687		return ret;
   1688	}
   1689
   1690	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
   1691	*qset_num = le16_to_cpu(nodes->qset_num);
   1692	return 0;
   1693}
   1694
   1695int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
   1696{
   1697	struct hclge_tm_nodes_cmd *nodes;
   1698	struct hclge_desc desc;
   1699	int ret;
   1700
   1701	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
   1702		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
   1703		return 0;
   1704	}
   1705
   1706	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
   1707	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
   1708	if (ret) {
   1709		dev_err(&hdev->pdev->dev,
   1710			"failed to get pri num, ret = %d\n", ret);
   1711		return ret;
   1712	}
   1713
   1714	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
   1715	*pri_num = nodes->pri_num;
   1716	return 0;
   1717}
   1718
   1719int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
   1720			      u8 *link_vld)
   1721{
   1722	struct hclge_qs_to_pri_link_cmd *map;
   1723	struct hclge_desc desc;
   1724	int ret;
   1725
   1726	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
   1727	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
   1728	map->qs_id = cpu_to_le16(qset_id);
   1729	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
   1730	if (ret) {
   1731		dev_err(&hdev->pdev->dev,
   1732			"failed to get qset map priority, ret = %d\n", ret);
   1733		return ret;
   1734	}
   1735
   1736	*priority = map->priority;
   1737	*link_vld = map->link_vld;
   1738	return 0;
   1739}
   1740
   1741int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
   1742{
   1743	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
   1744	struct hclge_desc desc;
   1745	int ret;
   1746
   1747	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
   1748	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
   1749	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
   1750	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
   1751	if (ret) {
   1752		dev_err(&hdev->pdev->dev,
   1753			"failed to get qset sch mode, ret = %d\n", ret);
   1754		return ret;
   1755	}
   1756
   1757	*mode = qs_sch_mode->sch_mode;
   1758	return 0;
   1759}
   1760
   1761int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
   1762{
   1763	struct hclge_qs_weight_cmd *qs_weight;
   1764	struct hclge_desc desc;
   1765	int ret;
   1766
   1767	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
   1768	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
   1769	qs_weight->qs_id = cpu_to_le16(qset_id);
   1770	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
   1771	if (ret) {
   1772		dev_err(&hdev->pdev->dev,
   1773			"failed to get qset weight, ret = %d\n", ret);
   1774		return ret;
   1775	}
   1776
   1777	*weight = qs_weight->dwrr;
   1778	return 0;
   1779}
   1780
   1781int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
   1782			     struct hclge_tm_shaper_para *para)
   1783{
   1784	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
   1785	struct hclge_desc desc;
   1786	u32 shapping_para;
   1787	int ret;
   1788
   1789	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
   1790	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
   1791	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
   1792	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
   1793	if (ret) {
   1794		dev_err(&hdev->pdev->dev,
   1795			"failed to get qset %u shaper, ret = %d\n", qset_id,
   1796			ret);
   1797		return ret;
   1798	}
   1799
   1800	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
   1801	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
   1802	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
   1803	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
   1804	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
   1805	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
   1806	para->flag = shap_cfg_cmd->flag;
   1807	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
   1808	return 0;
   1809}
   1810
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

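/* hclge_tm_get_pri_weight: get the DWRR weight of a priority
 * @hdev: pointer to struct hclge_dev
 * @pri_id: id of the priority to query
 * @weight: buffer for the returned dwrr weight
 *
 * @return: 0: query successful, negative: fail
 */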
int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

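/* hclge_tm_get_pri_shaper: get the shaper parameters of a priority
 * @hdev: pointer to struct hclge_dev
 * @pri_id: id of the priority to query
 * @cmd: HCLGE_OPC_TM_PRI_C_SHAPPING or HCLGE_OPC_TM_PRI_P_SHAPPING,
 *       selecting which of the two priority shapers to read
 * @para: buffer for the returned ir/bs shaper parameters and rate
 *
 * @return: 0: query successful, -EINVAL: unsupported cmd, negative: fail
 */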
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

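/* hclge_tm_get_q_to_qs_map: get the qset that a queue is mapped to
 * @hdev: pointer to struct hclge_dev
 * @q_id: id of the queue to query
 * @qset_id: buffer for the returned qset id, repacked with the vld bit
 *           removed
 *
 * @return: 0: query successful, negative: fail
 */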
int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, dropping the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
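	/* For example (illustrative values): a hardware qset_id of 0x0c05 has
	 * qs_id_h = 0x1 (bits 15 ~ 11), vld = 1 (bit 10) and qs_id_l = 0x5
	 * (bits 9 ~ 0); repacking drops vld and yields (0x1 << 10) | 0x5,
	 * i.e. 0x405.
	 */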
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

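/* hclge_tm_get_q_to_tc: get the TC that a queue belongs to
 * @hdev: pointer to struct hclge_dev
 * @q_id: id of the queue to query
 * @tc_id: buffer for the returned tc id
 *
 * @return: 0: query successful, negative: fail
 */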
int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

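/* hclge_tm_get_pg_to_pri_map: get the priorities linked to a priority group
 * @hdev: pointer to struct hclge_dev
 * @pg_id: id of the priority group to query
 * @pri_bit_map: buffer for the returned priority bitmap
 *
 * @return: 0: query successful, negative: fail
 */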
int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}

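/* hclge_tm_get_pg_weight: get the DWRR weight of a priority group
 * @hdev: pointer to struct hclge_dev
 * @pg_id: id of the priority group to query
 * @weight: buffer for the returned dwrr weight
 *
 * @return: 0: query successful, negative: fail
 */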
int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}

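/* hclge_tm_get_pg_sch_mode: get the scheduling mode of a priority group
 * @hdev: pointer to struct hclge_dev
 * @pg_id: id of the priority group to query, passed in desc.data[0];
 *         the mode is returned in desc.data[1]
 * @mode: buffer for the returned scheduling mode
 *
 * @return: 0: query successful, negative: fail
 */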
int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}

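/* hclge_tm_get_pg_shaper: get the shaper parameters of a priority group
 * @hdev: pointer to struct hclge_dev
 * @pg_id: id of the priority group to query
 * @cmd: HCLGE_OPC_TM_PG_C_SHAPPING or HCLGE_OPC_TM_PG_P_SHAPPING,
 *       selecting which of the two pg shapers to read
 * @para: buffer for the returned ir/bs shaper parameters and rate
 *
 * @return: 0: query successful, -EINVAL: unsupported cmd, negative: fail
 */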
int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}

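/* hclge_tm_get_port_shaper: get the shaper parameters of the port
 * @hdev: pointer to struct hclge_dev
 * @para: buffer for the returned ir/bs shaper parameters and rate
 *
 * @return: 0: query successful, negative: fail
 */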
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}