cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qm.c (150132B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Copyright (c) 2019 HiSilicon Limited. */
      3#include <asm/page.h>
      4#include <linux/acpi.h>
      5#include <linux/aer.h>
      6#include <linux/bitmap.h>
      7#include <linux/dma-mapping.h>
      8#include <linux/idr.h>
      9#include <linux/io.h>
     10#include <linux/irqreturn.h>
     11#include <linux/log2.h>
     12#include <linux/pm_runtime.h>
     13#include <linux/seq_file.h>
     14#include <linux/slab.h>
     15#include <linux/uacce.h>
     16#include <linux/uaccess.h>
     17#include <uapi/misc/uacce/hisi_qm.h>
     18#include <linux/hisi_acc_qm.h>
     19
     20/* eq/aeq irq enable */
     21#define QM_VF_AEQ_INT_SOURCE		0x0
     22#define QM_VF_AEQ_INT_MASK		0x4
     23#define QM_VF_EQ_INT_SOURCE		0x8
     24#define QM_VF_EQ_INT_MASK		0xc
     25#define QM_IRQ_NUM_V1			1
     26#define QM_IRQ_NUM_PF_V2		4
     27#define QM_IRQ_NUM_VF_V2		2
     28#define QM_IRQ_NUM_VF_V3		3
     29
     30#define QM_EQ_EVENT_IRQ_VECTOR		0
     31#define QM_AEQ_EVENT_IRQ_VECTOR		1
     32#define QM_CMD_EVENT_IRQ_VECTOR		2
     33#define QM_ABNORMAL_EVENT_IRQ_VECTOR	3
     34
     35/* mailbox */
     36#define QM_MB_PING_ALL_VFS		0xffff
     37#define QM_MB_CMD_DATA_SHIFT		32
     38#define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)
     39
     40/* sqc shift */
     41#define QM_SQ_HOP_NUM_SHIFT		0
     42#define QM_SQ_PAGE_SIZE_SHIFT		4
     43#define QM_SQ_BUF_SIZE_SHIFT		8
     44#define QM_SQ_SQE_SIZE_SHIFT		12
     45#define QM_SQ_PRIORITY_SHIFT		0
     46#define QM_SQ_ORDERS_SHIFT		4
     47#define QM_SQ_TYPE_SHIFT		8
     48#define QM_QC_PASID_ENABLE		0x1
     49#define QM_QC_PASID_ENABLE_SHIFT	7
     50
     51#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
     52#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
     53
     54/* cqc shift */
     55#define QM_CQ_HOP_NUM_SHIFT		0
     56#define QM_CQ_PAGE_SIZE_SHIFT		4
     57#define QM_CQ_BUF_SIZE_SHIFT		8
     58#define QM_CQ_CQE_SIZE_SHIFT		12
     59#define QM_CQ_PHASE_SHIFT		0
     60#define QM_CQ_FLAG_SHIFT		1
     61
     62#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
     63#define QM_QC_CQE_SIZE			4
     64#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
     65
     66/* eqc shift */
     67#define QM_EQE_AEQE_SIZE		(2UL << 12)
     68#define QM_EQC_PHASE_SHIFT		16
     69
     70#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
     71#define QM_EQE_CQN_MASK			GENMASK(15, 0)
     72
     73#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
     74#define QM_AEQE_TYPE_SHIFT		17
     75#define QM_AEQE_CQN_MASK		GENMASK(15, 0)
     76#define QM_CQ_OVERFLOW			0
     77#define QM_EQ_OVERFLOW			1
     78#define QM_CQE_ERROR			2
     79
     80#define QM_DOORBELL_CMD_SQ		0
     81#define QM_DOORBELL_CMD_CQ		1
     82#define QM_DOORBELL_CMD_EQ		2
     83#define QM_DOORBELL_CMD_AEQ		3
     84
     85#define QM_DOORBELL_BASE_V1		0x340
     86#define QM_DB_CMD_SHIFT_V1		16
     87#define QM_DB_INDEX_SHIFT_V1		32
     88#define QM_DB_PRIORITY_SHIFT_V1		48
     89#define QM_QUE_ISO_CFG_V		0x0030
     90#define QM_PAGE_SIZE			0x0034
     91#define QM_QUE_ISO_EN			0x100154
     92#define QM_CAPBILITY			0x100158
     93#define QM_QP_NUN_MASK			GENMASK(10, 0)
     94#define QM_QP_DB_INTERVAL		0x10000
     95
     96#define QM_MEM_START_INIT		0x100040
     97#define QM_MEM_INIT_DONE		0x100044
     98#define QM_VFT_CFG_RDY			0x10006c
     99#define QM_VFT_CFG_OP_WR		0x100058
    100#define QM_VFT_CFG_TYPE			0x10005c
    101#define QM_SQC_VFT			0x0
    102#define QM_CQC_VFT			0x1
    103#define QM_VFT_CFG			0x100060
    104#define QM_VFT_CFG_OP_ENABLE		0x100054
    105#define QM_PM_CTRL			0x100148
    106#define QM_IDLE_DISABLE			BIT(9)
    107
    108#define QM_VFT_CFG_DATA_L		0x100064
    109#define QM_VFT_CFG_DATA_H		0x100068
    110#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
    111#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
    112#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
    113#define QM_SQC_VFT_START_SQN_SHIFT	28
    114#define QM_SQC_VFT_VALID		(1ULL << 44)
    115#define QM_SQC_VFT_SQN_SHIFT		45
    116#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
    117#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
    118#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
    119#define QM_CQC_VFT_VALID		(1ULL << 28)
    120
    121#define QM_SQC_VFT_BASE_SHIFT_V2	28
    122#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
    123#define QM_SQC_VFT_NUM_SHIFT_V2		45
    124#define QM_SQC_VFT_NUM_MASK_v2		GENMASK(9, 0)
    125
    126#define QM_DFX_CNT_CLR_CE		0x100118
    127
    128#define QM_ABNORMAL_INT_SOURCE		0x100000
    129#define QM_ABNORMAL_INT_SOURCE_CLR	GENMASK(14, 0)
    130#define QM_ABNORMAL_INT_MASK		0x100004
    131#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
    132#define QM_ABNORMAL_INT_STATUS		0x100008
    133#define QM_ABNORMAL_INT_SET		0x10000c
    134#define QM_ABNORMAL_INF00		0x100010
    135#define QM_FIFO_OVERFLOW_TYPE		0xc0
    136#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
    137#define QM_FIFO_OVERFLOW_VF		0x3f
    138#define QM_ABNORMAL_INF01		0x100014
    139#define QM_DB_TIMEOUT_TYPE		0xc0
    140#define QM_DB_TIMEOUT_TYPE_SHIFT	6
    141#define QM_DB_TIMEOUT_VF		0x3f
    142#define QM_RAS_CE_ENABLE		0x1000ec
    143#define QM_RAS_FE_ENABLE		0x1000f0
    144#define QM_RAS_NFE_ENABLE		0x1000f4
    145#define QM_RAS_CE_THRESHOLD		0x1000f8
    146#define QM_RAS_CE_TIMES_PER_IRQ		1
    147#define QM_RAS_MSI_INT_SEL		0x1040f4
    148#define QM_OOO_SHUTDOWN_SEL		0x1040f8
    149
    150#define QM_RESET_WAIT_TIMEOUT		400
    151#define QM_PEH_VENDOR_ID		0x1000d8
    152#define ACC_VENDOR_ID_VALUE		0x5a5a
    153#define QM_PEH_DFX_INFO0		0x1000fc
    154#define QM_PEH_DFX_INFO1		0x100100
    155#define QM_PEH_DFX_MASK			(BIT(0) | BIT(2))
    156#define QM_PEH_MSI_FINISH_MASK		GENMASK(19, 16)
    157#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
    158#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
    159#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
    160#define ACC_MASTER_TRANS_RETURN_RW	3
    161#define ACC_MASTER_TRANS_RETURN		0x300150
    162#define ACC_MASTER_GLOBAL_CTRL		0x300000
    163#define ACC_AM_CFG_PORT_WR_EN		0x30001c
    164#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
    165#define ACC_AM_ROB_ECC_INT_STS		0x300104
    166#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
    167#define QM_MSI_CAP_ENABLE		BIT(16)
    168
    169/* interfunction communication */
    170#define QM_IFC_READY_STATUS		0x100128
    171#define QM_IFC_C_STS_M			0x10012C
    172#define QM_IFC_INT_SET_P		0x100130
    173#define QM_IFC_INT_CFG			0x100134
    174#define QM_IFC_INT_SOURCE_P		0x100138
    175#define QM_IFC_INT_SOURCE_V		0x0020
    176#define QM_IFC_INT_MASK			0x0024
    177#define QM_IFC_INT_STATUS		0x0028
    178#define QM_IFC_INT_SET_V		0x002C
    179#define QM_IFC_SEND_ALL_VFS		GENMASK(6, 0)
    180#define QM_IFC_INT_SOURCE_CLR		GENMASK(63, 0)
    181#define QM_IFC_INT_SOURCE_MASK		BIT(0)
    182#define QM_IFC_INT_DISABLE		BIT(0)
    183#define QM_IFC_INT_STATUS_MASK		BIT(0)
    184#define QM_IFC_INT_SET_MASK		BIT(0)
    185#define QM_WAIT_DST_ACK			10
    186#define QM_MAX_PF_WAIT_COUNT		10
    187#define QM_MAX_VF_WAIT_COUNT		40
    188#define QM_VF_RESET_WAIT_US            20000
    189#define QM_VF_RESET_WAIT_CNT           3000
    190#define QM_VF_RESET_WAIT_TIMEOUT_US    \
    191	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
    192
    193#define QM_DFX_MB_CNT_VF		0x104010
    194#define QM_DFX_DB_CNT_VF		0x104020
    195#define QM_DFX_SQE_CNT_VF_SQN		0x104030
    196#define QM_DFX_CQE_CNT_VF_CQN		0x104040
    197#define QM_DFX_QN_SHIFT			16
    198#define CURRENT_FUN_MASK		GENMASK(5, 0)
    199#define CURRENT_Q_MASK			GENMASK(31, 16)
    200
    201#define POLL_PERIOD			10
    202#define POLL_TIMEOUT			1000
    203#define WAIT_PERIOD_US_MAX		200
    204#define WAIT_PERIOD_US_MIN		100
    205#define MAX_WAIT_COUNTS			1000
    206#define QM_CACHE_WB_START		0x204
    207#define QM_CACHE_WB_DONE		0x208
    208
    209#define PCI_BAR_2			2
    210#define PCI_BAR_4			4
    211#define QM_SQE_DATA_ALIGN_MASK		GENMASK(6, 0)
    212#define QMC_ALIGN(sz)			ALIGN(sz, 32)
    213
    214#define QM_DBG_READ_LEN		256
    215#define QM_DBG_WRITE_LEN		1024
    216#define QM_DBG_TMP_BUF_LEN		22
    217#define QM_PCI_COMMAND_INVALID		~0
    218#define QM_RESET_STOP_TX_OFFSET		1
    219#define QM_RESET_STOP_RX_OFFSET		2
    220
    221#define WAIT_PERIOD			20
    222#define REMOVE_WAIT_DELAY		10
    223#define QM_SQE_ADDR_MASK		GENMASK(7, 0)
    224#define QM_EQ_DEPTH			(1024 * 2)
    225
    226#define QM_DRIVER_REMOVING		0
    227#define QM_RST_SCHED			1
    228#define QM_RESETTING			2
    229#define QM_QOS_PARAM_NUM		2
    230#define QM_QOS_VAL_NUM			1
    231#define QM_QOS_BDF_PARAM_NUM		4
    232#define QM_QOS_MAX_VAL			1000
    233#define QM_QOS_RATE			100
    234#define QM_QOS_EXPAND_RATE		1000
    235#define QM_SHAPER_CIR_B_MASK		GENMASK(7, 0)
    236#define QM_SHAPER_CIR_U_MASK		GENMASK(10, 8)
    237#define QM_SHAPER_CIR_S_MASK		GENMASK(14, 11)
    238#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
    239#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
    240#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
    241#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
    242#define QM_SHAPER_CBS_B			1
    243#define QM_SHAPER_CBS_S			16
    244#define QM_SHAPER_VFT_OFFSET		6
    245#define WAIT_FOR_QOS_VF			100
    246#define QM_QOS_MIN_ERROR_RATE		5
    247#define QM_QOS_TYPICAL_NUM		8
    248#define QM_SHAPER_MIN_CBS_S		8
    249#define QM_QOS_TICK			0x300U
    250#define QM_QOS_DIVISOR_CLK		0x1f40U
    251#define QM_QOS_MAX_CIR_B		200
    252#define QM_QOS_MIN_CIR_B		100
    253#define QM_QOS_MAX_CIR_U		6
    254#define QM_QOS_MAX_CIR_S		11
    255#define QM_QOS_VAL_MAX_LEN		32
    256#define QM_DFX_BASE		0x0100000
    257#define QM_DFX_STATE1		0x0104000
    258#define QM_DFX_STATE2		0x01040C8
    259#define QM_DFX_COMMON		0x0000
    260#define QM_DFX_BASE_LEN		0x5A
    261#define QM_DFX_STATE1_LEN		0x2E
    262#define QM_DFX_STATE2_LEN		0x11
    263#define QM_DFX_COMMON_LEN		0xC3
    264#define QM_DFX_REGS_LEN		4UL
    265#define QM_AUTOSUSPEND_DELAY		3000
    266
    267#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
    268	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
    269	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
    270	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
    271	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
    272
    273#define QM_MK_CQC_DW3_V2(cqe_sz) \
    274	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
    275
    276#define QM_MK_SQC_W13(priority, orders, alg_type) \
    277	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
    278	((orders) << QM_SQ_ORDERS_SHIFT)	| \
    279	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
    280
    281#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
    282	(((hop_num) << QM_SQ_HOP_NUM_SHIFT)	| \
    283	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)	| \
    284	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
    285	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
    286
    287#define QM_MK_SQC_DW3_V2(sqe_sz) \
    288	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
    289
    290#define INIT_QC_COMMON(qc, base, pasid) do {			\
    291	(qc)->head = 0;						\
    292	(qc)->tail = 0;						\
    293	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
    294	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
    295	(qc)->dw3 = 0;						\
    296	(qc)->w8 = 0;						\
    297	(qc)->rsvd0 = 0;					\
    298	(qc)->pasid = cpu_to_le16(pasid);			\
    299	(qc)->w11 = 0;						\
    300	(qc)->rsvd1 = 0;					\
    301} while (0)
    302
    303enum vft_type {
    304	SQC_VFT = 0,
    305	CQC_VFT,
    306	SHAPER_VFT,
    307};
    308
    309enum acc_err_result {
    310	ACC_ERR_NONE,
    311	ACC_ERR_NEED_RESET,
    312	ACC_ERR_RECOVERED,
    313};
    314
    315enum qm_alg_type {
    316	ALG_TYPE_0,
    317	ALG_TYPE_1,
    318};
    319
    320enum qm_mb_cmd {
    321	QM_PF_FLR_PREPARE = 0x01,
    322	QM_PF_SRST_PREPARE,
    323	QM_PF_RESET_DONE,
    324	QM_VF_PREPARE_DONE,
    325	QM_VF_PREPARE_FAIL,
    326	QM_VF_START_DONE,
    327	QM_VF_START_FAIL,
    328	QM_PF_SET_QOS,
    329	QM_VF_GET_QOS,
    330};
    331
    332struct qm_cqe {
    333	__le32 rsvd0;
    334	__le16 cmd_id;
    335	__le16 rsvd1;
    336	__le16 sq_head;
    337	__le16 sq_num;
    338	__le16 rsvd2;
    339	__le16 w7;
    340};
    341
    342struct qm_eqe {
    343	__le32 dw0;
    344};
    345
    346struct qm_aeqe {
    347	__le32 dw0;
    348};
    349
    350struct qm_sqc {
    351	__le16 head;
    352	__le16 tail;
    353	__le32 base_l;
    354	__le32 base_h;
    355	__le32 dw3;
    356	__le16 w8;
    357	__le16 rsvd0;
    358	__le16 pasid;
    359	__le16 w11;
    360	__le16 cq_num;
    361	__le16 w13;
    362	__le32 rsvd1;
    363};
    364
    365struct qm_cqc {
    366	__le16 head;
    367	__le16 tail;
    368	__le32 base_l;
    369	__le32 base_h;
    370	__le32 dw3;
    371	__le16 w8;
    372	__le16 rsvd0;
    373	__le16 pasid;
    374	__le16 w11;
    375	__le32 dw6;
    376	__le32 rsvd1;
    377};
    378
    379struct qm_eqc {
    380	__le16 head;
    381	__le16 tail;
    382	__le32 base_l;
    383	__le32 base_h;
    384	__le32 dw3;
    385	__le32 rsvd[2];
    386	__le32 dw6;
    387};
    388
    389struct qm_aeqc {
    390	__le16 head;
    391	__le16 tail;
    392	__le32 base_l;
    393	__le32 base_h;
    394	__le32 dw3;
    395	__le32 rsvd[2];
    396	__le32 dw6;
    397};
    398
    399struct qm_mailbox {
    400	__le16 w0;
    401	__le16 queue_num;
    402	__le32 base_l;
    403	__le32 base_h;
    404	__le32 rsvd;
    405};
    406
    407struct qm_doorbell {
    408	__le16 queue_num;
    409	__le16 cmd;
    410	__le16 index;
    411	__le16 priority;
    412};
    413
    414struct hisi_qm_resource {
    415	struct hisi_qm *qm;
    416	int distance;
    417	struct list_head list;
    418};
    419
    420struct hisi_qm_hw_ops {
    421	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
    422	void (*qm_db)(struct hisi_qm *qm, u16 qn,
    423		      u8 cmd, u16 index, u8 priority);
    424	u32 (*get_irq_num)(struct hisi_qm *qm);
    425	int (*debug_init)(struct hisi_qm *qm);
    426	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
    427	void (*hw_error_uninit)(struct hisi_qm *qm);
    428	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
    429	int (*stop_qp)(struct hisi_qp *qp);
    430	int (*set_msi)(struct hisi_qm *qm, bool set);
    431	int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
    432	int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
    433};
    434
    435struct qm_dfx_item {
    436	const char *name;
    437	u32 offset;
    438};
    439
    440static struct qm_dfx_item qm_dfx_files[] = {
    441	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
    442	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
    443	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
    444	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
    445	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
    446};
    447
    448static const char * const qm_debug_file_name[] = {
    449	[CURRENT_QM]   = "current_qm",
    450	[CURRENT_Q]    = "current_q",
    451	[CLEAR_ENABLE] = "clear_enable",
    452};
    453
    454struct hisi_qm_hw_error {
    455	u32 int_msk;
    456	const char *msg;
    457};
    458
    459static const struct hisi_qm_hw_error qm_hw_error[] = {
    460	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
    461	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
    462	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
    463	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
    464	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
    465	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
    466	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
    467	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
    468	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
    469	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
    470	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
    471	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
    472	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
    473	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
    474	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
    475	{ /* sentinel */ }
    476};
    477
    478/* define the QM's dfx regs region and region length */
    479static struct dfx_diff_registers qm_diff_regs[] = {
    480	{
    481		.reg_offset = QM_DFX_BASE,
    482		.reg_len = QM_DFX_BASE_LEN,
    483	}, {
    484		.reg_offset = QM_DFX_STATE1,
    485		.reg_len = QM_DFX_STATE1_LEN,
    486	}, {
    487		.reg_offset = QM_DFX_STATE2,
    488		.reg_len = QM_DFX_STATE2_LEN,
    489	}, {
    490		.reg_offset = QM_DFX_COMMON,
    491		.reg_len = QM_DFX_COMMON_LEN,
    492	},
    493};
    494
    495static const char * const qm_db_timeout[] = {
    496	"sq", "cq", "eq", "aeq",
    497};
    498
    499static const char * const qm_fifo_overflow[] = {
    500	"cq", "eq", "aeq",
    501};
    502
    503static const char * const qm_s[] = {
    504	"init", "start", "close", "stop",
    505};
    506
    507static const char * const qp_s[] = {
    508	"none", "init", "start", "stop", "close",
    509};
    510
    511struct qm_typical_qos_table {
    512	u32 start;
    513	u32 end;
    514	u32 val;
    515};
    516
    517/* the qos step is 100 */
    518static struct qm_typical_qos_table shaper_cir_s[] = {
    519	{100, 100, 4},
    520	{200, 200, 3},
    521	{300, 500, 2},
    522	{600, 1000, 1},
    523	{1100, 100000, 0},
    524};
    525
    526static struct qm_typical_qos_table shaper_cbs_s[] = {
    527	{100, 200, 9},
    528	{300, 500, 11},
    529	{600, 1000, 12},
    530	{1100, 10000, 16},
    531	{10100, 25000, 17},
    532	{25100, 50000, 18},
    533	{50100, 100000, 19}
    534};
    535
    536static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
    537{
    538	enum qm_state curr = atomic_read(&qm->status.flags);
    539	bool avail = false;
    540
    541	switch (curr) {
    542	case QM_INIT:
    543		if (new == QM_START || new == QM_CLOSE)
    544			avail = true;
    545		break;
    546	case QM_START:
    547		if (new == QM_STOP)
    548			avail = true;
    549		break;
    550	case QM_STOP:
    551		if (new == QM_CLOSE || new == QM_START)
    552			avail = true;
    553		break;
    554	default:
    555		break;
    556	}
    557
    558	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
    559		qm_s[curr], qm_s[new]);
    560
    561	if (!avail)
    562		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
    563			 qm_s[curr], qm_s[new]);
    564
    565	return avail;
    566}
    567
    568static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
    569			      enum qp_state new)
    570{
    571	enum qm_state qm_curr = atomic_read(&qm->status.flags);
    572	enum qp_state qp_curr = 0;
    573	bool avail = false;
    574
    575	if (qp)
    576		qp_curr = atomic_read(&qp->qp_status.flags);
    577
    578	switch (new) {
    579	case QP_INIT:
    580		if (qm_curr == QM_START || qm_curr == QM_INIT)
    581			avail = true;
    582		break;
    583	case QP_START:
    584		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
    585		    (qm_curr == QM_START && qp_curr == QP_STOP))
    586			avail = true;
    587		break;
    588	case QP_STOP:
    589		if ((qm_curr == QM_START && qp_curr == QP_START) ||
    590		    (qp_curr == QP_INIT))
    591			avail = true;
    592		break;
    593	case QP_CLOSE:
    594		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
    595		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
    596		    (qm_curr == QM_STOP && qp_curr == QP_STOP)  ||
    597		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
    598			avail = true;
    599		break;
    600	default:
    601		break;
    602	}
    603
    604	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
    605		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
    606
    607	if (!avail)
    608		dev_warn(&qm->pdev->dev,
    609			 "Can not change qp state from %s to %s in QM %s\n",
    610			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
    611
    612	return avail;
    613}
    614
    615static u32 qm_get_hw_error_status(struct hisi_qm *qm)
    616{
    617	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
    618}
    619
    620static u32 qm_get_dev_err_status(struct hisi_qm *qm)
    621{
    622	return qm->err_ini->get_dev_hw_err_status(qm);
    623}
    624
    625/* Check if the error causes the master ooo block */
    626static int qm_check_dev_error(struct hisi_qm *qm)
    627{
    628	u32 val, dev_val;
    629
    630	if (qm->fun_type == QM_HW_VF)
    631		return 0;
    632
    633	val = qm_get_hw_error_status(qm);
    634	dev_val = qm_get_dev_err_status(qm);
    635
    636	if (qm->ver < QM_HW_V3)
    637		return (val & QM_ECC_MBIT) ||
    638		       (dev_val & qm->err_info.ecc_2bits_mask);
    639
    640	return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
    641	       (dev_val & (~qm->err_info.dev_ce_mask));
    642}
    643
    644static int qm_wait_reset_finish(struct hisi_qm *qm)
    645{
    646	int delay = 0;
    647
    648	/* All reset requests need to be queued for processing */
    649	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
    650		msleep(++delay);
    651		if (delay > QM_RESET_WAIT_TIMEOUT)
    652			return -EBUSY;
    653	}
    654
    655	return 0;
    656}
    657
    658static int qm_reset_prepare_ready(struct hisi_qm *qm)
    659{
    660	struct pci_dev *pdev = qm->pdev;
    661	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
    662
    663	/*
     664	 * PF and VF on host do not support resetting at the
    665	 * same time on Kunpeng920.
    666	 */
    667	if (qm->ver < QM_HW_V3)
    668		return qm_wait_reset_finish(pf_qm);
    669
    670	return qm_wait_reset_finish(qm);
    671}
    672
    673static void qm_reset_bit_clear(struct hisi_qm *qm)
    674{
    675	struct pci_dev *pdev = qm->pdev;
    676	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
    677
    678	if (qm->ver < QM_HW_V3)
    679		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
    680
    681	clear_bit(QM_RESETTING, &qm->misc_ctl);
    682}
    683
    684static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
    685			   u64 base, u16 queue, bool op)
    686{
    687	mailbox->w0 = cpu_to_le16((cmd) |
    688		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
    689		(0x1 << QM_MB_BUSY_SHIFT));
    690	mailbox->queue_num = cpu_to_le16(queue);
    691	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
    692	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
    693	mailbox->rsvd = 0;
    694}
    695
    696/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
    697int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
    698{
    699	u32 val;
    700
    701	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
    702					  val, !((val >> QM_MB_BUSY_SHIFT) &
    703					  0x1), POLL_PERIOD, POLL_TIMEOUT);
    704}
    705EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
    706
    707/* 128 bit should be written to hardware at one time to trigger a mailbox */
    708static void qm_mb_write(struct hisi_qm *qm, const void *src)
    709{
    710	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
    711	unsigned long tmp0 = 0, tmp1 = 0;
    712
    713	if (!IS_ENABLED(CONFIG_ARM64)) {
    714		memcpy_toio(fun_base, src, 16);
    715		dma_wmb();
    716		return;
    717	}
    718
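        	/*
        	 * On arm64, the ldp/stp pair below moves the whole 16-byte
        	 * mailbox as one load-pair/store-pair, so the command reaches
        	 * the hardware as a single 128-bit write, and "dmb oshst"
        	 * orders that store before any later writes.
        	 */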
    719	asm volatile("ldp %0, %1, %3\n"
    720		     "stp %0, %1, %2\n"
    721		     "dmb oshst\n"
    722		     : "=&r" (tmp0),
    723		       "=&r" (tmp1),
    724		       "+Q" (*((char __iomem *)fun_base))
    725		     : "Q" (*((char *)src))
    726		     : "memory");
    727}
    728
    729static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
    730{
    731	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
    732		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
    733		goto mb_busy;
    734	}
    735
    736	qm_mb_write(qm, mailbox);
    737
    738	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
    739		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
    740		goto mb_busy;
    741	}
    742
    743	return 0;
    744
    745mb_busy:
    746	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
    747	return -EBUSY;
    748}
    749
    750int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
    751	       bool op)
    752{
    753	struct qm_mailbox mailbox;
    754	int ret;
    755
    756	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
    757		queue, cmd, (unsigned long long)dma_addr);
    758
    759	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
    760
    761	mutex_lock(&qm->mailbox_lock);
    762	ret = qm_mb_nolock(qm, &mailbox);
    763	mutex_unlock(&qm->mailbox_lock);
    764
    765	return ret;
    766}
    767EXPORT_SYMBOL_GPL(hisi_qm_mb);
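
        /*
         * A minimal usage sketch: read the SQC of queue qp_id into a
         * caller-mapped DMA buffer, as qm_dump_sqc_raw() does further down
         * (op = 1 selects the read direction):
         *
         *	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
         *	if (ret)
         *		dev_err(&qm->pdev->dev, "failed to dump sqc\n");
         */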
    768
    769static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
    770{
    771	u64 doorbell;
    772
    773	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
    774		   ((u64)index << QM_DB_INDEX_SHIFT_V1)  |
    775		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
    776
    777	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
    778}
    779
    780static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
    781{
    782	void __iomem *io_base = qm->io_base;
    783	u16 randata = 0;
    784	u64 doorbell;
    785
    786	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
    787		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
    788			  QM_DOORBELL_SQ_CQ_BASE_V2;
    789	else
    790		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
    791
    792	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
    793		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
    794		   ((u64)index << QM_DB_INDEX_SHIFT_V2)	 |
    795		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
    796
    797	writeq(doorbell, io_base);
    798}
    799
    800static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
    801{
    802	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
    803		qn, cmd, index);
    804
    805	qm->ops->qm_db(qm, qn, cmd, index, priority);
    806}
    807
    808static void qm_disable_clock_gate(struct hisi_qm *qm)
    809{
    810	u32 val;
    811
    812	/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
    813	if (qm->ver < QM_HW_V3)
    814		return;
    815
    816	val = readl(qm->io_base + QM_PM_CTRL);
    817	val |= QM_IDLE_DISABLE;
    818	writel(val, qm->io_base +  QM_PM_CTRL);
    819}
    820
    821static int qm_dev_mem_reset(struct hisi_qm *qm)
    822{
    823	u32 val;
    824
    825	writel(0x1, qm->io_base + QM_MEM_START_INIT);
    826	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
    827					  val & BIT(0), POLL_PERIOD,
    828					  POLL_TIMEOUT);
    829}
    830
    831static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
    832{
    833	return QM_IRQ_NUM_V1;
    834}
    835
    836static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
    837{
    838	if (qm->fun_type == QM_HW_PF)
    839		return QM_IRQ_NUM_PF_V2;
    840	else
    841		return QM_IRQ_NUM_VF_V2;
    842}
    843
    844static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
    845{
    846	if (qm->fun_type == QM_HW_PF)
    847		return QM_IRQ_NUM_PF_V2;
    848
    849	return QM_IRQ_NUM_VF_V3;
    850}
    851
    852static int qm_pm_get_sync(struct hisi_qm *qm)
    853{
    854	struct device *dev = &qm->pdev->dev;
    855	int ret;
    856
    857	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
    858		return 0;
    859
    860	ret = pm_runtime_resume_and_get(dev);
    861	if (ret < 0) {
    862		dev_err(dev, "failed to get_sync(%d).\n", ret);
    863		return ret;
    864	}
    865
    866	return 0;
    867}
    868
    869static void qm_pm_put_sync(struct hisi_qm *qm)
    870{
    871	struct device *dev = &qm->pdev->dev;
    872
    873	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
    874		return;
    875
    876	pm_runtime_mark_last_busy(dev);
    877	pm_runtime_put_autosuspend(dev);
    878}
    879
    880static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
    881{
    882	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
    883
    884	return &qm->qp_array[cqn];
    885}
    886
    887static void qm_cq_head_update(struct hisi_qp *qp)
    888{
    889	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
    890		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
    891		qp->qp_status.cq_head = 0;
    892	} else {
    893		qp->qp_status.cq_head++;
    894	}
    895}
    896
    897static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
    898{
    899	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
    900		return;
    901
    902	if (qp->event_cb) {
    903		qp->event_cb(qp);
    904		return;
    905	}
    906
    907	if (qp->req_cb) {
    908		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
    909
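        		/*
        		 * The phase bit written by hardware flips on every pass
        		 * of the ring; an entry whose phase still matches
        		 * cqc_phase is a new completion to consume.
        		 */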
    910		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
    911			dma_rmb();
    912			qp->req_cb(qp, qp->sqe + qm->sqe_size *
    913				   le16_to_cpu(cqe->sq_head));
    914			qm_cq_head_update(qp);
    915			cqe = qp->cqe + qp->qp_status.cq_head;
    916			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
    917			      qp->qp_status.cq_head, 0);
    918			atomic_dec(&qp->qp_status.used);
    919		}
    920
    921		/* set c_flag */
    922		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
    923		      qp->qp_status.cq_head, 1);
    924	}
    925}
    926
    927static void qm_work_process(struct work_struct *work)
    928{
    929	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
    930	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
    931	struct hisi_qp *qp;
    932	int eqe_num = 0;
    933
    934	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
    935		eqe_num++;
    936		qp = qm_to_hisi_qp(qm, eqe);
    937		qm_poll_qp(qp, qm);
    938
    939		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
    940			qm->status.eqc_phase = !qm->status.eqc_phase;
    941			eqe = qm->eqe;
    942			qm->status.eq_head = 0;
    943		} else {
    944			eqe++;
    945			qm->status.eq_head++;
    946		}
    947
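        		/*
        		 * Ring the EQ doorbell periodically so the hardware can
        		 * reuse event queue entries that have already been
        		 * consumed.
        		 */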
    948		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
    949			eqe_num = 0;
    950			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
    951		}
    952	}
    953
    954	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
    955}
    956
    957static irqreturn_t do_qm_irq(int irq, void *data)
    958{
    959	struct hisi_qm *qm = (struct hisi_qm *)data;
    960
     961	/* use the workqueue created by the QM's device driver, if any */
    962	if (qm->wq)
    963		queue_work(qm->wq, &qm->work);
    964	else
    965		schedule_work(&qm->work);
    966
    967	return IRQ_HANDLED;
    968}
    969
    970static irqreturn_t qm_irq(int irq, void *data)
    971{
    972	struct hisi_qm *qm = data;
    973
    974	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
    975		return do_qm_irq(irq, data);
    976
    977	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
    978	dev_err(&qm->pdev->dev, "invalid int source\n");
    979	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
    980
    981	return IRQ_NONE;
    982}
    983
    984static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
    985{
    986	struct hisi_qm *qm = data;
    987	u32 val;
    988
    989	val = readl(qm->io_base + QM_IFC_INT_STATUS);
    990	val &= QM_IFC_INT_STATUS_MASK;
    991	if (!val)
    992		return IRQ_NONE;
    993
    994	schedule_work(&qm->cmd_process);
    995
    996	return IRQ_HANDLED;
    997}
    998
    999static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
   1000{
   1001	u32 *addr;
   1002
   1003	if (qp->is_in_kernel)
   1004		return;
   1005
   1006	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
   1007	*addr = 1;
   1008
   1009	/* make sure setup is completed */
   1010	smp_wmb();
   1011}
   1012
   1013static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
   1014{
   1015	struct hisi_qp *qp = &qm->qp_array[qp_id];
   1016
   1017	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
   1018	hisi_qm_stop_qp(qp);
   1019	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
   1020}
   1021
   1022static void qm_reset_function(struct hisi_qm *qm)
   1023{
   1024	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
   1025	struct device *dev = &qm->pdev->dev;
   1026	int ret;
   1027
   1028	if (qm_check_dev_error(pf_qm))
   1029		return;
   1030
   1031	ret = qm_reset_prepare_ready(qm);
   1032	if (ret) {
   1033		dev_err(dev, "reset function not ready\n");
   1034		return;
   1035	}
   1036
   1037	ret = hisi_qm_stop(qm, QM_FLR);
   1038	if (ret) {
   1039		dev_err(dev, "failed to stop qm when reset function\n");
   1040		goto clear_bit;
   1041	}
   1042
   1043	ret = hisi_qm_start(qm);
   1044	if (ret)
   1045		dev_err(dev, "failed to start qm when reset function\n");
   1046
   1047clear_bit:
   1048	qm_reset_bit_clear(qm);
   1049}
   1050
   1051static irqreturn_t qm_aeq_thread(int irq, void *data)
   1052{
   1053	struct hisi_qm *qm = data;
   1054	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
   1055	u32 type, qp_id;
   1056
   1057	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
   1058		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
   1059		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
   1060
   1061		switch (type) {
   1062		case QM_EQ_OVERFLOW:
   1063			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
   1064			qm_reset_function(qm);
   1065			return IRQ_HANDLED;
   1066		case QM_CQ_OVERFLOW:
   1067			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
   1068				qp_id);
   1069			fallthrough;
   1070		case QM_CQE_ERROR:
   1071			qm_disable_qp(qm, qp_id);
   1072			break;
   1073		default:
   1074			dev_err(&qm->pdev->dev, "unknown error type %u\n",
   1075				type);
   1076			break;
   1077		}
   1078
   1079		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
   1080			qm->status.aeqc_phase = !qm->status.aeqc_phase;
   1081			aeqe = qm->aeqe;
   1082			qm->status.aeq_head = 0;
   1083		} else {
   1084			aeqe++;
   1085			qm->status.aeq_head++;
   1086		}
   1087	}
   1088
   1089	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
   1090
   1091	return IRQ_HANDLED;
   1092}
   1093
   1094static irqreturn_t qm_aeq_irq(int irq, void *data)
   1095{
   1096	struct hisi_qm *qm = data;
   1097
   1098	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
   1099	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
   1100		return IRQ_NONE;
   1101
   1102	return IRQ_WAKE_THREAD;
   1103}
   1104
   1105static void qm_irq_unregister(struct hisi_qm *qm)
   1106{
   1107	struct pci_dev *pdev = qm->pdev;
   1108
   1109	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
   1110
   1111	if (qm->ver > QM_HW_V1) {
   1112		free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
   1113
   1114		if (qm->fun_type == QM_HW_PF)
   1115			free_irq(pci_irq_vector(pdev,
   1116				 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
   1117	}
   1118
   1119	if (qm->ver > QM_HW_V2)
   1120		free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
   1121}
   1122
   1123static void qm_init_qp_status(struct hisi_qp *qp)
   1124{
   1125	struct hisi_qp_status *qp_status = &qp->qp_status;
   1126
   1127	qp_status->sq_tail = 0;
   1128	qp_status->cq_head = 0;
   1129	qp_status->cqc_phase = true;
   1130	atomic_set(&qp_status->used, 0);
   1131}
   1132
   1133static void qm_init_prefetch(struct hisi_qm *qm)
   1134{
   1135	struct device *dev = &qm->pdev->dev;
   1136	u32 page_type = 0x0;
   1137
   1138	if (qm->ver < QM_HW_V3)
   1139		return;
   1140
   1141	switch (PAGE_SIZE) {
   1142	case SZ_4K:
   1143		page_type = 0x0;
   1144		break;
   1145	case SZ_16K:
   1146		page_type = 0x1;
   1147		break;
   1148	case SZ_64K:
   1149		page_type = 0x2;
   1150		break;
   1151	default:
    1152		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
   1153			PAGE_SIZE);
   1154	}
   1155
   1156	writel(page_type, qm->io_base + QM_PAGE_SIZE);
   1157}
   1158
   1159/*
    1160 * acc_shaper_para_calc() - Get the IR value from the qos formula; the
    1161 * return value is the expected calculated qos.
    1162 * The formula:
    1163 * IR = X Mbps: if ir = 1, IR = 100 Mbps; if ir = 10000, IR = 10 Gbps
   1164 *
   1165 *		IR_b * (2 ^ IR_u) * 8000
   1166 * IR(Mbps) = -------------------------
   1167 *		  Tick * (2 ^ IR_s)
   1168 */
   1169static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
   1170{
   1171	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
   1172					(QM_QOS_TICK * (1 << cir_s));
   1173}
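
        /*
         * For example, with the constants above (QM_QOS_TICK = 0x300 = 768,
         * QM_QOS_DIVISOR_CLK = 0x1f40 = 8000), cir_b = 100, cir_u = 0 and
         * cir_s = 4 give (100 * 8000 * 1) / (768 * 16) = 65.
         */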
   1174
   1175static u32 acc_shaper_calc_cbs_s(u32 ir)
   1176{
   1177	int table_size = ARRAY_SIZE(shaper_cbs_s);
   1178	int i;
   1179
   1180	for (i = 0; i < table_size; i++) {
   1181		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
   1182			return shaper_cbs_s[i].val;
   1183	}
   1184
   1185	return QM_SHAPER_MIN_CBS_S;
   1186}
   1187
   1188static u32 acc_shaper_calc_cir_s(u32 ir)
   1189{
   1190	int table_size = ARRAY_SIZE(shaper_cir_s);
   1191	int i;
   1192
   1193	for (i = 0; i < table_size; i++) {
   1194		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
   1195			return shaper_cir_s[i].val;
   1196	}
   1197
   1198	return 0;
   1199}
   1200
   1201static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
   1202{
   1203	u32 cir_b, cir_u, cir_s, ir_calc;
   1204	u32 error_rate;
   1205
   1206	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
   1207	cir_s = acc_shaper_calc_cir_s(ir);
   1208
   1209	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
   1210		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
   1211			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
   1212
   1213			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
   1214			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
   1215				factor->cir_b = cir_b;
   1216				factor->cir_u = cir_u;
   1217				factor->cir_s = cir_s;
   1218				return 0;
   1219			}
   1220		}
   1221	}
   1222
   1223	return -EINVAL;
   1224}
   1225
   1226static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
   1227			    u32 number, struct qm_shaper_factor *factor)
   1228{
   1229	u64 tmp = 0;
   1230
   1231	if (number > 0) {
   1232		switch (type) {
   1233		case SQC_VFT:
   1234			if (qm->ver == QM_HW_V1) {
   1235				tmp = QM_SQC_VFT_BUF_SIZE	|
   1236				      QM_SQC_VFT_SQC_SIZE	|
   1237				      QM_SQC_VFT_INDEX_NUMBER	|
   1238				      QM_SQC_VFT_VALID		|
   1239				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
   1240			} else {
   1241				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
   1242				      QM_SQC_VFT_VALID |
   1243				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
   1244			}
   1245			break;
   1246		case CQC_VFT:
   1247			if (qm->ver == QM_HW_V1) {
   1248				tmp = QM_CQC_VFT_BUF_SIZE	|
   1249				      QM_CQC_VFT_SQC_SIZE	|
   1250				      QM_CQC_VFT_INDEX_NUMBER	|
   1251				      QM_CQC_VFT_VALID;
   1252			} else {
   1253				tmp = QM_CQC_VFT_VALID;
   1254			}
   1255			break;
   1256		case SHAPER_VFT:
   1257			if (qm->ver >= QM_HW_V3) {
   1258				tmp = factor->cir_b |
   1259				(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
   1260				(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
   1261				(QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
   1262				(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
   1263			}
   1264			break;
   1265		}
   1266	}
   1267
   1268	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
   1269	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
   1270}
   1271
   1272static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
   1273			     u32 fun_num, u32 base, u32 number)
   1274{
   1275	struct qm_shaper_factor *factor = &qm->factor[fun_num];
   1276	unsigned int val;
   1277	int ret;
   1278
   1279	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
   1280					 val & BIT(0), POLL_PERIOD,
   1281					 POLL_TIMEOUT);
   1282	if (ret)
   1283		return ret;
   1284
   1285	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
   1286	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
   1287	if (type == SHAPER_VFT)
   1288		fun_num |= base << QM_SHAPER_VFT_OFFSET;
   1289
   1290	writel(fun_num, qm->io_base + QM_VFT_CFG);
   1291
   1292	qm_vft_data_cfg(qm, type, base, number, factor);
   1293
   1294	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
   1295	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
   1296
   1297	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
   1298					  val & BIT(0), POLL_PERIOD,
   1299					  POLL_TIMEOUT);
   1300}
   1301
   1302static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
   1303{
   1304	u32 qos = qm->factor[fun_num].func_qos;
   1305	int ret, i;
   1306
   1307	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
   1308	if (ret) {
   1309		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
   1310		return ret;
   1311	}
   1312	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
   1313	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
   1314		/* The base number of queue reuse for different alg type */
   1315		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
   1316		if (ret)
   1317			return ret;
   1318	}
   1319
   1320	return 0;
   1321}
   1322
   1323/* The config should be conducted after qm_dev_mem_reset() */
   1324static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
   1325			      u32 number)
   1326{
   1327	int ret, i;
   1328
   1329	for (i = SQC_VFT; i <= CQC_VFT; i++) {
   1330		ret = qm_set_vft_common(qm, i, fun_num, base, number);
   1331		if (ret)
   1332			return ret;
   1333	}
   1334
   1335	/* init default shaper qos val */
   1336	if (qm->ver >= QM_HW_V3) {
   1337		ret = qm_shaper_init_vft(qm, fun_num);
   1338		if (ret)
   1339			goto back_sqc_cqc;
   1340	}
   1341
   1342	return 0;
   1343back_sqc_cqc:
   1344	for (i = SQC_VFT; i <= CQC_VFT; i++) {
   1345		ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
   1346		if (ret)
   1347			return ret;
   1348	}
   1349	return ret;
   1350}
   1351
   1352static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
   1353{
   1354	u64 sqc_vft;
   1355	int ret;
   1356
   1357	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
   1358	if (ret)
   1359		return ret;
   1360
   1361	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
   1362		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
   1363	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
   1364	*number = (QM_SQC_VFT_NUM_MASK_v2 &
   1365		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
   1366
   1367	return 0;
   1368}
   1369
   1370static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
   1371{
   1372	u32 remain_q_num, vfq_num;
   1373	u32 num_vfs = qm->vfs_num;
   1374
   1375	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
   1376	if (vfq_num >= qm->max_qp_num)
   1377		return qm->max_qp_num;
   1378
   1379	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
   1380	if (vfq_num + remain_q_num <= qm->max_qp_num)
   1381		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
   1382
   1383	/*
    1384	 * if vfq_num + remain_q_num > max_qp_num, the last remain_q_num
    1385	 * VFs each get one more queue.
   1386	 */
   1387	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
   1388}
   1389
   1390static struct hisi_qm *file_to_qm(struct debugfs_file *file)
   1391{
   1392	struct qm_debug *debug = file->debug;
   1393
   1394	return container_of(debug, struct hisi_qm, debug);
   1395}
   1396
   1397static u32 current_q_read(struct hisi_qm *qm)
   1398{
   1399	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
   1400}
   1401
   1402static int current_q_write(struct hisi_qm *qm, u32 val)
   1403{
   1404	u32 tmp;
   1405
   1406	if (val >= qm->debug.curr_qm_qp_num)
   1407		return -EINVAL;
   1408
   1409	tmp = val << QM_DFX_QN_SHIFT |
   1410	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
   1411	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
   1412
   1413	tmp = val << QM_DFX_QN_SHIFT |
   1414	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
   1415	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
   1416
   1417	return 0;
   1418}
   1419
   1420static u32 clear_enable_read(struct hisi_qm *qm)
   1421{
   1422	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
   1423}
   1424
    1425/* rd_clr_ctrl: 1 enables read clear, 0 disables it */
   1426static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
   1427{
   1428	if (rd_clr_ctrl > 1)
   1429		return -EINVAL;
   1430
   1431	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
   1432
   1433	return 0;
   1434}
   1435
   1436static u32 current_qm_read(struct hisi_qm *qm)
   1437{
   1438	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
   1439}
   1440
   1441static int current_qm_write(struct hisi_qm *qm, u32 val)
   1442{
   1443	u32 tmp;
   1444
   1445	if (val > qm->vfs_num)
   1446		return -EINVAL;
   1447
    1448	/* Calculate curr_qm_qp_num according to the PF or VF Dev ID and store it */
   1449	if (!val)
   1450		qm->debug.curr_qm_qp_num = qm->qp_num;
   1451	else
   1452		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
   1453
   1454	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
   1455	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
   1456
   1457	tmp = val |
   1458	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
   1459	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
   1460
   1461	tmp = val |
   1462	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
   1463	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
   1464
   1465	return 0;
   1466}
   1467
   1468static ssize_t qm_debug_read(struct file *filp, char __user *buf,
   1469			     size_t count, loff_t *pos)
   1470{
   1471	struct debugfs_file *file = filp->private_data;
   1472	enum qm_debug_file index = file->index;
   1473	struct hisi_qm *qm = file_to_qm(file);
   1474	char tbuf[QM_DBG_TMP_BUF_LEN];
   1475	u32 val;
   1476	int ret;
   1477
   1478	ret = hisi_qm_get_dfx_access(qm);
   1479	if (ret)
   1480		return ret;
   1481
   1482	mutex_lock(&file->lock);
   1483	switch (index) {
   1484	case CURRENT_QM:
   1485		val = current_qm_read(qm);
   1486		break;
   1487	case CURRENT_Q:
   1488		val = current_q_read(qm);
   1489		break;
   1490	case CLEAR_ENABLE:
   1491		val = clear_enable_read(qm);
   1492		break;
   1493	default:
   1494		goto err_input;
   1495	}
   1496	mutex_unlock(&file->lock);
   1497
   1498	hisi_qm_put_dfx_access(qm);
   1499	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
   1500	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
   1501
   1502err_input:
   1503	mutex_unlock(&file->lock);
   1504	hisi_qm_put_dfx_access(qm);
   1505	return -EINVAL;
   1506}
   1507
   1508static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
   1509			      size_t count, loff_t *pos)
   1510{
   1511	struct debugfs_file *file = filp->private_data;
   1512	enum qm_debug_file index = file->index;
   1513	struct hisi_qm *qm = file_to_qm(file);
   1514	unsigned long val;
   1515	char tbuf[QM_DBG_TMP_BUF_LEN];
   1516	int len, ret;
   1517
   1518	if (*pos != 0)
   1519		return 0;
   1520
   1521	if (count >= QM_DBG_TMP_BUF_LEN)
   1522		return -ENOSPC;
   1523
   1524	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
   1525				     count);
   1526	if (len < 0)
   1527		return len;
   1528
   1529	tbuf[len] = '\0';
   1530	if (kstrtoul(tbuf, 0, &val))
   1531		return -EFAULT;
   1532
   1533	ret = hisi_qm_get_dfx_access(qm);
   1534	if (ret)
   1535		return ret;
   1536
   1537	mutex_lock(&file->lock);
   1538	switch (index) {
   1539	case CURRENT_QM:
   1540		ret = current_qm_write(qm, val);
   1541		break;
   1542	case CURRENT_Q:
   1543		ret = current_q_write(qm, val);
   1544		break;
   1545	case CLEAR_ENABLE:
   1546		ret = clear_enable_write(qm, val);
   1547		break;
   1548	default:
   1549		ret = -EINVAL;
   1550	}
   1551	mutex_unlock(&file->lock);
   1552
   1553	hisi_qm_put_dfx_access(qm);
   1554
   1555	if (ret)
   1556		return ret;
   1557
   1558	return count;
   1559}
   1560
   1561static const struct file_operations qm_debug_fops = {
   1562	.owner = THIS_MODULE,
   1563	.open = simple_open,
   1564	.read = qm_debug_read,
   1565	.write = qm_debug_write,
   1566};
   1567
   1568#define CNT_CYC_REGS_NUM		10
   1569static const struct debugfs_reg32 qm_dfx_regs[] = {
    1570	/* XXX_CNT are read-clear registers */
   1571	{"QM_ECC_1BIT_CNT               ",  0x104000ull},
   1572	{"QM_ECC_MBIT_CNT               ",  0x104008ull},
   1573	{"QM_DFX_MB_CNT                 ",  0x104018ull},
   1574	{"QM_DFX_DB_CNT                 ",  0x104028ull},
   1575	{"QM_DFX_SQE_CNT                ",  0x104038ull},
   1576	{"QM_DFX_CQE_CNT                ",  0x104048ull},
   1577	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
   1578	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
   1579	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
   1580	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
   1581	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
   1582	{"QM_ECC_1BIT_INF               ",  0x104004ull},
   1583	{"QM_ECC_MBIT_INF               ",  0x10400cull},
   1584	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
   1585	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
   1586	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
   1587	{"QM_DFX_FF_ST0                 ",  0x1040c8ull},
   1588	{"QM_DFX_FF_ST1                 ",  0x1040ccull},
   1589	{"QM_DFX_FF_ST2                 ",  0x1040d0ull},
   1590	{"QM_DFX_FF_ST3                 ",  0x1040d4ull},
   1591	{"QM_DFX_FF_ST4                 ",  0x1040d8ull},
   1592	{"QM_DFX_FF_ST5                 ",  0x1040dcull},
   1593	{"QM_DFX_FF_ST6                 ",  0x1040e0ull},
   1594	{"QM_IN_IDLE_ST                 ",  0x1040e4ull},
   1595};
   1596
   1597static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
   1598	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
   1599};
   1600
   1601/**
    1602 * hisi_qm_regs_dump() - Dump registers' values.
   1603 * @s: debugfs file handle.
   1604 * @regset: accelerator registers information.
   1605 *
   1606 * Dump accelerator registers.
   1607 */
   1608void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
   1609{
   1610	struct pci_dev *pdev = to_pci_dev(regset->dev);
   1611	struct hisi_qm *qm = pci_get_drvdata(pdev);
   1612	const struct debugfs_reg32 *regs = regset->regs;
   1613	int regs_len = regset->nregs;
   1614	int i, ret;
   1615	u32 val;
   1616
   1617	ret = hisi_qm_get_dfx_access(qm);
   1618	if (ret)
   1619		return;
   1620
   1621	for (i = 0; i < regs_len; i++) {
   1622		val = readl(regset->base + regs[i].offset);
   1623		seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
   1624	}
   1625
   1626	hisi_qm_put_dfx_access(qm);
   1627}
   1628EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
   1629
   1630static int qm_regs_show(struct seq_file *s, void *unused)
   1631{
   1632	struct hisi_qm *qm = s->private;
   1633	struct debugfs_regset32 regset;
   1634
   1635	if (qm->fun_type == QM_HW_PF) {
   1636		regset.regs = qm_dfx_regs;
   1637		regset.nregs = ARRAY_SIZE(qm_dfx_regs);
   1638	} else {
   1639		regset.regs = qm_vf_dfx_regs;
   1640		regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
   1641	}
   1642
   1643	regset.base = qm->io_base;
   1644	regset.dev = &qm->pdev->dev;
   1645
   1646	hisi_qm_regs_dump(s, &regset);
   1647
   1648	return 0;
   1649}
   1650
   1651DEFINE_SHOW_ATTRIBUTE(qm_regs);
   1652
   1653static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
   1654	const struct dfx_diff_registers *cregs, int reg_len)
   1655{
   1656	struct dfx_diff_registers *diff_regs;
   1657	u32 j, base_offset;
   1658	int i;
   1659
   1660	diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
   1661	if (!diff_regs)
   1662		return ERR_PTR(-ENOMEM);
   1663
   1664	for (i = 0; i < reg_len; i++) {
   1665		if (!cregs[i].reg_len)
   1666			continue;
   1667
   1668		diff_regs[i].reg_offset = cregs[i].reg_offset;
   1669		diff_regs[i].reg_len = cregs[i].reg_len;
   1670		diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
   1671					 GFP_KERNEL);
   1672		if (!diff_regs[i].regs)
   1673			goto alloc_error;
   1674
   1675		for (j = 0; j < diff_regs[i].reg_len; j++) {
   1676			base_offset = diff_regs[i].reg_offset +
   1677					j * QM_DFX_REGS_LEN;
   1678			diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
   1679		}
   1680	}
   1681
   1682	return diff_regs;
   1683
   1684alloc_error:
   1685	while (i > 0) {
   1686		i--;
   1687		kfree(diff_regs[i].regs);
   1688	}
   1689	kfree(diff_regs);
   1690	return ERR_PTR(-ENOMEM);
   1691}
   1692
   1693static void dfx_regs_uninit(struct hisi_qm *qm,
   1694		struct dfx_diff_registers *dregs, int reg_len)
   1695{
   1696	int i;
   1697
    1698	/* Set the register pointers to NULL to prevent a double free */
   1699	for (i = 0; i < reg_len; i++) {
   1700		kfree(dregs[i].regs);
   1701		dregs[i].regs = NULL;
   1702	}
   1703	kfree(dregs);
   1704	dregs = NULL;
   1705}
   1706
   1707/**
   1708 * hisi_qm_diff_regs_init() - Allocate memory for registers.
   1709 * @qm: device qm handle.
   1710 * @dregs: diff registers handle.
   1711 * @reg_len: diff registers region length.
   1712 */
   1713int hisi_qm_diff_regs_init(struct hisi_qm *qm,
   1714		struct dfx_diff_registers *dregs, int reg_len)
   1715{
   1716	if (!qm || !dregs || reg_len <= 0)
   1717		return -EINVAL;
   1718
   1719	if (qm->fun_type != QM_HW_PF)
   1720		return 0;
   1721
   1722	qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
   1723						ARRAY_SIZE(qm_diff_regs));
   1724	if (IS_ERR(qm->debug.qm_diff_regs))
   1725		return PTR_ERR(qm->debug.qm_diff_regs);
   1726
   1727	qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
   1728	if (IS_ERR(qm->debug.acc_diff_regs)) {
   1729		dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
   1730				ARRAY_SIZE(qm_diff_regs));
   1731		return PTR_ERR(qm->debug.acc_diff_regs);
   1732	}
   1733
   1734	return 0;
   1735}
   1736EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
   1737
   1738/**
   1739 * hisi_qm_diff_regs_uninit() - Free memory for registers.
   1740 * @qm: device qm handle.
   1741 * @reg_len: diff registers region length.
   1742 */
   1743void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
   1744{
   1745	if (!qm  || reg_len <= 0 || qm->fun_type != QM_HW_PF)
   1746		return;
   1747
   1748	dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
   1749	dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
   1750}
   1751EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
   1752
   1753/**
    1754 * hisi_qm_acc_diff_regs_dump() - Dump registers' values.
   1755 * @qm: device qm handle.
   1756 * @s: Debugfs file handle.
   1757 * @dregs: diff registers handle.
   1758 * @regs_len: diff registers region length.
   1759 */
   1760void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
   1761	struct dfx_diff_registers *dregs, int regs_len)
   1762{
   1763	u32 j, val, base_offset;
   1764	int i, ret;
   1765
   1766	if (!qm || !s || !dregs || regs_len <= 0)
   1767		return;
   1768
   1769	ret = hisi_qm_get_dfx_access(qm);
   1770	if (ret)
   1771		return;
   1772
   1773	down_read(&qm->qps_lock);
   1774	for (i = 0; i < regs_len; i++) {
   1775		if (!dregs[i].reg_len)
   1776			continue;
   1777
   1778		for (j = 0; j < dregs[i].reg_len; j++) {
   1779			base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
   1780			val = readl(qm->io_base + base_offset);
   1781			if (val != dregs[i].regs[j])
   1782				seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
   1783					   base_offset, dregs[i].regs[j], val);
   1784		}
   1785	}
   1786	up_read(&qm->qps_lock);
   1787
   1788	hisi_qm_put_dfx_access(qm);
   1789}
   1790EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
   1791
   1792static int qm_diff_regs_show(struct seq_file *s, void *unused)
   1793{
   1794	struct hisi_qm *qm = s->private;
   1795
   1796	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
   1797					ARRAY_SIZE(qm_diff_regs));
   1798
   1799	return 0;
   1800}
   1801DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
   1802
   1803static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
   1804			   size_t count, loff_t *pos)
   1805{
   1806	char buf[QM_DBG_READ_LEN];
   1807	int len;
   1808
   1809	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
   1810			"Please echo help to cmd to get help information");
   1811
   1812	return simple_read_from_buffer(buffer, count, pos, buf, len);
   1813}
   1814
   1815static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
   1816			  dma_addr_t *dma_addr)
   1817{
   1818	struct device *dev = &qm->pdev->dev;
   1819	void *ctx_addr;
   1820
   1821	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
   1822	if (!ctx_addr)
   1823		return ERR_PTR(-ENOMEM);
   1824
   1825	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
   1826	if (dma_mapping_error(dev, *dma_addr)) {
   1827		dev_err(dev, "DMA mapping error!\n");
   1828		kfree(ctx_addr);
   1829		return ERR_PTR(-ENOMEM);
   1830	}
   1831
   1832	return ctx_addr;
   1833}
   1834
   1835static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
   1836			const void *ctx_addr, dma_addr_t *dma_addr)
   1837{
   1838	struct device *dev = &qm->pdev->dev;
   1839
   1840	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
   1841	kfree(ctx_addr);
   1842}
   1843
   1844static int dump_show(struct hisi_qm *qm, void *info,
   1845		     unsigned int info_size, char *info_name)
   1846{
   1847	struct device *dev = &qm->pdev->dev;
   1848	u8 *info_buf, *info_curr = info;
   1849	u32 i;
   1850#define BYTE_PER_DW	4
   1851
   1852	info_buf = kzalloc(info_size, GFP_KERNEL);
   1853	if (!info_buf)
   1854		return -ENOMEM;
   1855
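       	/*
       	 * Reverse the byte order within each 32-bit dword so the dump below
       	 * prints each dword with its most-significant byte first (the context
       	 * structures are little-endian).
       	 */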
   1856	for (i = 0; i < info_size; i++, info_curr++) {
   1857		if (i % BYTE_PER_DW == 0)
   1858			info_buf[i + 3UL] = *info_curr;
   1859		else if (i % BYTE_PER_DW == 1)
   1860			info_buf[i + 1UL] = *info_curr;
   1861		else if (i % BYTE_PER_DW == 2)
   1862			info_buf[i - 1] = *info_curr;
   1863		else if (i % BYTE_PER_DW == 3)
   1864			info_buf[i - 3] = *info_curr;
   1865	}
   1866
   1867	dev_info(dev, "%s DUMP\n", info_name);
   1868	for (i = 0; i < info_size; i += BYTE_PER_DW) {
   1869		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
   1870			info_buf[i], info_buf[i + 1UL],
   1871			info_buf[i + 2UL], info_buf[i + 3UL]);
   1872	}
   1873
   1874	kfree(info_buf);
   1875
   1876	return 0;
   1877}
   1878
   1879static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
   1880{
   1881	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
   1882}
   1883
   1884static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
   1885{
   1886	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
   1887}
   1888
   1889static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
   1890{
   1891	struct device *dev = &qm->pdev->dev;
   1892	struct qm_sqc *sqc, *sqc_curr;
   1893	dma_addr_t sqc_dma;
   1894	u32 qp_id;
   1895	int ret;
   1896
   1897	if (!s)
   1898		return -EINVAL;
   1899
   1900	ret = kstrtou32(s, 0, &qp_id);
   1901	if (ret || qp_id >= qm->qp_num) {
   1902		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
   1903		return -EINVAL;
   1904	}
   1905
   1906	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
   1907	if (IS_ERR(sqc))
   1908		return PTR_ERR(sqc);
   1909
   1910	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
   1911	if (ret) {
   1912		down_read(&qm->qps_lock);
   1913		if (qm->sqc) {
   1914			sqc_curr = qm->sqc + qp_id;
   1915
   1916			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
   1917					"SOFT SQC");
   1918			if (ret)
   1919				dev_info(dev, "Show soft sqc failed!\n");
   1920		}
   1921		up_read(&qm->qps_lock);
   1922
   1923		goto err_free_ctx;
   1924	}
   1925
   1926	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
   1927	if (ret)
   1928		dev_info(dev, "Show hw sqc failed!\n");
   1929
   1930err_free_ctx:
   1931	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
   1932	return ret;
   1933}
   1934
   1935static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
   1936{
   1937	struct device *dev = &qm->pdev->dev;
   1938	struct qm_cqc *cqc, *cqc_curr;
   1939	dma_addr_t cqc_dma;
   1940	u32 qp_id;
   1941	int ret;
   1942
   1943	if (!s)
   1944		return -EINVAL;
   1945
   1946	ret = kstrtou32(s, 0, &qp_id);
   1947	if (ret || qp_id >= qm->qp_num) {
   1948		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
   1949		return -EINVAL;
   1950	}
   1951
   1952	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
   1953	if (IS_ERR(cqc))
   1954		return PTR_ERR(cqc);
   1955
   1956	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
   1957	if (ret) {
   1958		down_read(&qm->qps_lock);
   1959		if (qm->cqc) {
   1960			cqc_curr = qm->cqc + qp_id;
   1961
   1962			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
   1963					"SOFT CQC");
   1964			if (ret)
   1965				dev_info(dev, "Show soft cqc failed!\n");
   1966		}
   1967		up_read(&qm->qps_lock);
   1968
   1969		goto err_free_ctx;
   1970	}
   1971
   1972	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
   1973	if (ret)
   1974		dev_info(dev, "Show hw cqc failed!\n");
   1975
   1976err_free_ctx:
   1977	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
   1978	return ret;
   1979}
   1980
   1981static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
   1982			    int cmd, char *name)
   1983{
   1984	struct device *dev = &qm->pdev->dev;
   1985	dma_addr_t xeqc_dma;
   1986	void *xeqc;
   1987	int ret;
   1988
   1989	if (strsep(&s, " ")) {
   1990		dev_err(dev, "Please do not input extra characters!\n");
   1991		return -EINVAL;
   1992	}
   1993
   1994	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
   1995	if (IS_ERR(xeqc))
   1996		return PTR_ERR(xeqc);
   1997
   1998	ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
   1999	if (ret)
   2000		goto err_free_ctx;
   2001
   2002	ret = dump_show(qm, xeqc, size, name);
   2003	if (ret)
   2004		dev_info(dev, "Show hw %s failed!\n", name);
   2005
   2006err_free_ctx:
   2007	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
   2008	return ret;
   2009}
   2010
   2011static int q_dump_param_parse(struct hisi_qm *qm, char *s,
   2012			      u32 *e_id, u32 *q_id)
   2013{
   2014	struct device *dev = &qm->pdev->dev;
   2015	unsigned int qp_num = qm->qp_num;
   2016	char *presult;
   2017	int ret;
   2018
   2019	presult = strsep(&s, " ");
   2020	if (!presult) {
   2021		dev_err(dev, "Please input qp number!\n");
   2022		return -EINVAL;
   2023	}
   2024
   2025	ret = kstrtou32(presult, 0, q_id);
   2026	if (ret || *q_id >= qp_num) {
   2027		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
   2028		return -EINVAL;
   2029	}
   2030
   2031	presult = strsep(&s, " ");
   2032	if (!presult) {
   2033		dev_err(dev, "Please input sqe number!\n");
   2034		return -EINVAL;
   2035	}
   2036
   2037	ret = kstrtou32(presult, 0, e_id);
   2038	if (ret || *e_id >= QM_Q_DEPTH) {
   2039		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
   2040		return -EINVAL;
   2041	}
   2042
   2043	if (strsep(&s, " ")) {
   2044		dev_err(dev, "Please do not input extra characters!\n");
   2045		return -EINVAL;
   2046	}
   2047
   2048	return 0;
   2049}
   2050
   2051static int qm_sq_dump(struct hisi_qm *qm, char *s)
   2052{
   2053	struct device *dev = &qm->pdev->dev;
   2054	void *sqe, *sqe_curr;
   2055	struct hisi_qp *qp;
   2056	u32 qp_id, sqe_id;
   2057	int ret;
   2058
   2059	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
   2060	if (ret)
   2061		return ret;
   2062
   2063	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
   2064	if (!sqe)
   2065		return -ENOMEM;
   2066
   2067	qp = &qm->qp_array[qp_id];
   2068	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
   2069	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
   2070	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
   2071	       qm->debug.sqe_mask_len);
   2072
   2073	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
   2074	if (ret)
   2075		dev_info(dev, "Show sqe failed!\n");
   2076
   2077	kfree(sqe);
   2078
   2079	return ret;
   2080}
   2081
   2082static int qm_cq_dump(struct hisi_qm *qm, char *s)
   2083{
   2084	struct device *dev = &qm->pdev->dev;
   2085	struct qm_cqe *cqe_curr;
   2086	struct hisi_qp *qp;
   2087	u32 qp_id, cqe_id;
   2088	int ret;
   2089
   2090	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
   2091	if (ret)
   2092		return ret;
   2093
   2094	qp = &qm->qp_array[qp_id];
   2095	cqe_curr = qp->cqe + cqe_id;
   2096	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
   2097	if (ret)
   2098		dev_info(dev, "Show cqe failed!\n");
   2099
   2100	return ret;
   2101}
   2102
   2103static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
   2104			  size_t size, char *name)
   2105{
   2106	struct device *dev = &qm->pdev->dev;
   2107	void *xeqe;
   2108	u32 xeqe_id;
   2109	int ret;
   2110
   2111	if (!s)
   2112		return -EINVAL;
   2113
   2114	ret = kstrtou32(s, 0, &xeqe_id);
   2115	if (ret)
   2116		return -EINVAL;
   2117
   2118	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
   2119		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
   2120		return -EINVAL;
   2121	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
   2122		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
   2123		return -EINVAL;
   2124	}
   2125
   2126	down_read(&qm->qps_lock);
   2127
   2128	if (qm->eqe && !strcmp(name, "EQE")) {
   2129		xeqe = qm->eqe + xeqe_id;
   2130	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
   2131		xeqe = qm->aeqe + xeqe_id;
   2132	} else {
   2133		ret = -EINVAL;
   2134		goto err_unlock;
   2135	}
   2136
   2137	ret = dump_show(qm, xeqe, size, name);
   2138	if (ret)
   2139		dev_info(dev, "Show %s failed!\n", name);
   2140
   2141err_unlock:
   2142	up_read(&qm->qps_lock);
   2143	return ret;
   2144}
   2145
   2146static int qm_dbg_help(struct hisi_qm *qm, char *s)
   2147{
   2148	struct device *dev = &qm->pdev->dev;
   2149
   2150	if (strsep(&s, " ")) {
   2151		dev_err(dev, "Please do not input extra characters!\n");
   2152		return -EINVAL;
   2153	}
   2154
   2155	dev_info(dev, "available commands:\n");
   2156	dev_info(dev, "sqc <num>\n");
   2157	dev_info(dev, "cqc <num>\n");
   2158	dev_info(dev, "eqc\n");
   2159	dev_info(dev, "aeqc\n");
   2160	dev_info(dev, "sq <num> <e>\n");
   2161	dev_info(dev, "cq <num> <e>\n");
   2162	dev_info(dev, "eq <e>\n");
   2163	dev_info(dev, "aeq <e>\n");
   2164
   2165	return 0;
   2166}
   2167
   2168static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
   2169{
   2170	struct device *dev = &qm->pdev->dev;
   2171	char *presult, *s, *s_tmp;
   2172	int ret;
   2173
   2174	s = kstrdup(cmd_buf, GFP_KERNEL);
   2175	if (!s)
   2176		return -ENOMEM;
   2177
   2178	s_tmp = s;
   2179	presult = strsep(&s, " ");
   2180	if (!presult) {
   2181		ret = -EINVAL;
   2182		goto err_buffer_free;
   2183	}
   2184
   2185	if (!strcmp(presult, "sqc"))
   2186		ret = qm_sqc_dump(qm, s);
   2187	else if (!strcmp(presult, "cqc"))
   2188		ret = qm_cqc_dump(qm, s);
   2189	else if (!strcmp(presult, "eqc"))
   2190		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
   2191				       QM_MB_CMD_EQC, "EQC");
   2192	else if (!strcmp(presult, "aeqc"))
   2193		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
   2194				       QM_MB_CMD_AEQC, "AEQC");
   2195	else if (!strcmp(presult, "sq"))
   2196		ret = qm_sq_dump(qm, s);
   2197	else if (!strcmp(presult, "cq"))
   2198		ret = qm_cq_dump(qm, s);
   2199	else if (!strcmp(presult, "eq"))
   2200		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
   2201	else if (!strcmp(presult, "aeq"))
   2202		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
   2203	else if (!strcmp(presult, "help"))
   2204		ret = qm_dbg_help(qm, s);
   2205	else
   2206		ret = -EINVAL;
   2207
   2208	if (ret)
   2209		dev_info(dev, "Please echo help\n");
   2210
   2211err_buffer_free:
   2212	kfree(s_tmp);
   2213
   2214	return ret;
   2215}
   2216
   2217static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
   2218			    size_t count, loff_t *pos)
   2219{
   2220	struct hisi_qm *qm = filp->private_data;
   2221	char *cmd_buf, *cmd_buf_tmp;
   2222	int ret;
   2223
   2224	if (*pos)
   2225		return 0;
   2226
   2227	ret = hisi_qm_get_dfx_access(qm);
   2228	if (ret)
   2229		return ret;
   2230
   2231	/* Check whether the instance is being reset. */
   2232	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
   2233		return 0;
   2234
   2235	if (count > QM_DBG_WRITE_LEN) {
   2236		ret = -ENOSPC;
   2237		goto put_dfx_access;
   2238	}
   2239
   2240	cmd_buf = memdup_user_nul(buffer, count);
   2241	if (IS_ERR(cmd_buf)) {
   2242		ret = PTR_ERR(cmd_buf);
   2243		goto put_dfx_access;
   2244	}
   2245
   2246	cmd_buf_tmp = strchr(cmd_buf, '\n');
   2247	if (cmd_buf_tmp) {
   2248		*cmd_buf_tmp = '\0';
   2249		count = cmd_buf_tmp - cmd_buf + 1;
   2250	}
   2251
   2252	ret = qm_cmd_write_dump(qm, cmd_buf);
   2253	if (ret) {
   2254		kfree(cmd_buf);
   2255		goto put_dfx_access;
   2256	}
   2257
   2258	kfree(cmd_buf);
   2259
   2260	ret = count;
   2261
   2262put_dfx_access:
   2263	hisi_qm_put_dfx_access(qm);
   2264	return ret;
   2265}
   2266
   2267static const struct file_operations qm_cmd_fops = {
   2268	.owner = THIS_MODULE,
   2269	.open = simple_open,
   2270	.read = qm_cmd_read,
   2271	.write = qm_cmd_write,
   2272};
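
       /*
        * Illustrative example (paths are placeholders): when qm_cmd_fops is
        * exposed as a debugfs file, the commands parsed by qm_cmd_write_dump()
        * above can be driven from user space, e.g.:
        *
        *	echo "help"   > <debugfs>/<qm dir>/<cmd file>
        *	echo "sqc 0"  > <debugfs>/<qm dir>/<cmd file>	(dump SQC of queue 0)
        *	echo "sq 0 3" > <debugfs>/<qm dir>/<cmd file>	(dump sqe 3 of queue 0)
        */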
   2273
   2274static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
   2275				   enum qm_debug_file index)
   2276{
   2277	struct debugfs_file *file = qm->debug.files + index;
   2278
   2279	debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
   2280			    &qm_debug_fops);
   2281
   2282	file->index = index;
   2283	mutex_init(&file->lock);
   2284	file->debug = &qm->debug;
   2285}
   2286
   2287static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
   2288{
   2289	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
   2290}
   2291
   2292static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
   2293{
   2294	qm->error_mask = ce | nfe | fe;
   2295	/* clear QM hw residual error source */
   2296	writel(QM_ABNORMAL_INT_SOURCE_CLR,
   2297	       qm->io_base + QM_ABNORMAL_INT_SOURCE);
   2298
   2299	/* configure error type */
   2300	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
   2301	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
   2302	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
   2303	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
   2304}
   2305
   2306static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
   2307{
   2308	u32 irq_enable = ce | nfe | fe;
   2309	u32 irq_unmask = ~irq_enable;
   2310
   2311	qm_hw_error_cfg(qm, ce, nfe, fe);
   2312
   2313	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
   2314	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
   2315}
   2316
   2317static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
   2318{
   2319	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
   2320}
   2321
   2322static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
   2323{
   2324	u32 irq_enable = ce | nfe | fe;
   2325	u32 irq_unmask = ~irq_enable;
   2326
   2327	qm_hw_error_cfg(qm, ce, nfe, fe);
   2328
   2329	/* Enable shutting down the master OOO port when a hardware error occurs. */
   2330	writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
   2331
   2332	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
   2333	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
   2334}
   2335
   2336static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
   2337{
   2338	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
   2339
   2340	/* Disable shutting down the master OOO port on hardware error. */
   2341	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
   2342}
   2343
   2344static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
   2345{
   2346	const struct hisi_qm_hw_error *err;
   2347	struct device *dev = &qm->pdev->dev;
   2348	u32 reg_val, type, vf_num;
   2349	int i;
   2350
   2351	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
   2352		err = &qm_hw_error[i];
   2353		if (!(err->int_msk & error_status))
   2354			continue;
   2355
   2356		dev_err(dev, "%s [error status=0x%x] found\n",
   2357			err->msg, err->int_msk);
   2358
   2359		if (err->int_msk & QM_DB_TIMEOUT) {
   2360			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
   2361			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
   2362			       QM_DB_TIMEOUT_TYPE_SHIFT;
   2363			vf_num = reg_val & QM_DB_TIMEOUT_VF;
   2364			dev_err(dev, "qm %s doorbell timeout in function %u\n",
   2365				qm_db_timeout[type], vf_num);
   2366		} else if (err->int_msk & QM_OF_FIFO_OF) {
   2367			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
   2368			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
   2369			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
   2370			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
   2371
   2372			if (type < ARRAY_SIZE(qm_fifo_overflow))
   2373				dev_err(dev, "qm %s fifo overflow in function %u\n",
   2374					qm_fifo_overflow[type], vf_num);
   2375			else
   2376				dev_err(dev, "unknown error type\n");
   2377		}
   2378	}
   2379}
   2380
   2381static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
   2382{
   2383	u32 error_status, tmp, val;
   2384
   2385	/* read err sts */
   2386	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
   2387	error_status = qm->error_mask & tmp;
   2388
   2389	if (error_status) {
   2390		if (error_status & QM_ECC_MBIT)
   2391			qm->err_status.is_qm_ecc_mbit = true;
   2392
   2393		qm_log_hw_error(qm, error_status);
   2394		val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
   2395		/* A CE error does not require a device reset. */
   2396		if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
   2397			writel(error_status, qm->io_base +
   2398			       QM_ABNORMAL_INT_SOURCE);
   2399			writel(qm->err_info.nfe,
   2400			       qm->io_base + QM_RAS_NFE_ENABLE);
   2401			return ACC_ERR_RECOVERED;
   2402		}
   2403
   2404		return ACC_ERR_NEED_RESET;
   2405	}
   2406
   2407	return ACC_ERR_RECOVERED;
   2408}
   2409
   2410static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
   2411{
   2412	struct qm_mailbox mailbox;
   2413	int ret;
   2414
   2415	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
   2416	mutex_lock(&qm->mailbox_lock);
   2417	ret = qm_mb_nolock(qm, &mailbox);
   2418	if (ret)
   2419		goto err_unlock;
   2420
   2421	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
   2422		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
   2423
   2424err_unlock:
   2425	mutex_unlock(&qm->mailbox_lock);
   2426	return ret;
   2427}
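
       /*
        * The 64-bit message read above is the PF/VF mailbox payload: its low
        * 32 bits (QM_MB_CMD_DATA_MASK) carry the command and the upper 32 bits
        * (shifted by QM_MB_CMD_DATA_SHIFT) may carry additional data.
        */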
   2428
   2429static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
   2430{
   2431	u32 val;
   2432
   2433	if (qm->fun_type == QM_HW_PF)
   2434		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
   2435
   2436	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
   2437	val |= QM_IFC_INT_SOURCE_MASK;
   2438	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
   2439}
   2440
   2441static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
   2442{
   2443	struct device *dev = &qm->pdev->dev;
   2444	u32 cmd;
   2445	u64 msg;
   2446	int ret;
   2447
   2448	ret = qm_get_mb_cmd(qm, &msg, vf_id);
   2449	if (ret) {
   2450		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
   2451		return;
   2452	}
   2453
   2454	cmd = msg & QM_MB_CMD_DATA_MASK;
   2455	switch (cmd) {
   2456	case QM_VF_PREPARE_FAIL:
   2457		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
   2458		break;
   2459	case QM_VF_START_FAIL:
   2460		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
   2461		break;
   2462	case QM_VF_PREPARE_DONE:
   2463	case QM_VF_START_DONE:
   2464		break;
   2465	default:
   2466		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
   2467		break;
   2468	}
   2469}
   2470
   2471static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
   2472{
   2473	struct device *dev = &qm->pdev->dev;
   2474	u32 vfs_num = qm->vfs_num;
   2475	int cnt = 0;
   2476	int ret = 0;
   2477	u64 val;
   2478	u32 i;
   2479
   2480	if (!qm->vfs_num || qm->ver < QM_HW_V3)
   2481		return 0;
   2482
   2483	while (true) {
   2484		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
   2485		/* Break once all VFs have sent their command to the PF. */
   2486		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
   2487			break;
   2488
   2489		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
   2490			ret = -EBUSY;
   2491			break;
   2492		}
   2493
   2494		msleep(QM_WAIT_DST_ACK);
   2495	}
   2496
   2497	/* PF checks each VF's message. */
   2498	for (i = 1; i <= vfs_num; i++) {
   2499		if (val & BIT(i))
   2500			qm_handle_vf_msg(qm, i);
   2501		else
   2502			dev_err(dev, "VF(%u) not ping PF!\n", i);
   2503	}
   2504
   2505	/* PF clears the interrupt to ack the VFs. */
   2506	qm_clear_cmd_interrupt(qm, val);
   2507
   2508	return ret;
   2509}
   2510
   2511static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
   2512{
   2513	u32 val;
   2514
   2515	val = readl(qm->io_base + QM_IFC_INT_CFG);
   2516	val &= ~QM_IFC_SEND_ALL_VFS;
   2517	val |= fun_num;
   2518	writel(val, qm->io_base + QM_IFC_INT_CFG);
   2519
   2520	val = readl(qm->io_base + QM_IFC_INT_SET_P);
   2521	val |= QM_IFC_INT_SET_MASK;
   2522	writel(val, qm->io_base + QM_IFC_INT_SET_P);
   2523}
   2524
   2525static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
   2526{
   2527	u32 val;
   2528
   2529	val = readl(qm->io_base + QM_IFC_INT_SET_V);
   2530	val |= QM_IFC_INT_SET_MASK;
   2531	writel(val, qm->io_base + QM_IFC_INT_SET_V);
   2532}
   2533
   2534static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
   2535{
   2536	struct device *dev = &qm->pdev->dev;
   2537	struct qm_mailbox mailbox;
   2538	int cnt = 0;
   2539	u64 val;
   2540	int ret;
   2541
   2542	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
   2543	mutex_lock(&qm->mailbox_lock);
   2544	ret = qm_mb_nolock(qm, &mailbox);
   2545	if (ret) {
   2546		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
   2547		goto err_unlock;
   2548	}
   2549
   2550	qm_trigger_vf_interrupt(qm, fun_num);
   2551	while (true) {
   2552		msleep(QM_WAIT_DST_ACK);
   2553		val = readq(qm->io_base + QM_IFC_READY_STATUS);
   2554		/* If the VF has responded, the PF has notified it successfully. */
   2555		if (!(val & BIT(fun_num)))
   2556			goto err_unlock;
   2557
   2558		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
   2559			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
   2560			ret = -ETIMEDOUT;
   2561			break;
   2562		}
   2563	}
   2564
   2565err_unlock:
   2566	mutex_unlock(&qm->mailbox_lock);
   2567	return ret;
   2568}
   2569
   2570static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
   2571{
   2572	struct device *dev = &qm->pdev->dev;
   2573	u32 vfs_num = qm->vfs_num;
   2574	struct qm_mailbox mailbox;
   2575	u64 val = 0;
   2576	int cnt = 0;
   2577	int ret;
   2578	u32 i;
   2579
   2580	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
   2581	mutex_lock(&qm->mailbox_lock);
   2582	/* PF sends command to all VFs by mailbox */
   2583	ret = qm_mb_nolock(qm, &mailbox);
   2584	if (ret) {
   2585		dev_err(dev, "failed to send command to VFs!\n");
   2586		mutex_unlock(&qm->mailbox_lock);
   2587		return ret;
   2588	}
   2589
   2590	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
   2591	while (true) {
   2592		msleep(QM_WAIT_DST_ACK);
   2593		val = readq(qm->io_base + QM_IFC_READY_STATUS);
   2594		/* If all VFs have acked, the PF has notified them successfully. */
   2595		if (!(val & GENMASK(vfs_num, 1))) {
   2596			mutex_unlock(&qm->mailbox_lock);
   2597			return 0;
   2598		}
   2599
   2600		if (++cnt > QM_MAX_PF_WAIT_COUNT)
   2601			break;
   2602	}
   2603
   2604	mutex_unlock(&qm->mailbox_lock);
   2605
   2606	/* Check which VFs timed out without responding. */
   2607	for (i = 1; i <= vfs_num; i++) {
   2608		if (val & BIT(i))
   2609			dev_err(dev, "failed to get response from VF(%u)!\n", i);
   2610	}
   2611
   2612	return -ETIMEDOUT;
   2613}
   2614
   2615static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
   2616{
   2617	struct qm_mailbox mailbox;
   2618	int cnt = 0;
   2619	u32 val;
   2620	int ret;
   2621
   2622	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
   2623	mutex_lock(&qm->mailbox_lock);
   2624	ret = qm_mb_nolock(qm, &mailbox);
   2625	if (ret) {
   2626		dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
   2627		goto unlock;
   2628	}
   2629
   2630	qm_trigger_pf_interrupt(qm);
   2631	/* Waiting for PF response */
   2632	while (true) {
   2633		msleep(QM_WAIT_DST_ACK);
   2634		val = readl(qm->io_base + QM_IFC_INT_SET_V);
   2635		if (!(val & QM_IFC_INT_STATUS_MASK))
   2636			break;
   2637
   2638		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
   2639			ret = -ETIMEDOUT;
   2640			break;
   2641		}
   2642	}
   2643
   2644unlock:
   2645	mutex_unlock(&qm->mailbox_lock);
   2646	return ret;
   2647}
   2648
   2649static int qm_stop_qp(struct hisi_qp *qp)
   2650{
   2651	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
   2652}
   2653
   2654static int qm_set_msi(struct hisi_qm *qm, bool set)
   2655{
   2656	struct pci_dev *pdev = qm->pdev;
   2657
   2658	if (set) {
   2659		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
   2660				       0);
   2661	} else {
   2662		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
   2663				       ACC_PEH_MSI_DISABLE);
   2664		if (qm->err_status.is_qm_ecc_mbit ||
   2665		    qm->err_status.is_dev_ecc_mbit)
   2666			return 0;
   2667
   2668		mdelay(1);
   2669		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
   2670			return -EFAULT;
   2671	}
   2672
   2673	return 0;
   2674}
   2675
   2676static void qm_wait_msi_finish(struct hisi_qm *qm)
   2677{
   2678	struct pci_dev *pdev = qm->pdev;
   2679	u32 cmd = ~0;
   2680	int cnt = 0;
   2681	u32 val;
   2682	int ret;
   2683
   2684	while (true) {
   2685		pci_read_config_dword(pdev, pdev->msi_cap +
   2686				      PCI_MSI_PENDING_64, &cmd);
   2687		if (!cmd)
   2688			break;
   2689
   2690		if (++cnt > MAX_WAIT_COUNTS) {
   2691			pci_warn(pdev, "failed to empty MSI PENDING!\n");
   2692			break;
   2693		}
   2694
   2695		udelay(1);
   2696	}
   2697
   2698	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
   2699					 val, !(val & QM_PEH_DFX_MASK),
   2700					 POLL_PERIOD, POLL_TIMEOUT);
   2701	if (ret)
   2702		pci_warn(pdev, "failed to empty PEH MSI!\n");
   2703
   2704	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
   2705					 val, !(val & QM_PEH_MSI_FINISH_MASK),
   2706					 POLL_PERIOD, POLL_TIMEOUT);
   2707	if (ret)
   2708		pci_warn(pdev, "failed to finish MSI operation!\n");
   2709}
   2710
   2711static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
   2712{
   2713	struct pci_dev *pdev = qm->pdev;
   2714	int ret = -ETIMEDOUT;
   2715	u32 cmd, i;
   2716
   2717	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
   2718	if (set)
   2719		cmd |= QM_MSI_CAP_ENABLE;
   2720	else
   2721		cmd &= ~QM_MSI_CAP_ENABLE;
   2722
   2723	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
   2724	if (set) {
   2725		for (i = 0; i < MAX_WAIT_COUNTS; i++) {
   2726			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
   2727			if (cmd & QM_MSI_CAP_ENABLE)
   2728				return 0;
   2729
   2730			udelay(1);
   2731		}
   2732	} else {
   2733		udelay(WAIT_PERIOD_US_MIN);
   2734		qm_wait_msi_finish(qm);
   2735		ret = 0;
   2736	}
   2737
   2738	return ret;
   2739}
   2740
   2741static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
   2742	.qm_db = qm_db_v1,
   2743	.get_irq_num = qm_get_irq_num_v1,
   2744	.hw_error_init = qm_hw_error_init_v1,
   2745	.set_msi = qm_set_msi,
   2746};
   2747
   2748static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
   2749	.get_vft = qm_get_vft_v2,
   2750	.qm_db = qm_db_v2,
   2751	.get_irq_num = qm_get_irq_num_v2,
   2752	.hw_error_init = qm_hw_error_init_v2,
   2753	.hw_error_uninit = qm_hw_error_uninit_v2,
   2754	.hw_error_handle = qm_hw_error_handle_v2,
   2755	.set_msi = qm_set_msi,
   2756};
   2757
   2758static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
   2759	.get_vft = qm_get_vft_v2,
   2760	.qm_db = qm_db_v2,
   2761	.get_irq_num = qm_get_irq_num_v3,
   2762	.hw_error_init = qm_hw_error_init_v3,
   2763	.hw_error_uninit = qm_hw_error_uninit_v3,
   2764	.hw_error_handle = qm_hw_error_handle_v2,
   2765	.stop_qp = qm_stop_qp,
   2766	.set_msi = qm_set_msi_v3,
   2767	.ping_all_vfs = qm_ping_all_vfs,
   2768	.ping_pf = qm_ping_pf,
   2769};
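
       /*
        * One of the qm_hw_ops_v* tables above is selected by hisi_qm_pre_init()
        * further below, based on qm->ver.
        */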
   2770
   2771static void *qm_get_avail_sqe(struct hisi_qp *qp)
   2772{
   2773	struct hisi_qp_status *qp_status = &qp->qp_status;
   2774	u16 sq_tail = qp_status->sq_tail;
   2775
   2776	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
   2777		return NULL;
   2778
   2779	return qp->sqe + sq_tail * qp->qm->sqe_size;
   2780}
   2781
   2782static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
   2783{
   2784	u64 *addr;
   2785
   2786	/* The last 64 bits of the DUS hold the reset status; clear it here. */
   2787	addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
   2788	*addr = 0;
   2789}
   2790
   2791static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
   2792{
   2793	struct device *dev = &qm->pdev->dev;
   2794	struct hisi_qp *qp;
   2795	int qp_id;
   2796
   2797	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
   2798		return ERR_PTR(-EPERM);
   2799
   2800	if (qm->qp_in_used == qm->qp_num) {
   2801		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
   2802				     qm->qp_num);
   2803		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
   2804		return ERR_PTR(-EBUSY);
   2805	}
   2806
   2807	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
   2808	if (qp_id < 0) {
   2809		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
   2810				    qm->qp_num);
   2811		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
   2812		return ERR_PTR(-EBUSY);
   2813	}
   2814
   2815	qp = &qm->qp_array[qp_id];
   2816	hisi_qm_unset_hw_reset(qp);
   2817	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
   2818
   2819	qp->event_cb = NULL;
   2820	qp->req_cb = NULL;
   2821	qp->qp_id = qp_id;
   2822	qp->alg_type = alg_type;
   2823	qp->is_in_kernel = true;
   2824	qm->qp_in_used++;
   2825	atomic_set(&qp->qp_status.flags, QP_INIT);
   2826
   2827	return qp;
   2828}
   2829
   2830/**
   2831 * hisi_qm_create_qp() - Create a queue pair from qm.
   2832 * @qm: The qm we create a qp from.
   2833 * @alg_type: Accelerator specific algorithm type in sqc.
   2834 *
   2835 * Return the created qp on success, -EBUSY if all qps in the qm are allocated,
   2836 * or -ENOMEM if allocating qp memory fails.
   2837 */
   2838static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
   2839{
   2840	struct hisi_qp *qp;
   2841	int ret;
   2842
   2843	ret = qm_pm_get_sync(qm);
   2844	if (ret)
   2845		return ERR_PTR(ret);
   2846
   2847	down_write(&qm->qps_lock);
   2848	qp = qm_create_qp_nolock(qm, alg_type);
   2849	up_write(&qm->qps_lock);
   2850
   2851	if (IS_ERR(qp))
   2852		qm_pm_put_sync(qm);
   2853
   2854	return qp;
   2855}
   2856
   2857/**
   2858 * hisi_qm_release_qp() - Release a qp back to its qm.
   2859 * @qp: The qp we want to release.
   2860 *
   2861 * This function releases the resources of a qp.
   2862 */
   2863static void hisi_qm_release_qp(struct hisi_qp *qp)
   2864{
   2865	struct hisi_qm *qm = qp->qm;
   2866
   2867	down_write(&qm->qps_lock);
   2868
   2869	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
   2870		up_write(&qm->qps_lock);
   2871		return;
   2872	}
   2873
   2874	qm->qp_in_used--;
   2875	idr_remove(&qm->qp_idr, qp->qp_id);
   2876
   2877	up_write(&qm->qps_lock);
   2878
   2879	qm_pm_put_sync(qm);
   2880}
   2881
   2882static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
   2883{
   2884	struct hisi_qm *qm = qp->qm;
   2885	struct device *dev = &qm->pdev->dev;
   2886	enum qm_hw_ver ver = qm->ver;
   2887	struct qm_sqc *sqc;
   2888	dma_addr_t sqc_dma;
   2889	int ret;
   2890
   2891	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
   2892	if (!sqc)
   2893		return -ENOMEM;
   2894
   2895	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
   2896	if (ver == QM_HW_V1) {
   2897		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
   2898		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
   2899	} else {
   2900		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
   2901		sqc->w8 = 0; /* rand_qc */
   2902	}
   2903	sqc->cq_num = cpu_to_le16(qp_id);
   2904	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
   2905
   2906	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
   2907		sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
   2908				       QM_QC_PASID_ENABLE_SHIFT);
   2909
   2910	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
   2911				 DMA_TO_DEVICE);
   2912	if (dma_mapping_error(dev, sqc_dma)) {
   2913		kfree(sqc);
   2914		return -ENOMEM;
   2915	}
   2916
   2917	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
   2918	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
   2919	kfree(sqc);
   2920
   2921	return ret;
   2922}
   2923
   2924static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
   2925{
   2926	struct hisi_qm *qm = qp->qm;
   2927	struct device *dev = &qm->pdev->dev;
   2928	enum qm_hw_ver ver = qm->ver;
   2929	struct qm_cqc *cqc;
   2930	dma_addr_t cqc_dma;
   2931	int ret;
   2932
   2933	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
   2934	if (!cqc)
   2935		return -ENOMEM;
   2936
   2937	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
   2938	if (ver == QM_HW_V1) {
   2939		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
   2940							QM_QC_CQE_SIZE));
   2941		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
   2942	} else {
   2943		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
   2944		cqc->w8 = 0; /* rand_qc */
   2945	}
   2946	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
   2947
   2948	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
   2949		cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
   2950
   2951	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
   2952				 DMA_TO_DEVICE);
   2953	if (dma_mapping_error(dev, cqc_dma)) {
   2954		kfree(cqc);
   2955		return -ENOMEM;
   2956	}
   2957
   2958	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
   2959	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
   2960	kfree(cqc);
   2961
   2962	return ret;
   2963}
   2964
   2965static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
   2966{
   2967	int ret;
   2968
   2969	qm_init_qp_status(qp);
   2970
   2971	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
   2972	if (ret)
   2973		return ret;
   2974
   2975	return qm_cq_ctx_cfg(qp, qp_id, pasid);
   2976}
   2977
   2978static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
   2979{
   2980	struct hisi_qm *qm = qp->qm;
   2981	struct device *dev = &qm->pdev->dev;
   2982	int qp_id = qp->qp_id;
   2983	u32 pasid = arg;
   2984	int ret;
   2985
   2986	if (!qm_qp_avail_state(qm, qp, QP_START))
   2987		return -EPERM;
   2988
   2989	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
   2990	if (ret)
   2991		return ret;
   2992
   2993	atomic_set(&qp->qp_status.flags, QP_START);
   2994	dev_dbg(dev, "queue %d started\n", qp_id);
   2995
   2996	return 0;
   2997}
   2998
   2999/**
   3000 * hisi_qm_start_qp() - Start a qp into running.
   3001 * @qp: The qp we want to start to run.
   3002 * @arg: Accelerator specific argument.
   3003 *
   3004 * After this function, the qp can receive requests from the user. Return 0 if
   3005 * successful, -EBUSY if failed.
   3006 */
   3007int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
   3008{
   3009	struct hisi_qm *qm = qp->qm;
   3010	int ret;
   3011
   3012	down_write(&qm->qps_lock);
   3013	ret = qm_start_qp_nolock(qp, arg);
   3014	up_write(&qm->qps_lock);
   3015
   3016	return ret;
   3017}
   3018EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
   3019
   3020/**
   3021 * qp_stop_fail_cb() - Call the request callback.
   3022 * @qp: The qp that failed to stop.
   3023 *
   3024 * The callback function should be called whether the task completed or not.
   3025 */
   3026static void qp_stop_fail_cb(struct hisi_qp *qp)
   3027{
   3028	int qp_used = atomic_read(&qp->qp_status.used);
   3029	u16 cur_tail = qp->qp_status.sq_tail;
   3030	u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
   3031	struct hisi_qm *qm = qp->qm;
   3032	u16 pos;
   3033	int i;
   3034
   3035	for (i = 0; i < qp_used; i++) {
   3036		pos = (i + cur_head) % QM_Q_DEPTH;
   3037		qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
   3038		atomic_dec(&qp->qp_status.used);
   3039	}
   3040}
   3041
   3042/**
   3043 * qm_drain_qp() - Drain a qp.
   3044 * @qp: The qp we want to drain.
   3045 *
   3046 * Determine whether the queue has been drained by comparing the tail pointers
   3047 * of the sq and cq.
   3048 */
   3049static int qm_drain_qp(struct hisi_qp *qp)
   3050{
   3051	size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
   3052	struct hisi_qm *qm = qp->qm;
   3053	struct device *dev = &qm->pdev->dev;
   3054	struct qm_sqc *sqc;
   3055	struct qm_cqc *cqc;
   3056	dma_addr_t dma_addr;
   3057	int ret = 0, i = 0;
   3058	void *addr;
   3059
   3060	/* No need to drain the qp if the master OOO is blocked by a device error. */
   3061	if (qm_check_dev_error(qm))
   3062		return 0;
   3063
   3064	/* Kunpeng930 supports drain qp by device */
   3065	if (qm->ops->stop_qp) {
   3066		ret = qm->ops->stop_qp(qp);
   3067		if (ret)
   3068			dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
   3069		return ret;
   3070	}
   3071
   3072	addr = qm_ctx_alloc(qm, size, &dma_addr);
   3073	if (IS_ERR(addr)) {
   3074		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
   3075		return -ENOMEM;
   3076	}
   3077
   3078	while (++i) {
   3079		ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
   3080		if (ret) {
   3081			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
   3082			break;
   3083		}
   3084		sqc = addr;
   3085
   3086		ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
   3087				      qp->qp_id);
   3088		if (ret) {
   3089			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
   3090			break;
   3091		}
   3092		cqc = addr + sizeof(struct qm_sqc);
   3093
   3094		if ((sqc->tail == cqc->tail) &&
   3095		    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
   3096			break;
   3097
   3098		if (i == MAX_WAIT_COUNTS) {
   3099			dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
   3100			ret = -EBUSY;
   3101			break;
   3102		}
   3103
   3104		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
   3105	}
   3106
   3107	qm_ctx_free(qm, size, addr, &dma_addr);
   3108
   3109	return ret;
   3110}
   3111
   3112static int qm_stop_qp_nolock(struct hisi_qp *qp)
   3113{
   3114	struct device *dev = &qp->qm->pdev->dev;
   3115	int ret;
   3116
   3117	/*
   3118	 * It is allowed to stop and release a qp during reset. If the qp is
   3119	 * stopped during reset but still needs to be released afterwards, the
   3120	 * is_resetting flag should be cleared so that this qp will not
   3121	 * be restarted after the reset.
   3122	 */
   3123	if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
   3124		qp->is_resetting = false;
   3125		return 0;
   3126	}
   3127
   3128	if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
   3129		return -EPERM;
   3130
   3131	atomic_set(&qp->qp_status.flags, QP_STOP);
   3132
   3133	ret = qm_drain_qp(qp);
   3134	if (ret)
   3135		dev_err(dev, "Failed to drain out data for stopping!\n");
   3136
   3137	if (qp->qm->wq)
   3138		flush_workqueue(qp->qm->wq);
   3139	else
   3140		flush_work(&qp->qm->work);
   3141
   3142	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
   3143		qp_stop_fail_cb(qp);
   3144
   3145	dev_dbg(dev, "stop queue %u!", qp->qp_id);
   3146
   3147	return 0;
   3148}
   3149
   3150/**
   3151 * hisi_qm_stop_qp() - Stop a qp in qm.
   3152 * @qp: The qp we want to stop.
   3153 *
   3154 * This function is the reverse of hisi_qm_start_qp(). Return 0 if successful.
   3155 */
   3156int hisi_qm_stop_qp(struct hisi_qp *qp)
   3157{
   3158	int ret;
   3159
   3160	down_write(&qp->qm->qps_lock);
   3161	ret = qm_stop_qp_nolock(qp);
   3162	up_write(&qp->qm->qps_lock);
   3163
   3164	return ret;
   3165}
   3166EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
   3167
   3168/**
   3169 * hisi_qp_send() - Queue up a task in the hardware queue.
   3170 * @qp: The qp in which to put the message.
   3171 * @msg: The message.
   3172 *
   3173 * This function will return -EBUSY if qp is currently full, and -EAGAIN
   3174 * if qp related qm is resetting.
   3175 *
   3176 * Note: This function may run concurrently with qm_irq_thread and an ACC reset.
   3177 *       It has no race with qm_irq_thread. However, an ACC reset may happen
   3178 *       during hisi_qp_send; for performance reasons no lock is taken here. This
   3179 *       can cause the current qm_db write to fail, or the sent sqe may never be
   3180 *       received. The QM sync/async receive functions should handle the error
   3181 *       sqe, and the ACC reset-done function should clear the used sqes to 0.
   3182 */
   3183int hisi_qp_send(struct hisi_qp *qp, const void *msg)
   3184{
   3185	struct hisi_qp_status *qp_status = &qp->qp_status;
   3186	u16 sq_tail = qp_status->sq_tail;
   3187	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
   3188	void *sqe = qm_get_avail_sqe(qp);
   3189
   3190	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
   3191		     atomic_read(&qp->qm->status.flags) == QM_STOP ||
   3192		     qp->is_resetting)) {
   3193		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
   3194		return -EAGAIN;
   3195	}
   3196
   3197	if (!sqe)
   3198		return -EBUSY;
   3199
   3200	memcpy(sqe, msg, qp->qm->sqe_size);
   3201
   3202	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
   3203	atomic_inc(&qp->qp_status.used);
   3204	qp_status->sq_tail = sq_tail_next;
   3205
   3206	return 0;
   3207}
   3208EXPORT_SYMBOL_GPL(hisi_qp_send);
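
       /*
        * Illustrative usage sketch: a caller that already owns a qp from this QM
        * starts it, pushes requests and stops it roughly as follows. "my_sqe" is
        * a hypothetical, accelerator-specific sqe of qm->sqe_size bytes; the
        * second argument of hisi_qm_start_qp() is the accelerator-specific arg
        * (the PASID in the SVA case), shown here as 0. hisi_qp_send() may return
        * -EBUSY or -EAGAIN, which the caller should handle.
        *
        *	ret = hisi_qm_start_qp(qp, 0);
        *	if (ret)
        *		return ret;
        *	ret = hisi_qp_send(qp, &my_sqe);
        *	...
        *	hisi_qm_stop_qp(qp);
        */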
   3209
   3210static void hisi_qm_cache_wb(struct hisi_qm *qm)
   3211{
   3212	unsigned int val;
   3213
   3214	if (qm->ver == QM_HW_V1)
   3215		return;
   3216
   3217	writel(0x1, qm->io_base + QM_CACHE_WB_START);
   3218	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
   3219				       val, val & BIT(0), POLL_PERIOD,
   3220				       POLL_TIMEOUT))
   3221		dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
   3222}
   3223
   3224static void qm_qp_event_notifier(struct hisi_qp *qp)
   3225{
   3226	wake_up_interruptible(&qp->uacce_q->wait);
   3227}
   3228
   3229/* This function returns the number of free qps in the qm. */
   3230static int hisi_qm_get_available_instances(struct uacce_device *uacce)
   3231{
   3232	struct hisi_qm *qm = uacce->priv;
   3233	int ret;
   3234
   3235	down_read(&qm->qps_lock);
   3236	ret = qm->qp_num - qm->qp_in_used;
   3237	up_read(&qm->qps_lock);
   3238
   3239	return ret;
   3240}
   3241
   3242static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
   3243{
   3244	int i;
   3245
   3246	for (i = 0; i < qm->qp_num; i++)
   3247		qm_set_qp_disable(&qm->qp_array[i], offset);
   3248}
   3249
   3250static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
   3251				   unsigned long arg,
   3252				   struct uacce_queue *q)
   3253{
   3254	struct hisi_qm *qm = uacce->priv;
   3255	struct hisi_qp *qp;
   3256	u8 alg_type = 0;
   3257
   3258	qp = hisi_qm_create_qp(qm, alg_type);
   3259	if (IS_ERR(qp))
   3260		return PTR_ERR(qp);
   3261
   3262	q->priv = qp;
   3263	q->uacce = uacce;
   3264	qp->uacce_q = q;
   3265	qp->event_cb = qm_qp_event_notifier;
   3266	qp->pasid = arg;
   3267	qp->is_in_kernel = false;
   3268
   3269	return 0;
   3270}
   3271
   3272static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
   3273{
   3274	struct hisi_qp *qp = q->priv;
   3275
   3276	hisi_qm_cache_wb(qp->qm);
   3277	hisi_qm_release_qp(qp);
   3278}
   3279
   3280/* map sq/cq/doorbell to user space */
   3281static int hisi_qm_uacce_mmap(struct uacce_queue *q,
   3282			      struct vm_area_struct *vma,
   3283			      struct uacce_qfile_region *qfr)
   3284{
   3285	struct hisi_qp *qp = q->priv;
   3286	struct hisi_qm *qm = qp->qm;
   3287	resource_size_t phys_base = qm->db_phys_base +
   3288				    qp->qp_id * qm->db_interval;
   3289	size_t sz = vma->vm_end - vma->vm_start;
   3290	struct pci_dev *pdev = qm->pdev;
   3291	struct device *dev = &pdev->dev;
   3292	unsigned long vm_pgoff;
   3293	int ret;
   3294
   3295	switch (qfr->type) {
   3296	case UACCE_QFRT_MMIO:
   3297		if (qm->ver == QM_HW_V1) {
   3298			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
   3299				return -EINVAL;
   3300		} else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
   3301			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
   3302			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
   3303				return -EINVAL;
   3304		} else {
   3305			if (sz > qm->db_interval)
   3306				return -EINVAL;
   3307		}
   3308
   3309		vma->vm_flags |= VM_IO;
   3310
   3311		return remap_pfn_range(vma, vma->vm_start,
   3312				       phys_base >> PAGE_SHIFT,
   3313				       sz, pgprot_noncached(vma->vm_page_prot));
   3314	case UACCE_QFRT_DUS:
   3315		if (sz != qp->qdma.size)
   3316			return -EINVAL;
   3317
   3318		/*
   3319		 * dma_mmap_coherent() requires vm_pgoff to be 0;
   3320		 * restore vm_pgoff to its initial value after mmap().
   3321		 */
   3322		vm_pgoff = vma->vm_pgoff;
   3323		vma->vm_pgoff = 0;
   3324		ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
   3325					qp->qdma.dma, sz);
   3326		vma->vm_pgoff = vm_pgoff;
   3327		return ret;
   3328
   3329	default:
   3330		return -EINVAL;
   3331	}
   3332}
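
       /*
        * A user-space uacce client is expected to mmap two regions through the
        * uacce character device: UACCE_QFRT_MMIO for the doorbell pages and
        * UACCE_QFRT_DUS for the sq/cq memory handled above.
        */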
   3333
   3334static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
   3335{
   3336	struct hisi_qp *qp = q->priv;
   3337
   3338	return hisi_qm_start_qp(qp, qp->pasid);
   3339}
   3340
   3341static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
   3342{
   3343	hisi_qm_stop_qp(q->priv);
   3344}
   3345
   3346static int hisi_qm_is_q_updated(struct uacce_queue *q)
   3347{
   3348	struct hisi_qp *qp = q->priv;
   3349	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
   3350	int updated = 0;
   3351
   3352	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
   3353		/* make sure to read data from memory */
   3354		dma_rmb();
   3355		qm_cq_head_update(qp);
   3356		cqe = qp->cqe + qp->qp_status.cq_head;
   3357		updated = 1;
   3358	}
   3359
   3360	return updated;
   3361}
   3362
   3363static void qm_set_sqctype(struct uacce_queue *q, u16 type)
   3364{
   3365	struct hisi_qm *qm = q->uacce->priv;
   3366	struct hisi_qp *qp = q->priv;
   3367
   3368	down_write(&qm->qps_lock);
   3369	qp->alg_type = type;
   3370	up_write(&qm->qps_lock);
   3371}
   3372
   3373static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
   3374				unsigned long arg)
   3375{
   3376	struct hisi_qp *qp = q->priv;
   3377	struct hisi_qp_ctx qp_ctx;
   3378
   3379	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
   3380		if (copy_from_user(&qp_ctx, (void __user *)arg,
   3381				   sizeof(struct hisi_qp_ctx)))
   3382			return -EFAULT;
   3383
   3384		if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
   3385			return -EINVAL;
   3386
   3387		qm_set_sqctype(q, qp_ctx.qc_type);
   3388		qp_ctx.id = qp->qp_id;
   3389
   3390		if (copy_to_user((void __user *)arg, &qp_ctx,
   3391				 sizeof(struct hisi_qp_ctx)))
   3392			return -EFAULT;
   3393	} else {
   3394		return -EINVAL;
   3395	}
   3396
   3397	return 0;
   3398}
   3399
   3400static const struct uacce_ops uacce_qm_ops = {
   3401	.get_available_instances = hisi_qm_get_available_instances,
   3402	.get_queue = hisi_qm_uacce_get_queue,
   3403	.put_queue = hisi_qm_uacce_put_queue,
   3404	.start_queue = hisi_qm_uacce_start_queue,
   3405	.stop_queue = hisi_qm_uacce_stop_queue,
   3406	.mmap = hisi_qm_uacce_mmap,
   3407	.ioctl = hisi_qm_uacce_ioctl,
   3408	.is_q_updated = hisi_qm_is_q_updated,
   3409};
   3410
   3411static int qm_alloc_uacce(struct hisi_qm *qm)
   3412{
   3413	struct pci_dev *pdev = qm->pdev;
   3414	struct uacce_device *uacce;
   3415	unsigned long mmio_page_nr;
   3416	unsigned long dus_page_nr;
   3417	struct uacce_interface interface = {
   3418		.flags = UACCE_DEV_SVA,
   3419		.ops = &uacce_qm_ops,
   3420	};
   3421	int ret;
   3422
   3423	ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
   3424		      sizeof(interface.name));
   3425	if (ret < 0)
   3426		return -ENAMETOOLONG;
   3427
   3428	uacce = uacce_alloc(&pdev->dev, &interface);
   3429	if (IS_ERR(uacce))
   3430		return PTR_ERR(uacce);
   3431
   3432	if (uacce->flags & UACCE_DEV_SVA) {
   3433		qm->use_sva = true;
   3434	} else {
   3435		/* only consider sva case */
   3436		uacce_remove(uacce);
   3437		qm->uacce = NULL;
   3438		return -EINVAL;
   3439	}
   3440
   3441	uacce->is_vf = pdev->is_virtfn;
   3442	uacce->priv = qm;
   3443	uacce->algs = qm->algs;
   3444
   3445	if (qm->ver == QM_HW_V1)
   3446		uacce->api_ver = HISI_QM_API_VER_BASE;
   3447	else if (qm->ver == QM_HW_V2)
   3448		uacce->api_ver = HISI_QM_API_VER2_BASE;
   3449	else
   3450		uacce->api_ver = HISI_QM_API_VER3_BASE;
   3451
   3452	if (qm->ver == QM_HW_V1)
   3453		mmio_page_nr = QM_DOORBELL_PAGE_NR;
   3454	else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
   3455		mmio_page_nr = QM_DOORBELL_PAGE_NR +
   3456			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
   3457	else
   3458		mmio_page_nr = qm->db_interval / PAGE_SIZE;
   3459
   3460	/* Add one more page for device or qp status */
   3461	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
   3462		       sizeof(struct qm_cqe) * QM_Q_DEPTH  + PAGE_SIZE) >>
   3463					 PAGE_SHIFT;
   3464
   3465	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
   3466	uacce->qf_pg_num[UACCE_QFRT_DUS]  = dus_page_nr;
   3467
   3468	qm->uacce = uacce;
   3469
   3470	return 0;
   3471}
   3472
   3473/**
   3474 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
   3475 * there are users on the QM, return failure without doing anything.
   3476 * @qm: The qm to be frozen.
   3477 *
   3478 * This function freezes the QM, after which SRIOV can be disabled.
   3479 */
   3480static int qm_frozen(struct hisi_qm *qm)
   3481{
   3482	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
   3483		return 0;
   3484
   3485	down_write(&qm->qps_lock);
   3486
   3487	if (!qm->qp_in_used) {
   3488		qm->qp_in_used = qm->qp_num;
   3489		up_write(&qm->qps_lock);
   3490		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
   3491		return 0;
   3492	}
   3493
   3494	up_write(&qm->qps_lock);
   3495
   3496	return -EBUSY;
   3497}
   3498
   3499static int qm_try_frozen_vfs(struct pci_dev *pdev,
   3500			     struct hisi_qm_list *qm_list)
   3501{
   3502	struct hisi_qm *qm, *vf_qm;
   3503	struct pci_dev *dev;
   3504	int ret = 0;
   3505
   3506	if (!qm_list || !pdev)
   3507		return -EINVAL;
   3508
   3509	/* Try to freeze all the VFs before disabling SRIOV. */
   3510	mutex_lock(&qm_list->lock);
   3511	list_for_each_entry(qm, &qm_list->list, list) {
   3512		dev = qm->pdev;
   3513		if (dev == pdev)
   3514			continue;
   3515		if (pci_physfn(dev) == pdev) {
   3516			vf_qm = pci_get_drvdata(dev);
   3517			ret = qm_frozen(vf_qm);
   3518			if (ret)
   3519				goto frozen_fail;
   3520		}
   3521	}
   3522
   3523frozen_fail:
   3524	mutex_unlock(&qm_list->lock);
   3525
   3526	return ret;
   3527}
   3528
   3529/**
   3530 * hisi_qm_wait_task_finish() - Wait until the task is finished
   3531 * when removing the driver.
   3532 * @qm: The qm whose tasks we wait to finish.
   3533 * @qm_list: The list of all available devices.
   3534 */
   3535void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
   3536{
   3537	while (qm_frozen(qm) ||
   3538	       ((qm->fun_type == QM_HW_PF) &&
   3539	       qm_try_frozen_vfs(qm->pdev, qm_list))) {
   3540		msleep(WAIT_PERIOD);
   3541	}
   3542
   3543	while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
   3544	       test_bit(QM_RESETTING, &qm->misc_ctl))
   3545		msleep(WAIT_PERIOD);
   3546
   3547	udelay(REMOVE_WAIT_DELAY);
   3548}
   3549EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
   3550
   3551static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
   3552{
   3553	struct device *dev = &qm->pdev->dev;
   3554	struct qm_dma *qdma;
   3555	int i;
   3556
   3557	for (i = num - 1; i >= 0; i--) {
   3558		qdma = &qm->qp_array[i].qdma;
   3559		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
   3560	}
   3561
   3562	kfree(qm->qp_array);
   3563}
   3564
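       /*
        * Each qp owns a single coherent DMA region: the sqes occupy the first
        * sqe_size * QM_Q_DEPTH bytes and the cqes follow at that offset ("off"
        * below).
        */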
   3565static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
   3566{
   3567	struct device *dev = &qm->pdev->dev;
   3568	size_t off = qm->sqe_size * QM_Q_DEPTH;
   3569	struct hisi_qp *qp;
   3570
   3571	qp = &qm->qp_array[id];
   3572	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
   3573					 GFP_KERNEL);
   3574	if (!qp->qdma.va)
   3575		return -ENOMEM;
   3576
   3577	qp->sqe = qp->qdma.va;
   3578	qp->sqe_dma = qp->qdma.dma;
   3579	qp->cqe = qp->qdma.va + off;
   3580	qp->cqe_dma = qp->qdma.dma + off;
   3581	qp->qdma.size = dma_size;
   3582	qp->qm = qm;
   3583	qp->qp_id = id;
   3584
   3585	return 0;
   3586}
   3587
   3588static void hisi_qm_pre_init(struct hisi_qm *qm)
   3589{
   3590	struct pci_dev *pdev = qm->pdev;
   3591
   3592	if (qm->ver == QM_HW_V1)
   3593		qm->ops = &qm_hw_ops_v1;
   3594	else if (qm->ver == QM_HW_V2)
   3595		qm->ops = &qm_hw_ops_v2;
   3596	else
   3597		qm->ops = &qm_hw_ops_v3;
   3598
   3599	pci_set_drvdata(pdev, qm);
   3600	mutex_init(&qm->mailbox_lock);
   3601	init_rwsem(&qm->qps_lock);
   3602	qm->qp_in_used = 0;
   3603	qm->misc_ctl = false;
   3604	if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
   3605		if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
   3606			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
   3607	}
   3608}
   3609
   3610static void qm_cmd_uninit(struct hisi_qm *qm)
   3611{
   3612	u32 val;
   3613
   3614	if (qm->ver < QM_HW_V3)
   3615		return;
   3616
   3617	val = readl(qm->io_base + QM_IFC_INT_MASK);
   3618	val |= QM_IFC_INT_DISABLE;
   3619	writel(val, qm->io_base + QM_IFC_INT_MASK);
   3620}
   3621
   3622static void qm_cmd_init(struct hisi_qm *qm)
   3623{
   3624	u32 val;
   3625
   3626	if (qm->ver < QM_HW_V3)
   3627		return;
   3628
   3629	/* Clear communication interrupt source */
   3630	qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
   3631
   3632	/* Enable pf to vf communication reg. */
   3633	val = readl(qm->io_base + QM_IFC_INT_MASK);
   3634	val &= ~QM_IFC_INT_DISABLE;
   3635	writel(val, qm->io_base + QM_IFC_INT_MASK);
   3636}
   3637
   3638static void qm_put_pci_res(struct hisi_qm *qm)
   3639{
   3640	struct pci_dev *pdev = qm->pdev;
   3641
   3642	if (qm->use_db_isolation)
   3643		iounmap(qm->db_io_base);
   3644
   3645	iounmap(qm->io_base);
   3646	pci_release_mem_regions(pdev);
   3647}
   3648
   3649static void hisi_qm_pci_uninit(struct hisi_qm *qm)
   3650{
   3651	struct pci_dev *pdev = qm->pdev;
   3652
   3653	pci_free_irq_vectors(pdev);
   3654	qm_put_pci_res(qm);
   3655	pci_disable_device(pdev);
   3656}
   3657
   3658static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
   3659{
   3660	if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
   3661		writel(state, qm->io_base + QM_VF_STATE);
   3662}
   3663
   3664static void qm_last_regs_uninit(struct hisi_qm *qm)
   3665{
   3666	struct qm_debug *debug = &qm->debug;
   3667
   3668	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
   3669		return;
   3670
   3671	kfree(debug->qm_last_words);
   3672	debug->qm_last_words = NULL;
   3673}
   3674
   3675/**
   3676 * hisi_qm_uninit() - Uninitialize qm.
   3677 * @qm: The qm to be uninitialized.
   3678 *
   3679 * This function uninits qm related device resources.
   3680 */
   3681void hisi_qm_uninit(struct hisi_qm *qm)
   3682{
   3683	struct pci_dev *pdev = qm->pdev;
   3684	struct device *dev = &pdev->dev;
   3685
   3686	qm_last_regs_uninit(qm);
   3687
   3688	qm_cmd_uninit(qm);
   3689	kfree(qm->factor);
   3690	down_write(&qm->qps_lock);
   3691
   3692	if (!qm_avail_state(qm, QM_CLOSE)) {
   3693		up_write(&qm->qps_lock);
   3694		return;
   3695	}
   3696
   3697	hisi_qp_memory_uninit(qm, qm->qp_num);
   3698	idr_destroy(&qm->qp_idr);
   3699
   3700	if (qm->qdma.va) {
   3701		hisi_qm_cache_wb(qm);
   3702		dma_free_coherent(dev, qm->qdma.size,
   3703				  qm->qdma.va, qm->qdma.dma);
   3704	}
   3705	hisi_qm_set_state(qm, QM_NOT_READY);
   3706	up_write(&qm->qps_lock);
   3707
   3708	qm_irq_unregister(qm);
   3709	hisi_qm_pci_uninit(qm);
   3710	if (qm->use_sva) {
   3711		uacce_remove(qm->uacce);
   3712		qm->uacce = NULL;
   3713	}
   3714}
   3715EXPORT_SYMBOL_GPL(hisi_qm_uninit);
   3716
   3717/**
   3718 * hisi_qm_get_vft() - Get vft from a qm.
   3719 * @qm: The qm we want to get its vft.
   3720 * @base: The base number of queue in vft.
   3721 * @number: The number of queues in vft.
   3722 *
   3723 * We can allocate multiple queues to a qm by configuring virtual function
   3724 * table. We get the related configuration by this function. Normally, we call
   3725 * this function in the VF driver to get the queue information.
   3726 *
   3727 * qm hw v1 does not support this interface.
   3728 */
   3729static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
   3730{
   3731	if (!base || !number)
   3732		return -EINVAL;
   3733
   3734	if (!qm->ops->get_vft) {
   3735		dev_err(&qm->pdev->dev, "Don't support vft read!\n");
   3736		return -EINVAL;
   3737	}
   3738
   3739	return qm->ops->get_vft(qm, base, number);
   3740}
   3741
   3742/**
   3743 * hisi_qm_set_vft() - Set vft to a qm.
   3744 * @qm: The qm we want to set its vft.
   3745 * @fun_num: The function number.
   3746 * @base: The base number of queue in vft.
   3747 * @number: The number of queues in vft.
   3748 *
   3749 * This function is always called in the PF driver; it is used to assign queues
   3750 * among PF and VFs.
   3751 *
   3752 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
   3753 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
   3754 * (VF function number 0x2)
   3755 */
   3756static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
   3757		    u32 number)
   3758{
   3759	u32 max_q_num = qm->ctrl_qp_num;
   3760
   3761	if (base >= max_q_num || number > max_q_num ||
   3762	    (base + number) > max_q_num)
   3763		return -EINVAL;
   3764
   3765	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
   3766}
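
/*
 * Illustrative sketch (not part of the driver): how the A~B formula from the
 * kernel-doc above maps onto calls. Assuming the PF should own queues 0..63
 * and the VF with function number 0x2 should own queues 64..127:
 *
 *	ret = hisi_qm_set_vft(qm, 0, 0, 64);
 *	if (!ret)
 *		ret = hisi_qm_set_vft(qm, 2, 64, 64);
 *
 * Both ranges must stay inside qm->ctrl_qp_num, otherwise -EINVAL is returned.
 */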
   3767
   3768static void qm_init_eq_aeq_status(struct hisi_qm *qm)
   3769{
   3770	struct hisi_qm_status *status = &qm->status;
   3771
   3772	status->eq_head = 0;
   3773	status->aeq_head = 0;
   3774	status->eqc_phase = true;
   3775	status->aeqc_phase = true;
   3776}
   3777
   3778static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
   3779{
   3780	/* Clear eq/aeq interrupt source */
   3781	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
   3782	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
   3783
   3784	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
   3785	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
   3786}
   3787
   3788static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
   3789{
   3790	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
   3791	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
   3792}
   3793
   3794static int qm_eq_ctx_cfg(struct hisi_qm *qm)
   3795{
   3796	struct device *dev = &qm->pdev->dev;
   3797	struct qm_eqc *eqc;
   3798	dma_addr_t eqc_dma;
   3799	int ret;
   3800
   3801	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
   3802	if (!eqc)
   3803		return -ENOMEM;
   3804
   3805	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
   3806	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
   3807	if (qm->ver == QM_HW_V1)
   3808		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
   3809	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
   3810
   3811	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
   3812				 DMA_TO_DEVICE);
   3813	if (dma_mapping_error(dev, eqc_dma)) {
   3814		kfree(eqc);
   3815		return -ENOMEM;
   3816	}
   3817
   3818	ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
   3819	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
   3820	kfree(eqc);
   3821
   3822	return ret;
   3823}
   3824
   3825static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
   3826{
   3827	struct device *dev = &qm->pdev->dev;
   3828	struct qm_aeqc *aeqc;
   3829	dma_addr_t aeqc_dma;
   3830	int ret;
   3831
   3832	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
   3833	if (!aeqc)
   3834		return -ENOMEM;
   3835
   3836	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
   3837	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
   3838	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
   3839
   3840	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
   3841				  DMA_TO_DEVICE);
   3842	if (dma_mapping_error(dev, aeqc_dma)) {
   3843		kfree(aeqc);
   3844		return -ENOMEM;
   3845	}
   3846
   3847	ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
   3848	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
   3849	kfree(aeqc);
   3850
   3851	return ret;
   3852}
   3853
   3854static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
   3855{
   3856	struct device *dev = &qm->pdev->dev;
   3857	int ret;
   3858
   3859	qm_init_eq_aeq_status(qm);
   3860
   3861	ret = qm_eq_ctx_cfg(qm);
   3862	if (ret) {
   3863		dev_err(dev, "Set eqc failed!\n");
   3864		return ret;
   3865	}
   3866
   3867	return qm_aeq_ctx_cfg(qm);
   3868}
   3869
   3870static int __hisi_qm_start(struct hisi_qm *qm)
   3871{
   3872	int ret;
   3873
   3874	WARN_ON(!qm->qdma.va);
   3875
   3876	if (qm->fun_type == QM_HW_PF) {
   3877		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
   3878		if (ret)
   3879			return ret;
   3880	}
   3881
   3882	ret = qm_eq_aeq_ctx_cfg(qm);
   3883	if (ret)
   3884		return ret;
   3885
   3886	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
   3887	if (ret)
   3888		return ret;
   3889
   3890	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
   3891	if (ret)
   3892		return ret;
   3893
   3894	qm_init_prefetch(qm);
   3895	qm_enable_eq_aeq_interrupts(qm);
   3896
   3897	return 0;
   3898}
   3899
   3900/**
   3901 * hisi_qm_start() - start qm
   3902 * @qm: The qm to be started.
   3903 *
   3904 * This function starts a qm, then we can allocate qp from this qm.
   3905 */
   3906int hisi_qm_start(struct hisi_qm *qm)
   3907{
   3908	struct device *dev = &qm->pdev->dev;
   3909	int ret = 0;
   3910
   3911	down_write(&qm->qps_lock);
   3912
   3913	if (!qm_avail_state(qm, QM_START)) {
   3914		up_write(&qm->qps_lock);
   3915		return -EPERM;
   3916	}
   3917
   3918	dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
   3919
   3920	if (!qm->qp_num) {
   3921		dev_err(dev, "qp_num should not be 0\n");
   3922		ret = -EINVAL;
   3923		goto err_unlock;
   3924	}
   3925
   3926	ret = __hisi_qm_start(qm);
   3927	if (!ret)
   3928		atomic_set(&qm->status.flags, QM_START);
   3929
   3930	hisi_qm_set_state(qm, QM_READY);
   3931err_unlock:
   3932	up_write(&qm->qps_lock);
   3933	return ret;
   3934}
   3935EXPORT_SYMBOL_GPL(hisi_qm_start);
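
/*
 * Illustrative sketch (not part of the driver): a minimal probe-time bring-up
 * as an accelerator driver might do it, assuming qm->pdev, qm->sqe_size,
 * qm->qp_num and the other fields have been filled in before hisi_qm_init():
 *
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 *
 *	ret = hisi_qm_start(qm);
 *	if (ret) {
 *		hisi_qm_uninit(qm);
 *		return ret;
 *	}
 */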
   3936
   3937static int qm_restart(struct hisi_qm *qm)
   3938{
   3939	struct device *dev = &qm->pdev->dev;
   3940	struct hisi_qp *qp;
   3941	int ret, i;
   3942
   3943	ret = hisi_qm_start(qm);
   3944	if (ret < 0)
   3945		return ret;
   3946
   3947	down_write(&qm->qps_lock);
   3948	for (i = 0; i < qm->qp_num; i++) {
   3949		qp = &qm->qp_array[i];
   3950		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
   3951		    qp->is_resetting == true) {
   3952			ret = qm_start_qp_nolock(qp, 0);
   3953			if (ret < 0) {
   3954				dev_err(dev, "Failed to start qp%d!\n", i);
   3955
   3956				up_write(&qm->qps_lock);
   3957				return ret;
   3958			}
   3959			qp->is_resetting = false;
   3960		}
   3961	}
   3962	up_write(&qm->qps_lock);
   3963
   3964	return 0;
   3965}
   3966
   3967/* Stop started qps in reset flow */
   3968static int qm_stop_started_qp(struct hisi_qm *qm)
   3969{
   3970	struct device *dev = &qm->pdev->dev;
   3971	struct hisi_qp *qp;
   3972	int i, ret;
   3973
   3974	for (i = 0; i < qm->qp_num; i++) {
   3975		qp = &qm->qp_array[i];
   3976		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
   3977			qp->is_resetting = true;
   3978			ret = qm_stop_qp_nolock(qp);
   3979			if (ret < 0) {
   3980				dev_err(dev, "Failed to stop qp%d!\n", i);
   3981				return ret;
   3982			}
   3983		}
   3984	}
   3985
   3986	return 0;
   3987}
   3988
   3989
   3990/**
   3991 * qm_clear_queues() - Clear all queue memory in a qm.
   3992 * @qm: The qm in which the queues will be cleared.
   3993 *
   3994 * This function clears all queue memory in a qm. An accelerator reset can
   3995 * use this to clear the queues.
   3996 */
   3997static void qm_clear_queues(struct hisi_qm *qm)
   3998{
   3999	struct hisi_qp *qp;
   4000	int i;
   4001
   4002	for (i = 0; i < qm->qp_num; i++) {
   4003		qp = &qm->qp_array[i];
   4004		if (qp->is_in_kernel && qp->is_resetting)
   4005			memset(qp->qdma.va, 0, qp->qdma.size);
   4006	}
   4007
   4008	memset(qm->qdma.va, 0, qm->qdma.size);
   4009}
   4010
   4011/**
   4012 * hisi_qm_stop() - Stop a qm.
   4013 * @qm: The qm which will be stopped.
   4014 * @r: The reason to stop qm.
   4015 *
   4016 * This function stops the qm and its qps; after that the qm cannot accept
   4017 * requests. Related resources are not released at this state, so hisi_qm_start
   4018 * can be used to start the qm again.
   4019 */
   4020int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
   4021{
   4022	struct device *dev = &qm->pdev->dev;
   4023	int ret = 0;
   4024
   4025	down_write(&qm->qps_lock);
   4026
   4027	qm->status.stop_reason = r;
   4028	if (!qm_avail_state(qm, QM_STOP)) {
   4029		ret = -EPERM;
   4030		goto err_unlock;
   4031	}
   4032
   4033	if (qm->status.stop_reason == QM_SOFT_RESET ||
   4034	    qm->status.stop_reason == QM_FLR) {
   4035		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
   4036		ret = qm_stop_started_qp(qm);
   4037		if (ret < 0) {
   4038			dev_err(dev, "Failed to stop started qp!\n");
   4039			goto err_unlock;
   4040		}
   4041		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
   4042	}
   4043
   4044	qm_disable_eq_aeq_interrupts(qm);
   4045	if (qm->fun_type == QM_HW_PF) {
   4046		ret = hisi_qm_set_vft(qm, 0, 0, 0);
   4047		if (ret < 0) {
   4048			dev_err(dev, "Failed to set vft!\n");
   4049			ret = -EBUSY;
   4050			goto err_unlock;
   4051		}
   4052	}
   4053
   4054	qm_clear_queues(qm);
   4055	atomic_set(&qm->status.flags, QM_STOP);
   4056
   4057err_unlock:
   4058	up_write(&qm->qps_lock);
   4059	return ret;
   4060}
   4061EXPORT_SYMBOL_GPL(hisi_qm_stop);
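
/*
 * Illustrative sketch (not part of the driver): the matching teardown path.
 * QM_NORMAL is the stop reason for an ordinary remove; resources remain
 * allocated until hisi_qm_uninit() is called:
 *
 *	ret = hisi_qm_stop(qm, QM_NORMAL);
 *	if (ret)
 *		dev_warn(&qm->pdev->dev, "failed to stop qm (%d)\n", ret);
 *	hisi_qm_uninit(qm);
 */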
   4062
   4063static ssize_t qm_status_read(struct file *filp, char __user *buffer,
   4064			      size_t count, loff_t *pos)
   4065{
   4066	struct hisi_qm *qm = filp->private_data;
   4067	char buf[QM_DBG_READ_LEN];
   4068	int val, len;
   4069
   4070	val = atomic_read(&qm->status.flags);
   4071	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
   4072
   4073	return simple_read_from_buffer(buffer, count, pos, buf, len);
   4074}
   4075
   4076static const struct file_operations qm_status_fops = {
   4077	.owner = THIS_MODULE,
   4078	.open = simple_open,
   4079	.read = qm_status_read,
   4080};
   4081
   4082static int qm_debugfs_atomic64_set(void *data, u64 val)
   4083{
   4084	if (val)
   4085		return -EINVAL;
   4086
   4087	atomic64_set((atomic64_t *)data, 0);
   4088
   4089	return 0;
   4090}
   4091
   4092static int qm_debugfs_atomic64_get(void *data, u64 *val)
   4093{
   4094	*val = atomic64_read((atomic64_t *)data);
   4095
   4096	return 0;
   4097}
   4098
   4099DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
   4100			 qm_debugfs_atomic64_set, "%llu\n");
   4101
   4102static void qm_hw_error_init(struct hisi_qm *qm)
   4103{
   4104	struct hisi_qm_err_info *err_info = &qm->err_info;
   4105
   4106	if (!qm->ops->hw_error_init) {
   4107		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
   4108		return;
   4109	}
   4110
   4111	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
   4112}
   4113
   4114static void qm_hw_error_uninit(struct hisi_qm *qm)
   4115{
   4116	if (!qm->ops->hw_error_uninit) {
   4117		dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
   4118		return;
   4119	}
   4120
   4121	qm->ops->hw_error_uninit(qm);
   4122}
   4123
   4124static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
   4125{
   4126	if (!qm->ops->hw_error_handle) {
   4127		dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
   4128		return ACC_ERR_NONE;
   4129	}
   4130
   4131	return qm->ops->hw_error_handle(qm);
   4132}
   4133
   4134/**
   4135 * hisi_qm_dev_err_init() - Initialize device error configuration.
   4136 * @qm: The qm for which we want to do error initialization.
   4137 *
   4138 * Initialize QM and device error related configuration.
   4139 */
   4140void hisi_qm_dev_err_init(struct hisi_qm *qm)
   4141{
   4142	if (qm->fun_type == QM_HW_VF)
   4143		return;
   4144
   4145	qm_hw_error_init(qm);
   4146
   4147	if (!qm->err_ini->hw_err_enable) {
   4148		dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
   4149		return;
   4150	}
   4151	qm->err_ini->hw_err_enable(qm);
   4152}
   4153EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
   4154
   4155/**
   4156 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
   4157 * @qm: The qm for which we want to do error uninitialization.
   4158 *
   4159 * Uninitialize QM and device error related configuration.
   4160 */
   4161void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
   4162{
   4163	if (qm->fun_type == QM_HW_VF)
   4164		return;
   4165
   4166	qm_hw_error_uninit(qm);
   4167
   4168	if (!qm->err_ini->hw_err_disable) {
   4169		dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
   4170		return;
   4171	}
   4172	qm->err_ini->hw_err_disable(qm);
   4173}
   4174EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
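
/*
 * Illustrative sketch (not part of the driver): these two helpers are meant
 * to be paired by the PF driver. Roughly:
 *
 *	probe:  hisi_qm_dev_err_init(qm);	after device hw init
 *	remove: hisi_qm_dev_err_uninit(qm);	before tearing the device down
 *
 * Both helpers are no-ops on a VF (qm->fun_type == QM_HW_VF).
 */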
   4175
   4176/**
   4177 * hisi_qm_free_qps() - free multiple queue pairs.
   4178 * @qps: The queue pairs need to be freed.
   4179 * @qp_num: The num of queue pairs.
   4180 */
   4181void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
   4182{
   4183	int i;
   4184
   4185	if (!qps || qp_num <= 0)
   4186		return;
   4187
   4188	for (i = qp_num - 1; i >= 0; i--)
   4189		hisi_qm_release_qp(qps[i]);
   4190}
   4191EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
   4192
   4193static void free_list(struct list_head *head)
   4194{
   4195	struct hisi_qm_resource *res, *tmp;
   4196
   4197	list_for_each_entry_safe(res, tmp, head, list) {
   4198		list_del(&res->list);
   4199		kfree(res);
   4200	}
   4201}
   4202
   4203static int hisi_qm_sort_devices(int node, struct list_head *head,
   4204				struct hisi_qm_list *qm_list)
   4205{
   4206	struct hisi_qm_resource *res, *tmp;
   4207	struct hisi_qm *qm;
   4208	struct list_head *n;
   4209	struct device *dev;
   4210	int dev_node = 0;
   4211
   4212	list_for_each_entry(qm, &qm_list->list, list) {
   4213		dev = &qm->pdev->dev;
   4214
   4215		if (IS_ENABLED(CONFIG_NUMA)) {
   4216			dev_node = dev_to_node(dev);
   4217			if (dev_node < 0)
   4218				dev_node = 0;
   4219		}
   4220
   4221		res = kzalloc(sizeof(*res), GFP_KERNEL);
   4222		if (!res)
   4223			return -ENOMEM;
   4224
   4225		res->qm = qm;
   4226		res->distance = node_distance(dev_node, node);
   4227		n = head;
   4228		list_for_each_entry(tmp, head, list) {
   4229			if (res->distance < tmp->distance) {
   4230				n = &tmp->list;
   4231				break;
   4232			}
   4233		}
   4234		list_add_tail(&res->list, n);
   4235	}
   4236
   4237	return 0;
   4238}
   4239
   4240/**
   4241 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
   4242 * @qm_list: The list of all available devices.
   4243 * @qp_num: The number of queue pairs to be created.
   4244 * @alg_type: The algorithm type.
   4245 * @node: The numa node.
   4246 * @qps: The queue pairs to be created.
   4247 *
   4248 * This function sorts all available devices according to NUMA distance, then
   4249 * tries to create all queue pairs on one device. If no device meets the
   4250 * requirement, an error is returned.
   4251 */
   4252int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
   4253			   u8 alg_type, int node, struct hisi_qp **qps)
   4254{
   4255	struct hisi_qm_resource *tmp;
   4256	int ret = -ENODEV;
   4257	LIST_HEAD(head);
   4258	int i;
   4259
   4260	if (!qps || !qm_list || qp_num <= 0)
   4261		return -EINVAL;
   4262
   4263	mutex_lock(&qm_list->lock);
   4264	if (hisi_qm_sort_devices(node, &head, qm_list)) {
   4265		mutex_unlock(&qm_list->lock);
   4266		goto err;
   4267	}
   4268
   4269	list_for_each_entry(tmp, &head, list) {
   4270		for (i = 0; i < qp_num; i++) {
   4271			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
   4272			if (IS_ERR(qps[i])) {
   4273				hisi_qm_free_qps(qps, i);
   4274				break;
   4275			}
   4276		}
   4277
   4278		if (i == qp_num) {
   4279			ret = 0;
   4280			break;
   4281		}
   4282	}
   4283
   4284	mutex_unlock(&qm_list->lock);
   4285	if (ret)
   4286		pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
   4287			node, alg_type, qp_num);
   4288
   4289err:
   4290	free_list(&head);
   4291	return ret;
   4292}
   4293EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
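
/*
 * Illustrative sketch (not part of the driver): allocating two queue pairs
 * close to the caller's NUMA node and releasing them again. The alg_type
 * value (0 here) is interpreted by the accelerator driver, not by the QM,
 * and "my_qm_list" stands in for the driver's global struct hisi_qm_list:
 *
 *	struct hisi_qp *qps[2] = {};
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&my_qm_list, 2, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);
 */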
   4294
   4295static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
   4296{
   4297	u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
   4298	u32 max_qp_num = qm->max_qp_num;
   4299	u32 q_base = qm->qp_num;
   4300	int ret;
   4301
   4302	if (!num_vfs)
   4303		return -EINVAL;
   4304
   4305	vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
   4306
   4307	/* If vfs_q_num is less than num_vfs, return error. */
   4308	if (vfs_q_num < num_vfs)
   4309		return -EINVAL;
   4310
   4311	q_num = vfs_q_num / num_vfs;
   4312	remain_q_num = vfs_q_num % num_vfs;
   4313
   4314	for (i = num_vfs; i > 0; i--) {
   4315		/*
   4316		 * If q_num + remain_q_num exceeds max_qp_num for the last vf,
   4317		 * spread the remaining queues across the vfs one by one instead.
   4318		 */
   4319		if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
   4320			act_q_num = q_num + remain_q_num;
   4321			remain_q_num = 0;
   4322		} else if (remain_q_num > 0) {
   4323			act_q_num = q_num + 1;
   4324			remain_q_num--;
   4325		} else {
   4326			act_q_num = q_num;
   4327		}
   4328
   4329		act_q_num = min_t(int, act_q_num, max_qp_num);
   4330		ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
   4331		if (ret) {
   4332			for (j = num_vfs; j > i; j--)
   4333				hisi_qm_set_vft(qm, j, 0, 0);
   4334			return ret;
   4335		}
   4336		q_base += act_q_num;
   4337	}
   4338
   4339	return 0;
   4340}
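
/*
 * Worked example with illustrative numbers: if qm->ctrl_qp_num = 1024,
 * qm->qp_num = 64 and num_vfs = 7, then vfs_q_num = 960, q_num = 137 and
 * remain_q_num = 1. The first iteration (i == num_vfs) gives the whole
 * remainder to the highest-numbered VF as long as it fits max_qp_num, so
 * that VF gets 138 queues and the other six get 137 each
 * (138 + 6 * 137 = 960). If the remainder did not fit, it would instead be
 * spread across the VFs one extra queue at a time.
 */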
   4341
   4342static int qm_clear_vft_config(struct hisi_qm *qm)
   4343{
   4344	int ret;
   4345	u32 i;
   4346
   4347	for (i = 1; i <= qm->vfs_num; i++) {
   4348		ret = hisi_qm_set_vft(qm, i, 0, 0);
   4349		if (ret)
   4350			return ret;
   4351	}
   4352	qm->vfs_num = 0;
   4353
   4354	return 0;
   4355}
   4356
   4357static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
   4358{
   4359	struct device *dev = &qm->pdev->dev;
   4360	u32 ir = qos * QM_QOS_RATE;
   4361	int ret, total_vfs, i;
   4362
   4363	total_vfs = pci_sriov_get_totalvfs(qm->pdev);
   4364	if (fun_index > total_vfs)
   4365		return -EINVAL;
   4366
   4367	qm->factor[fun_index].func_qos = qos;
   4368
   4369	ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
   4370	if (ret) {
   4371		dev_err(dev, "failed to calculate shaper parameter!\n");
   4372		return -EINVAL;
   4373	}
   4374
   4375	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
   4376		/* The queue base number is reused for different alg types. */
   4377		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
   4378		if (ret) {
   4379			dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
   4380			return -EINVAL;
   4381		}
   4382	}
   4383
   4384	return 0;
   4385}
   4386
   4387static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
   4388{
   4389	u64 cir_u = 0, cir_b = 0, cir_s = 0;
   4390	u64 shaper_vft, ir_calc, ir;
   4391	unsigned int val;
   4392	u32 error_rate;
   4393	int ret;
   4394
   4395	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
   4396					 val & BIT(0), POLL_PERIOD,
   4397					 POLL_TIMEOUT);
   4398	if (ret)
   4399		return 0;
   4400
   4401	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
   4402	writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
   4403	writel(fun_index, qm->io_base + QM_VFT_CFG);
   4404
   4405	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
   4406	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
   4407
   4408	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
   4409					 val & BIT(0), POLL_PERIOD,
   4410					 POLL_TIMEOUT);
   4411	if (ret)
   4412		return 0;
   4413
   4414	shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
   4415		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
   4416
   4417	cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
   4418	cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
   4419	cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
   4420
   4421	cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
   4422	cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
   4423
   4424	ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
   4425
   4426	ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
   4427
   4428	error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
   4429	if (error_rate > QM_QOS_MIN_ERROR_RATE) {
   4430		pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
   4431		return 0;
   4432	}
   4433
   4434	return ir;
   4435}
   4436
   4437static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
   4438{
   4439	struct device *dev = &qm->pdev->dev;
   4440	u64 mb_cmd;
   4441	u32 qos;
   4442	int ret;
   4443
   4444	qos = qm_get_shaper_vft_qos(qm, fun_num);
   4445	if (!qos) {
   4446		dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
   4447		return;
   4448	}
   4449
   4450	mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
   4451	ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
   4452	if (ret)
   4453		dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
   4454}
   4455
   4456static int qm_vf_read_qos(struct hisi_qm *qm)
   4457{
   4458	int cnt = 0;
   4459	int ret = -EINVAL;
   4460
   4461	/* reset mailbox qos val */
   4462	qm->mb_qos = 0;
   4463
   4464	/* vf ping pf to get function qos */
   4465	if (qm->ops->ping_pf) {
   4466		ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
   4467		if (ret) {
   4468			pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
   4469			return ret;
   4470		}
   4471	}
   4472
   4473	while (true) {
   4474		msleep(QM_WAIT_DST_ACK);
   4475		if (qm->mb_qos)
   4476			break;
   4477
   4478		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
   4479			pci_err(qm->pdev, "PF ping VF timeout!\n");
   4480			return -ETIMEDOUT;
   4481		}
   4482	}
   4483
   4484	return ret;
   4485}
   4486
   4487static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
   4488			       size_t count, loff_t *pos)
   4489{
   4490	struct hisi_qm *qm = filp->private_data;
   4491	char tbuf[QM_DBG_READ_LEN];
   4492	u32 qos_val, ir;
   4493	int ret;
   4494
   4495	ret = hisi_qm_get_dfx_access(qm);
   4496	if (ret)
   4497		return ret;
   4498
   4499	/* Mailbox and reset cannot be operated at the same time */
   4500	if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
   4501		pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
   4502		ret = -EAGAIN;
   4503		goto err_put_dfx_access;
   4504	}
   4505
   4506	if (qm->fun_type == QM_HW_PF) {
   4507		ir = qm_get_shaper_vft_qos(qm, 0);
   4508	} else {
   4509		ret = qm_vf_read_qos(qm);
   4510		if (ret)
   4511			goto err_get_status;
   4512		ir = qm->mb_qos;
   4513	}
   4514
   4515	qos_val = ir / QM_QOS_RATE;
   4516	ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
   4517
   4518	ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
   4519
   4520err_get_status:
   4521	clear_bit(QM_RESETTING, &qm->misc_ctl);
   4522err_put_dfx_access:
   4523	hisi_qm_put_dfx_access(qm);
   4524	return ret;
   4525}
   4526
   4527static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
   4528{
   4529	int buflen = strlen(buf);
   4530	int ret, i;
   4531
   4532	for (i = 0; i < buflen; i++) {
   4533		if (!isdigit(buf[i]))
   4534			return -EINVAL;
   4535	}
   4536
   4537	ret = sscanf(buf, "%lu", val);
   4538	if (ret != QM_QOS_VAL_NUM)
   4539		return -EINVAL;
   4540
   4541	return 0;
   4542}
   4543
   4544static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
   4545			       unsigned long *val,
   4546			       unsigned int *fun_index)
   4547{
   4548	char tbuf_bdf[QM_DBG_READ_LEN] = {0};
   4549	char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
   4550	u32 tmp1, device, function;
   4551	int ret, bus;
   4552
   4553	ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
   4554	if (ret != QM_QOS_PARAM_NUM)
   4555		return -EINVAL;
   4556
   4557	ret = qm_qos_value_init(val_buf, val);
   4558	if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
   4559		pci_err(qm->pdev, "input qos value is invalid, please set 1~1000!\n");
   4560		return -EINVAL;
   4561	}
   4562
   4563	ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
   4564	if (ret != QM_QOS_BDF_PARAM_NUM) {
   4565		pci_err(qm->pdev, "input pci bdf value is invalid!\n");
   4566		return -EINVAL;
   4567	}
   4568
   4569	*fun_index = PCI_DEVFN(device, function);
   4570
   4571	return 0;
   4572}
   4573
   4574static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
   4575			       size_t count, loff_t *pos)
   4576{
   4577	struct hisi_qm *qm = filp->private_data;
   4578	char tbuf[QM_DBG_READ_LEN];
   4579	unsigned int fun_index;
   4580	unsigned long val;
   4581	int len, ret;
   4582
   4583	if (qm->fun_type == QM_HW_VF)
   4584		return -EINVAL;
   4585
   4586	if (*pos != 0)
   4587		return 0;
   4588
   4589	if (count >= QM_DBG_READ_LEN)
   4590		return -ENOSPC;
   4591
   4592	len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
   4593	if (len < 0)
   4594		return len;
   4595
   4596	tbuf[len] = '\0';
   4597	ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
   4598	if (ret)
   4599		return ret;
   4600
   4601	/* Mailbox and reset cannot be operated at the same time */
   4602	if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
   4603		pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
   4604		return -EAGAIN;
   4605	}
   4606
   4607	ret = qm_pm_get_sync(qm);
   4608	if (ret) {
   4609		ret = -EINVAL;
   4610		goto err_get_status;
   4611	}
   4612
   4613	ret = qm_func_shaper_enable(qm, fun_index, val);
   4614	if (ret) {
   4615		pci_err(qm->pdev, "failed to enable function shaper!\n");
   4616		ret = -EINVAL;
   4617		goto err_put_sync;
   4618	}
   4619
   4620	pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
   4621		 fun_index, val);
   4622	ret = count;
   4623
   4624err_put_sync:
   4625	qm_pm_put_sync(qm);
   4626err_get_status:
   4627	clear_bit(QM_RESETTING, &qm->misc_ctl);
   4628	return ret;
   4629}
   4630
   4631static const struct file_operations qm_algqos_fops = {
   4632	.owner = THIS_MODULE,
   4633	.open = simple_open,
   4634	.read = qm_algqos_read,
   4635	.write = qm_algqos_write,
   4636};
   4637
   4638/**
   4639 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
   4640 * @qm: The qm for which we want to add debugfs files.
   4641 *
   4642 * Create function qos debugfs files.
   4643 */
   4644static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
   4645{
   4646	if (qm->fun_type == QM_HW_PF)
   4647		debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
   4648				    qm, &qm_algqos_fops);
   4649	else
   4650		debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
   4651				    qm, &qm_algqos_fops);
   4652}
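
/*
 * Illustrative usage (the debugfs path depends on where debug_root was
 * created by the accelerator driver): the PF's alg_qos file takes
 * "<domain:bus:device.function> <qos>" with qos in the 1~1000 range, e.g.
 *
 *	echo "0000:81:00.0 500" > <debugfs root>/alg_qos
 *	cat <debugfs root>/alg_qos
 *
 * The function index is derived from the device/function part via
 * PCI_DEVFN() in qm_get_qos_value() above.
 */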
   4653
   4654/**
   4655 * hisi_qm_debug_init() - Initialize qm related debugfs files.
   4656 * @qm: The qm for which we want to add debugfs files.
   4657 *
   4658 * Create qm related debugfs files.
   4659 */
   4660void hisi_qm_debug_init(struct hisi_qm *qm)
   4661{
   4662	struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
   4663	struct qm_dfx *dfx = &qm->debug.dfx;
   4664	struct dentry *qm_d;
   4665	void *data;
   4666	int i;
   4667
   4668	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
   4669	qm->debug.qm_d = qm_d;
   4670
   4671	/* only show this in PF */
   4672	if (qm->fun_type == QM_HW_PF) {
   4673		qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
   4674		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
   4675			qm_create_debugfs_file(qm, qm->debug.qm_d, i);
   4676	}
   4677
   4678	if (qm_regs)
   4679		debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
   4680					qm, &qm_diff_regs_fops);
   4681
   4682	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
   4683
   4684	debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
   4685
   4686	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
   4687			&qm_status_fops);
   4688	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
   4689		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
   4690		debugfs_create_file(qm_dfx_files[i].name,
   4691			0644,
   4692			qm_d,
   4693			data,
   4694			&qm_atomic64_ops);
   4695	}
   4696
   4697	if (qm->ver >= QM_HW_V3)
   4698		hisi_qm_set_algqos_init(qm);
   4699}
   4700EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
   4701
   4702/**
   4703 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
   4704 * @qm: The qm for which we want to clear its debug registers.
   4705 */
   4706void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
   4707{
   4708	const struct debugfs_reg32 *regs;
   4709	int i;
   4710
   4711	/* clear current_qm */
   4712	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
   4713	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
   4714
   4715	/* clear current_q */
   4716	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
   4717	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
   4718
   4719	/*
   4720	 * these registers are cleared on read, so clear them by reading
   4721	 * them.
   4722	 */
   4723	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
   4724
   4725	regs = qm_dfx_regs;
   4726	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
   4727		readl(qm->io_base + regs->offset);
   4728		regs++;
   4729	}
   4730
   4731	/* clear clear_enable */
   4732	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
   4733}
   4734EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
   4735
   4736/**
   4737 * hisi_qm_sriov_enable() - enable virtual functions
   4738 * @pdev: the PCIe device
   4739 * @max_vfs: the number of virtual functions to enable
   4740 *
   4741 * Returns the number of enabled VFs. If VFs are already enabled or max_vfs
   4742 * is more than the total number the device can support, failure is
   4743 * returned.
   4744 */
   4745int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
   4746{
   4747	struct hisi_qm *qm = pci_get_drvdata(pdev);
   4748	int pre_existing_vfs, num_vfs, total_vfs, ret;
   4749
   4750	ret = qm_pm_get_sync(qm);
   4751	if (ret)
   4752		return ret;
   4753
   4754	total_vfs = pci_sriov_get_totalvfs(pdev);
   4755	pre_existing_vfs = pci_num_vf(pdev);
   4756	if (pre_existing_vfs) {
   4757		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
   4758			pre_existing_vfs);
   4759		goto err_put_sync;
   4760	}
   4761
   4762	num_vfs = min_t(int, max_vfs, total_vfs);
   4763	ret = qm_vf_q_assign(qm, num_vfs);
   4764	if (ret) {
   4765		pci_err(pdev, "Can't assign queues for VF!\n");
   4766		goto err_put_sync;
   4767	}
   4768
   4769	qm->vfs_num = num_vfs;
   4770
   4771	ret = pci_enable_sriov(pdev, num_vfs);
   4772	if (ret) {
   4773		pci_err(pdev, "Can't enable VF!\n");
   4774		qm_clear_vft_config(qm);
   4775		goto err_put_sync;
   4776	}
   4777
   4778	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
   4779
   4780	return num_vfs;
   4781
   4782err_put_sync:
   4783	qm_pm_put_sync(qm);
   4784	return ret;
   4785}
   4786EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
   4787
   4788/**
   4789 * hisi_qm_sriov_disable - disable virtual functions
   4790 * @pdev: the PCI device.
   4791 * @is_frozen: true when all the VFs are frozen.
   4792 *
   4793 * Return failure if VFs are already assigned or a VF is in use.
   4794 */
   4795int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
   4796{
   4797	struct hisi_qm *qm = pci_get_drvdata(pdev);
   4798	int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
   4799	int ret;
   4800
   4801	if (pci_vfs_assigned(pdev)) {
   4802		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
   4803		return -EPERM;
   4804	}
   4805
   4806	/* While a VF is in use, SR-IOV cannot be disabled. */
   4807	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
   4808		pci_err(pdev, "Task is using its VF!\n");
   4809		return -EBUSY;
   4810	}
   4811
   4812	pci_disable_sriov(pdev);
   4813	/* clear vf function shaper configure array */
   4814	memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
   4815	ret = qm_clear_vft_config(qm);
   4816	if (ret)
   4817		return ret;
   4818
   4819	qm_pm_put_sync(qm);
   4820
   4821	return 0;
   4822}
   4823EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
   4824
   4825/**
   4826 * hisi_qm_sriov_configure - configure the number of VFs
   4827 * @pdev: The PCI device
   4828 * @num_vfs: The number of VFs to be enabled
   4829 *
   4830 * Enable SR-IOV according to num_vfs, 0 means disable.
   4831 */
   4832int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
   4833{
   4834	if (num_vfs == 0)
   4835		return hisi_qm_sriov_disable(pdev, false);
   4836	else
   4837		return hisi_qm_sriov_enable(pdev, num_vfs);
   4838}
   4839EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
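
/*
 * Illustrative sketch (not part of the driver): an accelerator driver
 * typically wires this helper straight into its pci_driver so that writing
 * /sys/bus/pci/devices/<bdf>/sriov_numvfs enables or disables VFs:
 *
 *	static struct pci_driver my_acc_driver = {
 *		...
 *		.sriov_configure = hisi_qm_sriov_configure,
 *	};
 *
 * "my_acc_driver" is a placeholder name.
 */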
   4840
   4841static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
   4842{
   4843	u32 err_sts;
   4844
   4845	if (!qm->err_ini->get_dev_hw_err_status) {
   4846		dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
   4847		return ACC_ERR_NONE;
   4848	}
   4849
   4850	/* get device hardware error status */
   4851	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
   4852	if (err_sts) {
   4853		if (err_sts & qm->err_info.ecc_2bits_mask)
   4854			qm->err_status.is_dev_ecc_mbit = true;
   4855
   4856		if (qm->err_ini->log_dev_hw_err)
   4857			qm->err_ini->log_dev_hw_err(qm, err_sts);
   4858
   4859		/* ce error does not need to be reset */
   4860		if ((err_sts | qm->err_info.dev_ce_mask) ==
   4861		     qm->err_info.dev_ce_mask) {
   4862			if (qm->err_ini->clear_dev_hw_err_status)
   4863				qm->err_ini->clear_dev_hw_err_status(qm,
   4864								err_sts);
   4865
   4866			return ACC_ERR_RECOVERED;
   4867		}
   4868
   4869		return ACC_ERR_NEED_RESET;
   4870	}
   4871
   4872	return ACC_ERR_RECOVERED;
   4873}
   4874
   4875static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
   4876{
   4877	enum acc_err_result qm_ret, dev_ret;
   4878
   4879	/* log qm error */
   4880	qm_ret = qm_hw_error_handle(qm);
   4881
   4882	/* log device error */
   4883	dev_ret = qm_dev_err_handle(qm);
   4884
   4885	return (qm_ret == ACC_ERR_NEED_RESET ||
   4886		dev_ret == ACC_ERR_NEED_RESET) ?
   4887		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
   4888}
   4889
   4890/**
   4891 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
   4892 * @pdev: The PCI device which need report error.
   4893 * @state: The connectivity between CPU and device.
   4894 *
   4895 * We register this function into the PCIe AER handlers. It will report the
   4896 * device or qm hardware error status when an error occurs.
   4897 */
   4898pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
   4899					  pci_channel_state_t state)
   4900{
   4901	struct hisi_qm *qm = pci_get_drvdata(pdev);
   4902	enum acc_err_result ret;
   4903
   4904	if (pdev->is_virtfn)
   4905		return PCI_ERS_RESULT_NONE;
   4906
   4907	pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
   4908	if (state == pci_channel_io_perm_failure)
   4909		return PCI_ERS_RESULT_DISCONNECT;
   4910
   4911	ret = qm_process_dev_error(qm);
   4912	if (ret == ACC_ERR_NEED_RESET)
   4913		return PCI_ERS_RESULT_NEED_RESET;
   4914
   4915	return PCI_ERS_RESULT_RECOVERED;
   4916}
   4917EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
   4918
   4919static int qm_check_req_recv(struct hisi_qm *qm)
   4920{
   4921	struct pci_dev *pdev = qm->pdev;
   4922	int ret;
   4923	u32 val;
   4924
   4925	if (qm->ver >= QM_HW_V3)
   4926		return 0;
   4927
   4928	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
   4929	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
   4930					 (val == ACC_VENDOR_ID_VALUE),
   4931					 POLL_PERIOD, POLL_TIMEOUT);
   4932	if (ret) {
   4933		dev_err(&pdev->dev, "Fails to read QM reg!\n");
   4934		return ret;
   4935	}
   4936
   4937	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
   4938	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
   4939					 (val == PCI_VENDOR_ID_HUAWEI),
   4940					 POLL_PERIOD, POLL_TIMEOUT);
   4941	if (ret)
   4942		dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
   4943
   4944	return ret;
   4945}
   4946
   4947static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
   4948{
   4949	struct pci_dev *pdev = qm->pdev;
   4950	u16 cmd;
   4951	int i;
   4952
   4953	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
   4954	if (set)
   4955		cmd |= PCI_COMMAND_MEMORY;
   4956	else
   4957		cmd &= ~PCI_COMMAND_MEMORY;
   4958
   4959	pci_write_config_word(pdev, PCI_COMMAND, cmd);
   4960	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
   4961		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
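		/*
		 * PCI_COMMAND_MEMORY is bit 1, so the shift by 1 reduces the
		 * masked value to 0 or 1 for comparison with 'set'.
		 */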
   4962		if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
   4963			return 0;
   4964
   4965		udelay(1);
   4966	}
   4967
   4968	return -ETIMEDOUT;
   4969}
   4970
   4971static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
   4972{
   4973	struct pci_dev *pdev = qm->pdev;
   4974	u16 sriov_ctrl;
   4975	int pos;
   4976	int i;
   4977
   4978	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
   4979	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
   4980	if (set)
   4981		sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
   4982	else
   4983		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
   4984	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
   4985
   4986	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
   4987		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
   4988		if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
   4989		    ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
   4990			return 0;
   4991
   4992		udelay(1);
   4993	}
   4994
   4995	return -ETIMEDOUT;
   4996}
   4997
   4998static int qm_vf_reset_prepare(struct hisi_qm *qm,
   4999			       enum qm_stop_reason stop_reason)
   5000{
   5001	struct hisi_qm_list *qm_list = qm->qm_list;
   5002	struct pci_dev *pdev = qm->pdev;
   5003	struct pci_dev *virtfn;
   5004	struct hisi_qm *vf_qm;
   5005	int ret = 0;
   5006
   5007	mutex_lock(&qm_list->lock);
   5008	list_for_each_entry(vf_qm, &qm_list->list, list) {
   5009		virtfn = vf_qm->pdev;
   5010		if (virtfn == pdev)
   5011			continue;
   5012
   5013		if (pci_physfn(virtfn) == pdev) {
   5014			/* save VFs PCIE BAR configuration */
   5015			pci_save_state(virtfn);
   5016
   5017			ret = hisi_qm_stop(vf_qm, stop_reason);
   5018			if (ret)
   5019				goto stop_fail;
   5020		}
   5021	}
   5022
   5023stop_fail:
   5024	mutex_unlock(&qm_list->lock);
   5025	return ret;
   5026}
   5027
   5028static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
   5029			   enum qm_stop_reason stop_reason)
   5030{
   5031	struct pci_dev *pdev = qm->pdev;
   5032	int ret;
   5033
   5034	if (!qm->vfs_num)
   5035		return 0;
   5036
   5037	/* Kunpeng930 supports notifying VFs to stop before PF reset */
   5038	if (qm->ops->ping_all_vfs) {
   5039		ret = qm->ops->ping_all_vfs(qm, cmd);
   5040		if (ret)
   5041			pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
   5042	} else {
   5043		ret = qm_vf_reset_prepare(qm, stop_reason);
   5044		if (ret)
   5045			pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
   5046	}
   5047
   5048	return ret;
   5049}
   5050
   5051static int qm_controller_reset_prepare(struct hisi_qm *qm)
   5052{
   5053	struct pci_dev *pdev = qm->pdev;
   5054	int ret;
   5055
   5056	ret = qm_reset_prepare_ready(qm);
   5057	if (ret) {
   5058		pci_err(pdev, "Controller reset not ready!\n");
   5059		return ret;
   5060	}
   5061
   5062	/* PF obtains the information of VF by querying the register. */
   5063	qm_cmd_uninit(qm);
   5064
   5065	/* Whether or not the VFs stop successfully, the soft reset will continue. */
   5066	ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
   5067	if (ret)
   5068		pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
   5069
   5070	ret = hisi_qm_stop(qm, QM_SOFT_RESET);
   5071	if (ret) {
   5072		pci_err(pdev, "Fails to stop QM!\n");
   5073		qm_reset_bit_clear(qm);
   5074		return ret;
   5075	}
   5076
   5077	ret = qm_wait_vf_prepare_finish(qm);
   5078	if (ret)
   5079		pci_err(pdev, "failed to stop by vfs in soft reset!\n");
   5080
   5081	clear_bit(QM_RST_SCHED, &qm->misc_ctl);
   5082
   5083	return 0;
   5084}
   5085
   5086static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
   5087{
   5088	u32 nfe_enb = 0;
   5089
   5090	/* Kunpeng930 hardware automatically closes master ooo when NFE occurs */
   5091	if (qm->ver >= QM_HW_V3)
   5092		return;
   5093
   5094	if (!qm->err_status.is_dev_ecc_mbit &&
   5095	    qm->err_status.is_qm_ecc_mbit &&
   5096	    qm->err_ini->close_axi_master_ooo) {
   5097
   5098		qm->err_ini->close_axi_master_ooo(qm);
   5099
   5100	} else if (qm->err_status.is_dev_ecc_mbit &&
   5101		   !qm->err_status.is_qm_ecc_mbit &&
   5102		   !qm->err_ini->close_axi_master_ooo) {
   5103
   5104		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
   5105		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
   5106		       qm->io_base + QM_RAS_NFE_ENABLE);
   5107		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
   5108	}
   5109}
   5110
   5111static int qm_soft_reset(struct hisi_qm *qm)
   5112{
   5113	struct pci_dev *pdev = qm->pdev;
   5114	int ret;
   5115	u32 val;
   5116
   5117	/* Ensure all doorbells and mailboxes received by QM */
   5118	ret = qm_check_req_recv(qm);
   5119	if (ret)
   5120		return ret;
   5121
   5122	if (qm->vfs_num) {
   5123		ret = qm_set_vf_mse(qm, false);
   5124		if (ret) {
   5125			pci_err(pdev, "Fails to disable vf MSE bit.\n");
   5126			return ret;
   5127		}
   5128	}
   5129
   5130	ret = qm->ops->set_msi(qm, false);
   5131	if (ret) {
   5132		pci_err(pdev, "Fails to disable PEH MSI bit.\n");
   5133		return ret;
   5134	}
   5135
   5136	qm_dev_ecc_mbit_handle(qm);
   5137
   5138	/* OOO register set and check */
   5139	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
   5140	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);
   5141
   5142	/* If bus lock, reset chip */
   5143	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
   5144					 val,
   5145					 (val == ACC_MASTER_TRANS_RETURN_RW),
   5146					 POLL_PERIOD, POLL_TIMEOUT);
   5147	if (ret) {
   5148		pci_emerg(pdev, "Bus lock! Please reset system.\n");
   5149		return ret;
   5150	}
   5151
   5152	if (qm->err_ini->close_sva_prefetch)
   5153		qm->err_ini->close_sva_prefetch(qm);
   5154
   5155	ret = qm_set_pf_mse(qm, false);
   5156	if (ret) {
   5157		pci_err(pdev, "Fails to disable pf MSE bit.\n");
   5158		return ret;
   5159	}
   5160
   5161	/* The reset related sub-control registers are not in PCI BAR */
   5162	if (ACPI_HANDLE(&pdev->dev)) {
   5163		unsigned long long value = 0;
   5164		acpi_status s;
   5165
   5166		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
   5167					  qm->err_info.acpi_rst,
   5168					  NULL, &value);
   5169		if (ACPI_FAILURE(s)) {
   5170			pci_err(pdev, "NO controller reset method!\n");
   5171			return -EIO;
   5172		}
   5173
   5174		if (value) {
   5175			pci_err(pdev, "Reset step %llu failed!\n", value);
   5176			return -EIO;
   5177		}
   5178	} else {
   5179		pci_err(pdev, "No reset method!\n");
   5180		return -EINVAL;
   5181	}
   5182
   5183	return 0;
   5184}
   5185
   5186static int qm_vf_reset_done(struct hisi_qm *qm)
   5187{
   5188	struct hisi_qm_list *qm_list = qm->qm_list;
   5189	struct pci_dev *pdev = qm->pdev;
   5190	struct pci_dev *virtfn;
   5191	struct hisi_qm *vf_qm;
   5192	int ret = 0;
   5193
   5194	mutex_lock(&qm_list->lock);
   5195	list_for_each_entry(vf_qm, &qm_list->list, list) {
   5196		virtfn = vf_qm->pdev;
   5197		if (virtfn == pdev)
   5198			continue;
   5199
   5200		if (pci_physfn(virtfn) == pdev) {
   5201			/* restore VFs PCIE BAR configuration */
   5202			pci_restore_state(virtfn);
   5203
   5204			ret = qm_restart(vf_qm);
   5205			if (ret)
   5206				goto restart_fail;
   5207		}
   5208	}
   5209
   5210restart_fail:
   5211	mutex_unlock(&qm_list->lock);
   5212	return ret;
   5213}
   5214
   5215static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
   5216{
   5217	struct pci_dev *pdev = qm->pdev;
   5218	int ret;
   5219
   5220	if (!qm->vfs_num)
   5221		return 0;
   5222
   5223	ret = qm_vf_q_assign(qm, qm->vfs_num);
   5224	if (ret) {
   5225		pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
   5226		return ret;
   5227	}
   5228
   5229	/* Kunpeng930 supports notifying VFs to start after PF reset. */
   5230	if (qm->ops->ping_all_vfs) {
   5231		ret = qm->ops->ping_all_vfs(qm, cmd);
   5232		if (ret)
   5233			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
   5234	} else {
   5235		ret = qm_vf_reset_done(qm);
   5236		if (ret)
   5237			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
   5238	}
   5239
   5240	return ret;
   5241}
   5242
   5243static int qm_dev_hw_init(struct hisi_qm *qm)
   5244{
   5245	return qm->err_ini->hw_init(qm);
   5246}
   5247
   5248static void qm_restart_prepare(struct hisi_qm *qm)
   5249{
   5250	u32 value;
   5251
   5252	if (qm->err_ini->open_sva_prefetch)
   5253		qm->err_ini->open_sva_prefetch(qm);
   5254
   5255	if (qm->ver >= QM_HW_V3)
   5256		return;
   5257
   5258	if (!qm->err_status.is_qm_ecc_mbit &&
   5259	    !qm->err_status.is_dev_ecc_mbit)
   5260		return;
   5261
   5262	/* temporarily close the OOO port used for PEH to write out MSI */
   5263	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
   5264	writel(value & ~qm->err_info.msi_wr_port,
   5265	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);
   5266
   5267	/* clear dev ecc 2bit error source if present */
   5268	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
   5269	if (value && qm->err_ini->clear_dev_hw_err_status)
   5270		qm->err_ini->clear_dev_hw_err_status(qm, value);
   5271
   5272	/* clear QM ecc mbit error source */
   5273	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
   5274
   5275	/* clear AM Reorder Buffer ecc mbit source */
   5276	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
   5277}
   5278
   5279static void qm_restart_done(struct hisi_qm *qm)
   5280{
   5281	u32 value;
   5282
   5283	if (qm->ver >= QM_HW_V3)
   5284		goto clear_flags;
   5285
   5286	if (!qm->err_status.is_qm_ecc_mbit &&
   5287	    !qm->err_status.is_dev_ecc_mbit)
   5288		return;
   5289
   5290	/* open the OOO port for PEH to write out MSI */
   5291	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
   5292	value |= qm->err_info.msi_wr_port;
   5293	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
   5294
   5295clear_flags:
   5296	qm->err_status.is_qm_ecc_mbit = false;
   5297	qm->err_status.is_dev_ecc_mbit = false;
   5298}
   5299
   5300static int qm_controller_reset_done(struct hisi_qm *qm)
   5301{
   5302	struct pci_dev *pdev = qm->pdev;
   5303	int ret;
   5304
   5305	ret = qm->ops->set_msi(qm, true);
   5306	if (ret) {
   5307		pci_err(pdev, "Fails to enable PEH MSI bit!\n");
   5308		return ret;
   5309	}
   5310
   5311	ret = qm_set_pf_mse(qm, true);
   5312	if (ret) {
   5313		pci_err(pdev, "Fails to enable pf MSE bit!\n");
   5314		return ret;
   5315	}
   5316
   5317	if (qm->vfs_num) {
   5318		ret = qm_set_vf_mse(qm, true);
   5319		if (ret) {
   5320			pci_err(pdev, "Fails to enable vf MSE bit!\n");
   5321			return ret;
   5322		}
   5323	}
   5324
   5325	ret = qm_dev_hw_init(qm);
   5326	if (ret) {
   5327		pci_err(pdev, "Failed to init device\n");
   5328		return ret;
   5329	}
   5330
   5331	qm_restart_prepare(qm);
   5332	hisi_qm_dev_err_init(qm);
   5333	if (qm->err_ini->open_axi_master_ooo)
   5334		qm->err_ini->open_axi_master_ooo(qm);
   5335
   5336	ret = qm_dev_mem_reset(qm);
   5337	if (ret) {
   5338		pci_err(pdev, "failed to reset device memory\n");
   5339		return ret;
   5340	}
   5341
   5342	ret = qm_restart(qm);
   5343	if (ret) {
   5344		pci_err(pdev, "Failed to start QM!\n");
   5345		return ret;
   5346	}
   5347
   5348	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
   5349	if (ret)
   5350		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");
   5351
   5352	ret = qm_wait_vf_prepare_finish(qm);
   5353	if (ret)
   5354		pci_err(pdev, "failed to start by vfs in soft reset!\n");
   5355
   5356	qm_cmd_init(qm);
   5357	qm_restart_done(qm);
   5358
   5359	qm_reset_bit_clear(qm);
   5360
   5361	return 0;
   5362}
   5363
   5364static void qm_show_last_dfx_regs(struct hisi_qm *qm)
   5365{
   5366	struct qm_debug *debug = &qm->debug;
   5367	struct pci_dev *pdev = qm->pdev;
   5368	u32 val;
   5369	int i;
   5370
   5371	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
   5372		return;
   5373
   5374	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
   5375		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
   5376		if (debug->qm_last_words[i] != val)
   5377			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
   5378			qm_dfx_regs[i].name, debug->qm_last_words[i], val);
   5379	}
   5380}
   5381
   5382static int qm_controller_reset(struct hisi_qm *qm)
   5383{
   5384	struct pci_dev *pdev = qm->pdev;
   5385	int ret;
   5386
   5387	pci_info(pdev, "Controller resetting...\n");
   5388
   5389	ret = qm_controller_reset_prepare(qm);
   5390	if (ret) {
   5391		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
   5392		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
   5393		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
   5394		return ret;
   5395	}
   5396
   5397	qm_show_last_dfx_regs(qm);
   5398	if (qm->err_ini->show_last_dfx_regs)
   5399		qm->err_ini->show_last_dfx_regs(qm);
   5400
   5401	ret = qm_soft_reset(qm);
   5402	if (ret) {
   5403		pci_err(pdev, "Controller reset failed (%d)\n", ret);
   5404		qm_reset_bit_clear(qm);
   5405		return ret;
   5406	}
   5407
   5408	ret = qm_controller_reset_done(qm);
   5409	if (ret) {
   5410		qm_reset_bit_clear(qm);
   5411		return ret;
   5412	}
   5413
   5414	pci_info(pdev, "Controller reset complete\n");
   5415
   5416	return 0;
   5417}
   5418
   5419/**
   5420 * hisi_qm_dev_slot_reset() - slot reset
   5421 * @pdev: the PCIe device
   5422 *
   5423 * This function offers the QM related PCIe device reset interface. Drivers
   5424 * using QM can use it as slot_reset in their struct pci_error_handlers.
   5425 */
   5426pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
   5427{
   5428	struct hisi_qm *qm = pci_get_drvdata(pdev);
   5429	int ret;
   5430
   5431	if (pdev->is_virtfn)
   5432		return PCI_ERS_RESULT_RECOVERED;
   5433
   5434	pci_aer_clear_nonfatal_status(pdev);
   5435
   5436	/* reset pcie device controller */
   5437	ret = qm_controller_reset(qm);
   5438	if (ret) {
   5439		pci_err(pdev, "Controller reset failed (%d)\n", ret);
   5440		return PCI_ERS_RESULT_DISCONNECT;
   5441	}
   5442
   5443	return PCI_ERS_RESULT_RECOVERED;
   5444}
   5445EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
   5446
   5447void hisi_qm_reset_prepare(struct pci_dev *pdev)
   5448{
   5449	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
   5450	struct hisi_qm *qm = pci_get_drvdata(pdev);
   5451	u32 delay = 0;
   5452	int ret;
   5453
   5454	hisi_qm_dev_err_uninit(pf_qm);
   5455
   5456	/*
   5457	 * Check whether there is an ECC mbit error. If there is, we need to
   5458	 * wait for the soft reset to fix it.
   5459	 */
   5460	while (qm_check_dev_error(pf_qm)) {
   5461		msleep(++delay);
   5462		if (delay > QM_RESET_WAIT_TIMEOUT)
   5463			return;
   5464	}
   5465
   5466	ret = qm_reset_prepare_ready(qm);
   5467	if (ret) {
   5468		pci_err(pdev, "FLR not ready!\n");
   5469		return;
   5470	}
   5471
   5472	/* PF obtains the information of VF by querying the register. */
   5473	if (qm->fun_type == QM_HW_PF)
   5474		qm_cmd_uninit(qm);
   5475
   5476	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
   5477	if (ret)
   5478		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
   5479
   5480	ret = hisi_qm_stop(qm, QM_FLR);
   5481	if (ret) {
   5482		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
   5483		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
   5484		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
   5485		return;
   5486	}
   5487
   5488	ret = qm_wait_vf_prepare_finish(qm);
   5489	if (ret)
   5490		pci_err(pdev, "failed to stop by vfs in FLR!\n");
   5491
   5492	pci_info(pdev, "FLR resetting...\n");
   5493}
   5494EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
   5495
   5496static bool qm_flr_reset_complete(struct pci_dev *pdev)
   5497{
   5498	struct pci_dev *pf_pdev = pci_physfn(pdev);
   5499	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
   5500	u32 id;
   5501
   5502	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
   5503	if (id == QM_PCI_COMMAND_INVALID) {
   5504		pci_err(pdev, "Device can not be used!\n");
   5505		return false;
   5506	}
   5507
   5508	return true;
   5509}
   5510
   5511void hisi_qm_reset_done(struct pci_dev *pdev)
   5512{
   5513	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
   5514	struct hisi_qm *qm = pci_get_drvdata(pdev);
   5515	int ret;
   5516
   5517	if (qm->fun_type == QM_HW_PF) {
   5518		ret = qm_dev_hw_init(qm);
   5519		if (ret) {
   5520			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
   5521			goto flr_done;
   5522		}
   5523	}
   5524
   5525	hisi_qm_dev_err_init(pf_qm);
   5526
   5527	ret = qm_restart(qm);
   5528	if (ret) {
   5529		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
   5530		goto flr_done;
   5531	}
   5532
   5533	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
   5534	if (ret)
   5535		pci_err(pdev, "failed to start vfs by pf in FLR.\n");
   5536
   5537	ret = qm_wait_vf_prepare_finish(qm);
   5538	if (ret)
   5539		pci_err(pdev, "failed to start by vfs in FLR!\n");
   5540
   5541flr_done:
   5542	if (qm->fun_type == QM_HW_PF)
   5543		qm_cmd_init(qm);
   5544
   5545	if (qm_flr_reset_complete(pdev))
   5546		pci_info(pdev, "FLR reset complete\n");
   5547
   5548	qm_reset_bit_clear(qm);
   5549}
   5550EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
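
/*
 * Illustrative sketch (not part of the driver): the AER/FLR hooks exported
 * above are intended to be plugged into the accelerator's
 * struct pci_error_handlers, roughly:
 *
 *	static const struct pci_error_handlers my_acc_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 *
 * "my_acc_err_handler" is a placeholder name.
 */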
   5551
   5552static irqreturn_t qm_abnormal_irq(int irq, void *data)
   5553{
   5554	struct hisi_qm *qm = data;
   5555	enum acc_err_result ret;
   5556
   5557	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
   5558	ret = qm_process_dev_error(qm);
   5559	if (ret == ACC_ERR_NEED_RESET &&
   5560	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
   5561	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
   5562		schedule_work(&qm->rst_work);
   5563
   5564	return IRQ_HANDLED;
   5565}
   5566
   5567static int qm_irq_register(struct hisi_qm *qm)
   5568{
   5569	struct pci_dev *pdev = qm->pdev;
   5570	int ret;
   5571
   5572	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
   5573			  qm_irq, 0, qm->dev_name, qm);
   5574	if (ret)
   5575		return ret;
   5576
   5577	if (qm->ver > QM_HW_V1) {
   5578		ret = request_threaded_irq(pci_irq_vector(pdev,
   5579					   QM_AEQ_EVENT_IRQ_VECTOR),
   5580					   qm_aeq_irq, qm_aeq_thread,
   5581					   0, qm->dev_name, qm);
   5582		if (ret)
   5583			goto err_aeq_irq;
   5584
   5585		if (qm->fun_type == QM_HW_PF) {
   5586			ret = request_irq(pci_irq_vector(pdev,
   5587					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
   5588					  qm_abnormal_irq, 0, qm->dev_name, qm);
   5589			if (ret)
   5590				goto err_abnormal_irq;
   5591		}
   5592	}
   5593
   5594	if (qm->ver > QM_HW_V2) {
   5595		ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
   5596				qm_mb_cmd_irq, 0, qm->dev_name, qm);
   5597		if (ret)
   5598			goto err_mb_cmd_irq;
   5599	}
   5600
   5601	return 0;
   5602
   5603err_mb_cmd_irq:
   5604	if (qm->fun_type == QM_HW_PF)
   5605		free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
   5606	err_abnormal_irq:
   5607	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
   5608err_aeq_irq:
   5609	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
   5610	return ret;
   5611}
   5612
   5613/**
   5614 * hisi_qm_dev_shutdown() - Shutdown device.
   5615	 * @pdev: The device to be shut down.
   5616	 *
   5617	 * This function stops the qm when the OS shuts down or reboots.
   5618 */
   5619void hisi_qm_dev_shutdown(struct pci_dev *pdev)
   5620{
   5621	struct hisi_qm *qm = pci_get_drvdata(pdev);
   5622	int ret;
   5623
   5624	ret = hisi_qm_stop(qm, QM_NORMAL);
   5625	if (ret)
   5626		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
   5627}
   5628EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
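
/*
 * Illustrative sketch (assumption, not derived from this file): the exported
 * hisi_qm_dev_shutdown() is intended to be used directly as a pci_driver
 * .shutdown callback so the queues are stopped on reboot or power-off. The
 * "example_acc" names below are hypothetical; probe/remove/id_table are
 * omitted for brevity.
 *
 *	static struct pci_driver example_acc_driver = {
 *		.name		= "example_acc",
 *		.shutdown	= hisi_qm_dev_shutdown,
 *	};
 */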
   5629
   5630static void hisi_qm_controller_reset(struct work_struct *rst_work)
   5631{
   5632	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
   5633	int ret;
   5634
   5635	ret = qm_pm_get_sync(qm);
   5636	if (ret) {
   5637		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
   5638		return;
   5639	}
   5640
   5641	/* reset pcie device controller */
   5642	ret = qm_controller_reset(qm);
   5643	if (ret)
   5644		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
   5645
   5646	qm_pm_put_sync(qm);
   5647}
   5648
   5649static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
   5650				   enum qm_stop_reason stop_reason)
   5651{
   5652	enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
   5653	struct pci_dev *pdev = qm->pdev;
   5654	int ret;
   5655
   5656	ret = qm_reset_prepare_ready(qm);
   5657	if (ret) {
   5658		dev_err(&pdev->dev, "reset prepare not ready!\n");
   5659		atomic_set(&qm->status.flags, QM_STOP);
   5660		cmd = QM_VF_PREPARE_FAIL;
   5661		goto err_prepare;
   5662	}
   5663
   5664	ret = hisi_qm_stop(qm, stop_reason);
   5665	if (ret) {
   5666		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
   5667		atomic_set(&qm->status.flags, QM_STOP);
   5668		cmd = QM_VF_PREPARE_FAIL;
   5669		goto err_prepare;
   5670	} else {
   5671		goto out;
   5672	}
   5673
   5674err_prepare:
   5675	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
   5676	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
   5677out:
   5678	pci_save_state(pdev);
   5679	ret = qm->ops->ping_pf(qm, cmd);
   5680	if (ret)
   5681		dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n");
   5682}
   5683
   5684static void qm_pf_reset_vf_done(struct hisi_qm *qm)
   5685{
   5686	enum qm_mb_cmd cmd = QM_VF_START_DONE;
   5687	struct pci_dev *pdev = qm->pdev;
   5688	int ret;
   5689
   5690	pci_restore_state(pdev);
   5691	ret = hisi_qm_start(qm);
   5692	if (ret) {
   5693		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
   5694		cmd = QM_VF_START_FAIL;
   5695	}
   5696
   5697	ret = qm->ops->ping_pf(qm, cmd);
   5698	if (ret)
   5699		dev_warn(&pdev->dev, "PF response timed out in reset done!\n");
   5700
   5701	qm_reset_bit_clear(qm);
   5702}
   5703
   5704static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
   5705{
   5706	struct device *dev = &qm->pdev->dev;
   5707	u32 val, cmd;
   5708	u64 msg;
   5709	int ret;
   5710
   5711	/* Wait for reset to finish */
   5712	ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
   5713					 val == BIT(0), QM_VF_RESET_WAIT_US,
   5714					 QM_VF_RESET_WAIT_TIMEOUT_US);
   5715	/* hardware completion status should be available by this time */
   5716	if (ret) {
   5717		dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
   5718		return -ETIMEDOUT;
   5719	}
   5720
   5721	/*
   5722	 * Whether or not the message is received successfully, the VF needs
   5723	 * to ack the PF by clearing the interrupt.
   5724	 */
   5725	ret = qm_get_mb_cmd(qm, &msg, 0);
   5726	qm_clear_cmd_interrupt(qm, 0);
   5727	if (ret) {
   5728		dev_err(dev, "failed to get msg from PF in reset done!\n");
   5729		return ret;
   5730	}
   5731
   5732	cmd = msg & QM_MB_CMD_DATA_MASK;
   5733	if (cmd != QM_PF_RESET_DONE) {
   5734		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
   5735		ret = -EINVAL;
   5736	}
   5737
   5738	return ret;
   5739}
   5740
   5741static void qm_pf_reset_vf_process(struct hisi_qm *qm,
   5742				   enum qm_stop_reason stop_reason)
   5743{
   5744	struct device *dev = &qm->pdev->dev;
   5745	int ret;
   5746
   5747	dev_info(dev, "device reset start...\n");
   5748
   5749	/* During reset, the message is obtained by querying the register */
   5750	qm_cmd_uninit(qm);
   5751	qm_pf_reset_vf_prepare(qm, stop_reason);
   5752
   5753	ret = qm_wait_pf_reset_finish(qm);
   5754	if (ret)
   5755		goto err_get_status;
   5756
   5757	qm_pf_reset_vf_done(qm);
   5758	qm_cmd_init(qm);
   5759
   5760	dev_info(dev, "device reset done.\n");
   5761
   5762	return;
   5763
   5764err_get_status:
   5765	qm_cmd_init(qm);
   5766	qm_reset_bit_clear(qm);
   5767}
   5768
   5769static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
   5770{
   5771	struct device *dev = &qm->pdev->dev;
   5772	u64 msg;
   5773	u32 cmd;
   5774	int ret;
   5775
   5776	/*
   5777	 * Get the msg from the source via mailbox. Whether or not it is received
   5778	 * successfully, the destination needs to ack the source by clearing the interrupt.
   5779	 */
   5780	ret = qm_get_mb_cmd(qm, &msg, fun_num);
   5781	qm_clear_cmd_interrupt(qm, BIT(fun_num));
   5782	if (ret) {
   5783		dev_err(dev, "failed to get msg from source!\n");
   5784		return;
   5785	}
   5786
   5787	cmd = msg & QM_MB_CMD_DATA_MASK;
   5788	switch (cmd) {
   5789	case QM_PF_FLR_PREPARE:
   5790		qm_pf_reset_vf_process(qm, QM_FLR);
   5791		break;
   5792	case QM_PF_SRST_PREPARE:
   5793		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
   5794		break;
   5795	case QM_VF_GET_QOS:
   5796		qm_vf_get_qos(qm, fun_num);
   5797		break;
   5798	case QM_PF_SET_QOS:
   5799		qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
   5800		break;
   5801	default:
   5802		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
   5803		break;
   5804	}
   5805}
   5806
   5807static void qm_cmd_process(struct work_struct *cmd_process)
   5808{
   5809	struct hisi_qm *qm = container_of(cmd_process,
   5810					struct hisi_qm, cmd_process);
   5811	u32 vfs_num = qm->vfs_num;
   5812	u64 val;
   5813	u32 i;
   5814
   5815	if (qm->fun_type == QM_HW_PF) {
   5816		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
   5817		if (!val)
   5818			return;
   5819
   5820		for (i = 1; i <= vfs_num; i++) {
   5821			if (val & BIT(i))
   5822				qm_handle_cmd_msg(qm, i);
   5823		}
   5824
   5825		return;
   5826	}
   5827
   5828	qm_handle_cmd_msg(qm, 0);
   5829}
   5830
   5831/**
   5832 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
   5833	 * @qm: The qm to be added.
   5834 * @qm_list: The qm list.
   5835 *
   5836	 * This function adds the qm to the qm list, and registers the algorithms
   5837	 * with crypto when the qm list was previously empty.
   5838 */
   5839int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
   5840{
   5841	struct device *dev = &qm->pdev->dev;
   5842	int flag = 0;
   5843	int ret = 0;
   5844
   5845	mutex_lock(&qm_list->lock);
   5846	if (list_empty(&qm_list->list))
   5847		flag = 1;
   5848	list_add_tail(&qm->list, &qm_list->list);
   5849	mutex_unlock(&qm_list->lock);
   5850
   5851	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
   5852			dev_info(dev, "HW V2 does not support using uacce sva mode and hardware crypto algs at the same time.\n");
   5853		return 0;
   5854	}
   5855
   5856	if (flag) {
   5857		ret = qm_list->register_to_crypto(qm);
   5858		if (ret) {
   5859			mutex_lock(&qm_list->lock);
   5860			list_del(&qm->list);
   5861			mutex_unlock(&qm_list->lock);
   5862		}
   5863	}
   5864
   5865	return ret;
   5866}
   5867EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
   5868
   5869/**
   5870 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
   5871 * qm list.
   5872	 * @qm: The qm to be deleted.
   5873 * @qm_list: The qm list.
   5874 *
   5875	 * This function deletes the qm from the qm list, and unregisters the
   5876	 * algorithms from crypto when the qm list becomes empty.
   5877 */
   5878void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
   5879{
   5880	mutex_lock(&qm_list->lock);
   5881	list_del(&qm->list);
   5882	mutex_unlock(&qm_list->lock);
   5883
   5884	if (qm->ver <= QM_HW_V2 && qm->use_sva)
   5885		return;
   5886
   5887	if (list_empty(&qm_list->list))
   5888		qm_list->unregister_from_crypto(qm);
   5889}
   5890EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
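
/*
 * Illustrative sketch (assumption): a driver keeps one global hisi_qm_list
 * whose callbacks register/unregister its crypto algorithms; probe adds each
 * qm with hisi_qm_alg_register() and remove drops it again. All "example_*"
 * names are hypothetical.
 *
 *	static struct hisi_qm_list example_devices = {
 *		.register_to_crypto	= example_register_to_crypto,
 *		.unregister_from_crypto	= example_unregister_from_crypto,
 *	};
 *
 *	// in probe, after the qm has been started:
 *	ret = hisi_qm_alg_register(qm, &example_devices);
 *
 *	// in remove, before the qm is stopped:
 *	hisi_qm_alg_unregister(qm, &example_devices);
 */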
   5891
   5892static int qm_get_qp_num(struct hisi_qm *qm)
   5893{
   5894	if (qm->ver == QM_HW_V1)
   5895		qm->ctrl_qp_num = QM_QNUM_V1;
   5896	else if (qm->ver == QM_HW_V2)
   5897		qm->ctrl_qp_num = QM_QNUM_V2;
   5898	else
   5899		qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
   5900					QM_QP_NUN_MASK;
   5901
   5902	if (qm->use_db_isolation)
   5903		qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
   5904				  QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
   5905	else
   5906		qm->max_qp_num = qm->ctrl_qp_num;
   5907
   5908	/* check if qp number is valid */
   5909	if (qm->qp_num > qm->max_qp_num) {
   5910		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
   5911			qm->qp_num, qm->max_qp_num);
   5912		return -EINVAL;
   5913	}
   5914
   5915	return 0;
   5916}
   5917
   5918static int qm_get_pci_res(struct hisi_qm *qm)
   5919{
   5920	struct pci_dev *pdev = qm->pdev;
   5921	struct device *dev = &pdev->dev;
   5922	int ret;
   5923
   5924	ret = pci_request_mem_regions(pdev, qm->dev_name);
   5925	if (ret < 0) {
   5926		dev_err(dev, "Failed to request mem regions!\n");
   5927		return ret;
   5928	}
   5929
   5930	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
   5931	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
   5932	if (!qm->io_base) {
   5933		ret = -EIO;
   5934		goto err_request_mem_regions;
   5935	}
   5936
   5937	if (qm->ver > QM_HW_V2) {
   5938		if (qm->fun_type == QM_HW_PF)
   5939			qm->use_db_isolation = readl(qm->io_base +
   5940						     QM_QUE_ISO_EN) & BIT(0);
   5941		else
   5942			qm->use_db_isolation = readl(qm->io_base +
   5943						     QM_QUE_ISO_CFG_V) & BIT(0);
   5944	}
   5945
   5946	if (qm->use_db_isolation) {
   5947		qm->db_interval = QM_QP_DB_INTERVAL;
   5948		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
   5949		qm->db_io_base = ioremap(qm->db_phys_base,
   5950					 pci_resource_len(pdev, PCI_BAR_4));
   5951		if (!qm->db_io_base) {
   5952			ret = -EIO;
   5953			goto err_ioremap;
   5954		}
   5955	} else {
   5956		qm->db_phys_base = qm->phys_base;
   5957		qm->db_io_base = qm->io_base;
   5958		qm->db_interval = 0;
   5959	}
   5960
   5961	if (qm->fun_type == QM_HW_PF) {
   5962		ret = qm_get_qp_num(qm);
   5963		if (ret)
   5964			goto err_db_ioremap;
   5965	}
   5966
   5967	return 0;
   5968
   5969err_db_ioremap:
   5970	if (qm->use_db_isolation)
   5971		iounmap(qm->db_io_base);
   5972err_ioremap:
   5973	iounmap(qm->io_base);
   5974err_request_mem_regions:
   5975	pci_release_mem_regions(pdev);
   5976	return ret;
   5977}
   5978
   5979static int hisi_qm_pci_init(struct hisi_qm *qm)
   5980{
   5981	struct pci_dev *pdev = qm->pdev;
   5982	struct device *dev = &pdev->dev;
   5983	unsigned int num_vec;
   5984	int ret;
   5985
   5986	ret = pci_enable_device_mem(pdev);
   5987	if (ret < 0) {
   5988		dev_err(dev, "Failed to enable device mem!\n");
   5989		return ret;
   5990	}
   5991
   5992	ret = qm_get_pci_res(qm);
   5993	if (ret)
   5994		goto err_disable_pcidev;
   5995
   5996	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
   5997	if (ret < 0)
   5998		goto err_get_pci_res;
   5999	pci_set_master(pdev);
   6000
   6001	if (!qm->ops->get_irq_num) {
   6002		ret = -EOPNOTSUPP;
   6003		goto err_get_pci_res;
   6004	}
   6005	num_vec = qm->ops->get_irq_num(qm);
   6006	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
   6007	if (ret < 0) {
   6008		dev_err(dev, "Failed to enable MSI vectors!\n");
   6009		goto err_get_pci_res;
   6010	}
   6011
   6012	return 0;
   6013
   6014err_get_pci_res:
   6015	qm_put_pci_res(qm);
   6016err_disable_pcidev:
   6017	pci_disable_device(pdev);
   6018	return ret;
   6019}
   6020
   6021static void hisi_qm_init_work(struct hisi_qm *qm)
   6022{
   6023	INIT_WORK(&qm->work, qm_work_process);
   6024	if (qm->fun_type == QM_HW_PF)
   6025		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
   6026
   6027	if (qm->ver > QM_HW_V2)
   6028		INIT_WORK(&qm->cmd_process, qm_cmd_process);
   6029}
   6030
   6031static int hisi_qp_alloc_memory(struct hisi_qm *qm)
   6032{
   6033	struct device *dev = &qm->pdev->dev;
   6034	size_t qp_dma_size;
   6035	int i, ret;
   6036
   6037	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
   6038	if (!qm->qp_array)
   6039		return -ENOMEM;
   6040
   6041	/* one more page for device or qp statuses */
   6042	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
   6043		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
   6044	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
   6045	for (i = 0; i < qm->qp_num; i++) {
   6046		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
   6047		if (ret)
   6048			goto err_init_qp_mem;
   6049
   6050			dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
   6051	}
   6052
   6053	return 0;
   6054err_init_qp_mem:
   6055	hisi_qp_memory_uninit(qm, i);
   6056
   6057	return ret;
   6058}
   6059
   6060static int hisi_qm_memory_init(struct hisi_qm *qm)
   6061{
   6062	struct device *dev = &qm->pdev->dev;
   6063	int ret, total_func, i;
   6064	size_t off = 0;
   6065
   6066	total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
   6067	qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
   6068	if (!qm->factor)
   6069		return -ENOMEM;
   6070	for (i = 0; i < total_func; i++)
   6071		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
   6072
   6073#define QM_INIT_BUF(qm, type, num) do { \
   6074	(qm)->type = ((qm)->qdma.va + (off)); \
   6075	(qm)->type##_dma = (qm)->qdma.dma + (off); \
   6076	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
   6077} while (0)
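/*
 * For reference, QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH) expands to:
 *
 *	(qm)->eqe = ((qm)->qdma.va + (off));
 *	(qm)->eqe_dma = (qm)->qdma.dma + (off);
 *	off += QMC_ALIGN(sizeof(struct qm_eqe) * (QM_EQ_DEPTH));
 *
 * i.e. each queue-management structure is carved out of the single qdma
 * allocation at an aligned offset.
 */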
   6078
   6079	idr_init(&qm->qp_idr);
   6080	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
   6081			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
   6082			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
   6083			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
   6084	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
   6085					 GFP_ATOMIC);
   6086	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
   6087	if (!qm->qdma.va) {
   6088		ret = -ENOMEM;
   6089		goto err_alloc_qdma;
   6090	}
   6091
   6092	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
   6093	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
   6094	QM_INIT_BUF(qm, sqc, qm->qp_num);
   6095	QM_INIT_BUF(qm, cqc, qm->qp_num);
   6096
   6097	ret = hisi_qp_alloc_memory(qm);
   6098	if (ret)
   6099		goto err_alloc_qp_array;
   6100
   6101	return 0;
   6102
   6103err_alloc_qp_array:
   6104	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
   6105err_alloc_qdma:
   6106	kfree(qm->factor);
   6107
   6108	return ret;
   6109}
   6110
   6111static void qm_last_regs_init(struct hisi_qm *qm)
   6112{
   6113	int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
   6114	struct qm_debug *debug = &qm->debug;
   6115	int i;
   6116
   6117	if (qm->fun_type == QM_HW_VF)
   6118		return;
   6119
   6120	debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
   6121								GFP_KERNEL);
   6122	if (!debug->qm_last_words)
   6123		return;
   6124
   6125	for (i = 0; i < dfx_regs_num; i++) {
   6126		debug->qm_last_words[i] = readl_relaxed(qm->io_base +
   6127			qm_dfx_regs[i].offset);
   6128	}
   6129}
   6130
   6131/**
   6132	 * hisi_qm_init() - Initialize the qm's configuration.
   6133	 * @qm: The qm to be initialized.
   6134	 *
   6135	 * This function initializes the qm, after which hisi_qm_start() can be called to put the qm to work.
   6136 */
   6137int hisi_qm_init(struct hisi_qm *qm)
   6138{
   6139	struct pci_dev *pdev = qm->pdev;
   6140	struct device *dev = &pdev->dev;
   6141	int ret;
   6142
   6143	hisi_qm_pre_init(qm);
   6144
   6145	ret = hisi_qm_pci_init(qm);
   6146	if (ret)
   6147		return ret;
   6148
   6149	ret = qm_irq_register(qm);
   6150	if (ret)
   6151		goto err_pci_init;
   6152
   6153	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
   6154		/* v2 starts to support get vft by mailbox */
   6155		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
   6156		if (ret)
   6157			goto err_irq_register;
   6158	}
   6159
   6160	if (qm->fun_type == QM_HW_PF) {
   6161		qm_disable_clock_gate(qm);
   6162		ret = qm_dev_mem_reset(qm);
   6163		if (ret) {
   6164			dev_err(dev, "failed to reset device memory\n");
   6165			goto err_irq_register;
   6166		}
   6167	}
   6168
   6169	if (qm->mode == UACCE_MODE_SVA) {
   6170		ret = qm_alloc_uacce(qm);
   6171		if (ret < 0)
   6172			dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
   6173	}
   6174
   6175	ret = hisi_qm_memory_init(qm);
   6176	if (ret)
   6177		goto err_alloc_uacce;
   6178
   6179	hisi_qm_init_work(qm);
   6180	qm_cmd_init(qm);
   6181	atomic_set(&qm->status.flags, QM_INIT);
   6182
   6183	qm_last_regs_init(qm);
   6184
   6185	return 0;
   6186
   6187err_alloc_uacce:
   6188	if (qm->use_sva) {
   6189		uacce_remove(qm->uacce);
   6190		qm->uacce = NULL;
   6191	}
   6192err_irq_register:
   6193	qm_irq_unregister(qm);
   6194err_pci_init:
   6195	hisi_qm_pci_uninit(qm);
   6196	return ret;
   6197}
   6198EXPORT_SYMBOL_GPL(hisi_qm_init);
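
/*
 * Illustrative probe-time sketch (assumption, not part of this file): the
 * caller fills in qm->pdev, qm->fun_type, qm->sqe_size and related fields,
 * then calls hisi_qm_init() followed by hisi_qm_start(). "example_probe_qm"
 * is hypothetical.
 *
 *	static int example_probe_qm(struct hisi_qm *qm)
 *	{
 *		int ret;
 *
 *		ret = hisi_qm_init(qm);
 *		if (ret)
 *			return ret;
 *
 *		ret = hisi_qm_start(qm);
 *		if (ret)
 *			hisi_qm_uninit(qm);
 *
 *		return ret;
 *	}
 */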
   6199
   6200/**
   6201 * hisi_qm_get_dfx_access() - Try to get dfx access.
   6202 * @qm: pointer to accelerator device.
   6203 *
   6204	 * Try to get dfx access, so that the user can read the debug information.
   6205	 *
   6206	 * If the device is suspended, return failure; otherwise
   6207	 * bump up the runtime PM usage counter.
   6208 */
   6209int hisi_qm_get_dfx_access(struct hisi_qm *qm)
   6210{
   6211	struct device *dev = &qm->pdev->dev;
   6212
   6213	if (pm_runtime_suspended(dev)) {
   6214			dev_info(dev, "cannot read/write - device is suspended.\n");
   6215		return -EAGAIN;
   6216	}
   6217
   6218	return qm_pm_get_sync(qm);
   6219}
   6220EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
   6221
   6222/**
   6223 * hisi_qm_put_dfx_access() - Put dfx access.
   6224 * @qm: pointer to accelerator device.
   6225 *
   6226 * Put dfx access, drop runtime PM usage counter.
   6227 */
   6228void hisi_qm_put_dfx_access(struct hisi_qm *qm)
   6229{
   6230	qm_pm_put_sync(qm);
   6231}
   6232EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
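
/*
 * Illustrative sketch (assumption): debugfs readers bracket register access
 * with the get/put helpers above so that a runtime-suspended device is never
 * touched. "example_read_reg" and the 0x100 offset are hypothetical.
 *
 *	static int example_read_reg(struct hisi_qm *qm, u32 *val)
 *	{
 *		int ret;
 *
 *		ret = hisi_qm_get_dfx_access(qm);
 *		if (ret)
 *			return ret;
 *
 *		*val = readl(qm->io_base + 0x100);	// hypothetical offset
 *		hisi_qm_put_dfx_access(qm);
 *		return 0;
 *	}
 */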
   6233
   6234/**
   6235 * hisi_qm_pm_init() - Initialize qm runtime PM.
   6236 * @qm: pointer to accelerator device.
   6237 *
   6238	 * Function that initializes qm runtime PM.
   6239 */
   6240void hisi_qm_pm_init(struct hisi_qm *qm)
   6241{
   6242	struct device *dev = &qm->pdev->dev;
   6243
   6244	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
   6245		return;
   6246
   6247	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
   6248	pm_runtime_use_autosuspend(dev);
   6249	pm_runtime_put_noidle(dev);
   6250}
   6251EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
   6252
   6253/**
   6254 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
   6255 * @qm: pointer to accelerator device.
   6256 *
   6257	 * Function that uninitializes qm runtime PM.
   6258 */
   6259void hisi_qm_pm_uninit(struct hisi_qm *qm)
   6260{
   6261	struct device *dev = &qm->pdev->dev;
   6262
   6263	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
   6264		return;
   6265
   6266	pm_runtime_get_noresume(dev);
   6267	pm_runtime_dont_use_autosuspend(dev);
   6268}
   6269EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
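
/*
 * Illustrative sketch (assumption): hisi_qm_pm_init() is meant to be called
 * at the end of a driver's probe and hisi_qm_pm_uninit() early in its remove,
 * so the usage counter dropped by pm_runtime_put_noidle() above is taken back
 * before teardown. "example_remove_qm" is hypothetical.
 *
 *	static void example_remove_qm(struct hisi_qm *qm)
 *	{
 *		hisi_qm_pm_uninit(qm);
 *		// ... stop queues, unregister algs, then hisi_qm_uninit(qm)
 *	}
 */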
   6270
   6271static int qm_prepare_for_suspend(struct hisi_qm *qm)
   6272{
   6273	struct pci_dev *pdev = qm->pdev;
   6274	int ret;
   6275	u32 val;
   6276
   6277	ret = qm->ops->set_msi(qm, false);
   6278	if (ret) {
   6279		pci_err(pdev, "failed to disable MSI before suspending!\n");
   6280		return ret;
   6281	}
   6282
   6283	/* shutdown OOO register */
   6284	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
   6285	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);
   6286
   6287	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
   6288					 val,
   6289					 (val == ACC_MASTER_TRANS_RETURN_RW),
   6290					 POLL_PERIOD, POLL_TIMEOUT);
   6291	if (ret) {
   6292		pci_emerg(pdev, "Bus lock! Please reset system.\n");
   6293		return ret;
   6294	}
   6295
   6296	ret = qm_set_pf_mse(qm, false);
   6297	if (ret)
   6298		pci_err(pdev, "failed to disable MSE before suspending!\n");
   6299
   6300	return ret;
   6301}
   6302
   6303static int qm_rebuild_for_resume(struct hisi_qm *qm)
   6304{
   6305	struct pci_dev *pdev = qm->pdev;
   6306	int ret;
   6307
   6308	ret = qm_set_pf_mse(qm, true);
   6309	if (ret) {
   6310		pci_err(pdev, "failed to enable MSE after resuming!\n");
   6311		return ret;
   6312	}
   6313
   6314	ret = qm->ops->set_msi(qm, true);
   6315	if (ret) {
   6316		pci_err(pdev, "failed to enable MSI after resuming!\n");
   6317		return ret;
   6318	}
   6319
   6320	ret = qm_dev_hw_init(qm);
   6321	if (ret) {
   6322		pci_err(pdev, "failed to init device after resuming\n");
   6323		return ret;
   6324	}
   6325
   6326	qm_cmd_init(qm);
   6327	hisi_qm_dev_err_init(qm);
   6328	qm_disable_clock_gate(qm);
   6329	ret = qm_dev_mem_reset(qm);
   6330	if (ret)
   6331		pci_err(pdev, "failed to reset device memory\n");
   6332
   6333	return ret;
   6334}
   6335
   6336/**
   6337 * hisi_qm_suspend() - Runtime suspend of given device.
   6338 * @dev: device to suspend.
   6339 *
   6340	 * Function that suspends the device.
   6341 */
   6342int hisi_qm_suspend(struct device *dev)
   6343{
   6344	struct pci_dev *pdev = to_pci_dev(dev);
   6345	struct hisi_qm *qm = pci_get_drvdata(pdev);
   6346	int ret;
   6347
   6348	pci_info(pdev, "entering suspended state\n");
   6349
   6350	ret = hisi_qm_stop(qm, QM_NORMAL);
   6351	if (ret) {
   6352		pci_err(pdev, "failed to stop qm(%d)\n", ret);
   6353		return ret;
   6354	}
   6355
   6356	ret = qm_prepare_for_suspend(qm);
   6357	if (ret)
   6358		pci_err(pdev, "failed to prepare for suspend (%d)\n", ret);
   6359
   6360	return ret;
   6361}
   6362EXPORT_SYMBOL_GPL(hisi_qm_suspend);
   6363
   6364/**
   6365 * hisi_qm_resume() - Runtime resume of given device.
   6366 * @dev: device to resume.
   6367 *
   6368	 * Function that resumes the device.
   6369 */
   6370int hisi_qm_resume(struct device *dev)
   6371{
   6372	struct pci_dev *pdev = to_pci_dev(dev);
   6373	struct hisi_qm *qm = pci_get_drvdata(pdev);
   6374	int ret;
   6375
   6376	pci_info(pdev, "resuming from suspend state\n");
   6377
   6378	ret = qm_rebuild_for_resume(qm);
   6379	if (ret) {
   6380		pci_err(pdev, "failed to rebuild for resume (%d)\n", ret);
   6381		return ret;
   6382	}
   6383
   6384	ret = hisi_qm_start(qm);
   6385	if (ret)
   6386		pci_err(pdev, "failed to start qm(%d)\n", ret);
   6387
   6388	return ret;
   6389}
   6390EXPORT_SYMBOL_GPL(hisi_qm_resume);
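
/*
 * Illustrative sketch (assumption, not part of this file): drivers plug
 * hisi_qm_suspend()/hisi_qm_resume() into a dev_pm_ops table referenced from
 * their pci_driver so runtime PM uses the helpers above. "example_pm_ops" is
 * hypothetical.
 *
 *	static const struct dev_pm_ops example_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */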
   6391
   6392MODULE_LICENSE("GPL v2");
   6393MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
   6394MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");