cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sec_main.c (31789B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF	0xa255

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x7c1ff
#define SEC_CORE_INT_CLEAR		0x7c1ff
#define SEC_SAA_ENABLE			0x17f

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x7c177
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE		0x0
#define SEC_MEM_START_INIT_REG	0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

/* clock gating */
#define SEC_CONTROL_REG		0x301200
#define SEC_DYNAMIC_GATE_REG		0x30121c
#define SEC_CORE_AUTO_GATE		0x30212c
#define SEC_DYNAMIC_GATE_EN		0x7bff
#define SEC_CORE_AUTO_GATE_EN		GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))

#define SEC_TRNG_EN_SHIFT		8
#define SEC_AXI_SHUTDOWN_ENABLE	BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					SEC_USER1_ENABLE_DATA_SSV | \
					SEC_USER1_WB_CONTEXT_SSV |  \
					SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_INTERFACE_USER_CTRL0_REG_V3	0x302220
#define SEC_INTERFACE_USER_CTRL1_REG_V3	0x302224
#define SEC_USER1_SMMU_NORMAL_V3	(BIT(23) | BIT(17) | BIT(11) | BIT(5))
#define SEC_USER1_SMMU_MASK_V3		0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48
#define SEC_SHAPER_TYPE_RATE		400

#define SEC_DFX_BASE		0x301000
#define SEC_DFX_CORE		0x302100
#define SEC_DFX_COMMON1		0x301600
#define SEC_DFX_COMMON2		0x301C00
#define SEC_DFX_BASE_LEN		0x9D
#define SEC_DFX_CORE_LEN		0x32B
#define SEC_DFX_COMMON1_LEN		0x45
#define SEC_DFX_COMMON2_LEN		0xBA

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto	= sec_register_to_crypto,
	.unregister_from_crypto	= sec_unregister_from_crypto,
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE    ",  0x301010},
	{"SEC_SAA_EN                    ",  0x301270},
	{"SEC_BD_LATENCY_MIN            ",  0x301600},
	{"SEC_BD_LATENCY_MAX            ",  0x301608},
	{"SEC_BD_LATENCY_AVG            ",  0x30160C},
	{"SEC_BD_NUM_IN_SAA0            ",  0x301670},
	{"SEC_BD_NUM_IN_SAA1            ",  0x301674},
	{"SEC_BD_NUM_IN_SEC             ",  0x301680},
	{"SEC_ECC_1BIT_CNT              ",  0x301C00},
	{"SEC_ECC_1BIT_INFO             ",  0x301C04},
	{"SEC_ECC_2BIT_CNT              ",  0x301C10},
	{"SEC_ECC_2BIT_INFO             ",  0x301C14},
	{"SEC_BD_SAA0                   ",  0x301C20},
	{"SEC_BD_SAA1                   ",  0x301C24},
	{"SEC_BD_SAA2                   ",  0x301C28},
	{"SEC_BD_SAA3                   ",  0x301C2C},
	{"SEC_BD_SAA4                   ",  0x301C30},
	{"SEC_BD_SAA5                   ",  0x301C34},
	{"SEC_BD_SAA6                   ",  0x301C38},
	{"SEC_BD_SAA7                   ",  0x301C3C},
	{"SEC_BD_SAA8                   ",  0x301C40},
};

/* define the SEC's dfx regs region and region length */
static struct dfx_diff_registers sec_diff_regs[] = {
	{
		.reg_offset = SEC_DFX_BASE,
		.reg_len = SEC_DFX_BASE_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON1,
		.reg_len = SEC_DFX_COMMON1_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON2,
		.reg_len = SEC_DFX_COMMON2_LEN,
	}, {
		.reg_offset = SEC_DFX_CORE,
		.reg_len = SEC_DFX_CORE_LEN,
	},
};

static int sec_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
					ARRAY_SIZE(sec_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means sec registers to crypto only,
 * uacce_mode = 1 means sec registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
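
/*
 * Example (illustrative only): with the parameters above, the module
 * can be loaded as, e.g.,
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=2 vfs_num=0 uacce_mode=0
 * All four parameters use mode 0444, i.e. they are readable but not
 * writable through sysfs once the module is loaded.
 */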

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

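/*
 * Select the engine's endianness in SEC_CONTROL_REG: both endian bits
 * are cleared first, then BIT(1) is set for non-64-bit kernels and
 * BIT(0) for big-endian kernels, so both bits clear selects a 64-bit
 * little-endian host.
 */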
static void sec_set_endian(struct hisi_qm *qm)
{
	u32 reg;

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));
	if (!IS_ENABLED(CONFIG_64BIT))
		reg |= BIT(1);

	if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		reg |= BIT(0);

	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}

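/*
 * Program the SMMU user-control registers: HW v3 uses relocated (_V3)
 * register offsets, while on earlier hardware the SVA (shared virtual
 * addressing) SSV bits in USER_CTRL1 are set only when the QM runs in
 * SVA mode; otherwise the plain SMMU_NORMAL configuration is used.
 */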
static void sec_engine_sva_config(struct hisi_qm *qm)
{
	u32 reg;

	if (qm->ver > QM_HW_V2) {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);

		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
		reg &= SEC_USER1_SMMU_MASK_V3;
		reg |= SEC_USER1_SMMU_NORMAL_V3;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
	} else {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
		reg &= SEC_USER1_SMMU_MASK;
		if (qm->use_sva)
			reg |= SEC_USER1_SMMU_SVA;
		else
			reg |= SEC_USER1_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
	}
}

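/*
 * SVA address prefetch exists only on HW v3 and later: opening clears
 * the disable bits in SEC_PREFETCH_CFG and polls until the hardware
 * acknowledges; closing sets the disable bit and polls SEC_SVA_TRANS
 * until outstanding translations have drained.
 */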
static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void sec_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
	val |= SEC_DYNAMIC_GATE_EN;
	writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

	val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
	val |= SEC_CORE_AUTO_GATE_EN;
	writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* Kunpeng920 needs clock gating disabled */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}

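/*
 * One-time engine bring-up: with clock gating disabled, run the memory
 * init handshake (SEC_MEM_START_INIT_REG/SEC_MEM_INIT_DONE_REG), enable
 * the TRNG, configure SMMU/SVA access, set the AXI single-port
 * transaction limit and the SAA enables, program the BD error-check
 * masks, configure endianness, and finally re-enable clock gating.
 */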
static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control before mem init */
	sec_disable_clock_gate(qm);

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	sec_engine_sva_config(qm);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);

	/* HW V2 enables sm4 extra mode, such as ctr/ecb */
	if (qm->ver < QM_HW_V3)
		writel_relaxed(SEC_BD_ERR_CHK_EN0,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);

	/* Enable sm4 xts mode multiple iv */
	writel_relaxed(SEC_BD_ERR_CHK_EN1,
		       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
	writel_relaxed(SEC_BD_ERR_CHK_EN3,
		       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);

	/* configure endianness */
	sec_set_endian(qm);

	sec_enable_clock_gate(qm);

	return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc, cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

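/*
 * Control whether a non-fatal (NFE) hardware error shuts down the AXI
 * master out-of-order path: the shutdown-enable bit lives in
 * SEC_CONTROL_REG, and on HW v3 the NFE mask is additionally written
 * to SEC_OOO_SHUTDOWN_SEL.
 */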
static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = SEC_RAS_NFE_ENB_MSK;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	/* clear SEC hw error source if any */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
			SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

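	/*
	 * Only 0 and 1 are valid: the bit enables read-to-clear for the
	 * DFX counters.
	 */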
	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
		debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
	if (qm->fun_type == QM_HW_PF && sec_regs)
		debugfs_create_file("diff_regs", 0444, tmp_d,
				      qm, &sec_diff_regs_fops);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
					sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				   tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
						  qm->debug.debug_root,
						  sec->debug.files + i,
						  &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
				ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));

	debugfs_remove_recursive(qm->debug.debug_root);
}

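/*
 * Snapshot the DFX registers at probe time so that
 * sec_show_last_dfx_regs() can report which of them changed across a
 * controller reset.
 */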
static int sec_show_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
					sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
							sec_dfx_regs[i].offset);

	return 0;
}

static void sec_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void sec_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debug registers during controller reset */
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				sec_dfx_regs[i].name, debug->last_words[i], val);
	}
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
					errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
						((err_val) >> SEC_ECC_NUM) &
						SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

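/*
 * Reopen the AXI master out-of-order path after error recovery by
 * clearing and then setting the shutdown-enable bit in SEC_CONTROL_REG.
 */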
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
			QM_ACC_WB_NOT_READY_TIMEOUT;
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init		= sec_set_user_domain_and_cache,
	.hw_err_enable		= sec_hw_error_enable,
	.hw_err_disable		= sec_hw_error_disable,
	.get_dev_hw_err_status	= sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err		= sec_log_hw_error,
	.open_axi_master_ooo	= sec_open_axi_master_ooo,
	.open_sva_prefetch	= sec_open_sva_prefetch,
	.close_sva_prefetch	= sec_close_sva_prefetch,
	.show_last_dfx_regs	= sec_show_last_dfx_regs,
	.err_info_init		= sec_err_info_init,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	qm->err_ini = &sec_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	sec_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);
	ret = sec_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the qm configuration from inside a
		 * VM on v1 hardware, so force the PF to use SEC_PF_DEF_Q_NUM
		 * and trigger only one VF on v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	/*
	 * WQ_HIGHPRI: SEC requests must have low latency,
	 * so a high-priority workqueue is needed.
	 * WQ_UNBOUND: SEC tasks are likely to be long-running,
	 * CPU-intensive workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			domain->type);
	}
}

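/*
 * Probe order: init the QM and its workqueue, run the PF-only engine
 * and error-handling setup, start the QM, create debugfs entries, then
 * register the crypto algorithms (only if enough queue pairs exist),
 * optionally register the uacce device and enable SR-IOV, and finally
 * set up runtime PM. The error labels below unwind in reverse order.
 */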
static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	if (qm->qp_num >= ctx_q_num) {
		ret = hisi_qm_alg_register(qm, &sec_devices);
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_qm_stop;
		}
	} else {
		pci_warn(qm->pdev,
			"Failed to use kernel mode, qp not enough!\n");
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_alg_unregister:
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_show_last_regs_uninit(qm);
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &sec_devices);
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);
	sec_show_last_regs_uninit(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &sec_pm_ops,
};

struct pci_driver *hisi_sec_get_pf_driver(void)
{
	return &sec_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");