cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hisi_acc_qm.h (12851B)


/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1			4096
#define QM_QNUM_V2			1024
#define QM_MAX_VFS_NUM_V2		63

/* qm user domain */
#define QM_ARUSER_M_CFG_1		0x100088
#define AXUSER_SNOOP_ENABLE		BIT(30)
#define AXUSER_CMD_TYPE			GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL		1
#define AXUSER_NS			BIT(6)
#define AXUSER_NO			BIT(5)
#define AXUSER_FP			BIT(4)
#define AXUSER_SSV			BIT(0)
#define AXUSER_BASE			(AXUSER_SNOOP_ENABLE |		\
					FIELD_PREP(AXUSER_CMD_TYPE,	\
					AXUSER_CMD_SMMU_NORMAL) |	\
					AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE		0x100090
#define ARUSER_M_CFG_ENABLE		0xfffffffe
#define QM_AWUSER_M_CFG_1		0x100098
#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
#define AWUSER_M_CFG_ENABLE		0xfffffffe
#define QM_WUSER_M_CFG_ENABLE		0x1000a8
#define WUSER_M_CFG_ENABLE		0xffffffff

/* mailbox */
#define QM_MB_CMD_SQC                   0x0
#define QM_MB_CMD_CQC                   0x1
#define QM_MB_CMD_EQC                   0x2
#define QM_MB_CMD_AEQC                  0x3
#define QM_MB_CMD_SQC_BT                0x4
#define QM_MB_CMD_CQC_BT                0x5
#define QM_MB_CMD_SQC_VFT_V2            0x6
#define QM_MB_CMD_STOP_QP               0x8
#define QM_MB_CMD_SRC                   0xc
#define QM_MB_CMD_DST                   0xd

#define QM_MB_CMD_SEND_BASE		0x300
#define QM_MB_EVENT_SHIFT               8
#define QM_MB_BUSY_SHIFT		13
#define QM_MB_OP_SHIFT			14
#define QM_MB_CMD_DATA_ADDR_L		0x304
#define QM_MB_CMD_DATA_ADDR_H		0x308
#define QM_MB_MAX_WAIT_CNT		6000

/* doorbell */
#define QM_DOORBELL_CMD_SQ              0
#define QM_DOORBELL_CMD_CQ              1
#define QM_DOORBELL_CMD_EQ              2
#define QM_DOORBELL_CMD_AEQ             3

#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_QP_MAX_NUM_SHIFT             11
#define QM_DB_CMD_SHIFT_V2		12
#define QM_DB_RAND_SHIFT_V2		16
#define QM_DB_INDEX_SHIFT_V2		32
#define QM_DB_PRIORITY_SHIFT_V2		48
#define QM_VF_STATE			0x60
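
/*
 * Illustrative note, not part of the original header: on QM_HW_V2 and later
 * hardware the QM core (drivers/crypto/hisilicon/qm.c) packs a 64-bit
 * doorbell word from the shifts above, roughly:
 *
 *	doorbell = qn |
 *		   ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
 *		   ((u64)rand << QM_DB_RAND_SHIFT_V2) |
 *		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
 *		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
 *
 * and writes it at QM_DOORBELL_SQ_CQ_BASE_V2 (for SQ/CQ doorbells) or
 * QM_DOORBELL_EQ_AEQ_BASE_V2 (for EQ/AEQ doorbells).
 */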

/* qm cache */
#define QM_CACHE_CTL			0x100050
#define SQC_CACHE_ENABLE		BIT(0)
#define CQC_CACHE_ENABLE		BIT(1)
#define SQC_CACHE_WB_ENABLE		BIT(4)
#define SQC_CACHE_WB_THRD		GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE		BIT(11)
#define CQC_CACHE_WB_THRD		GENMASK(17, 12)
#define QM_AXI_M_CFG			0x1000ac
#define AXI_M_CFG			0xffff
#define QM_AXI_M_CFG_ENABLE		0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE		0xffffffff
#define QM_PEH_AXUSER_CFG		0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG			0x401001
#define PEH_AXUSER_CFG_ENABLE		0xffffffff

#define QM_AXI_RRESP			BIT(0)
#define QM_AXI_BRESP			BIT(1)
#define QM_ECC_MBIT			BIT(2)
#define QM_ECC_1BIT			BIT(3)
#define QM_ACC_GET_TASK_TIMEOUT		BIT(4)
#define QM_ACC_DO_TASK_TIMEOUT		BIT(5)
#define QM_ACC_WB_NOT_READY_TIMEOUT	BIT(6)
#define QM_SQ_CQ_VF_INVALID		BIT(7)
#define QM_CQ_VF_INVALID		BIT(8)
#define QM_SQ_VF_INVALID		BIT(9)
#define QM_DB_TIMEOUT			BIT(10)
#define QM_OF_FIFO_OF			BIT(11)
#define QM_DB_RANDOM_INVALID		BIT(12)
#define QM_MAILBOX_TIMEOUT		BIT(13)
#define QM_FLR_TIMEOUT			BIT(14)

#define QM_BASE_NFE	(QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \
			 QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \
			 QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \
			 QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT)
#define QM_BASE_CE			QM_ECC_1BIT

#define QM_Q_DEPTH			1024
#define QM_MIN_QNUM                     2
#define HISI_ACC_SGL_SGE_NR_MAX		255
#define QM_SHAPER_CFG			0x100164
#define QM_SHAPER_ENABLE		BIT(30)
#define QM_SHAPER_TYPE1_OFFSET		10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR		1

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE		0 /* don't use uacce */
#define UACCE_MODE_SVA			1 /* use uacce sva mode */
#define UACCE_MODE_DESC	"0(default) means only register to crypto, 1 means both register to crypto and uacce"

enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_FLR,
};

enum qm_state {
	QM_INIT = 0,
	QM_START,
	QM_CLOSE,
	QM_STOP,
};

enum qp_state {
	QP_INIT = 1,
	QP_START,
	QP_STOP,
	QP_CLOSE,
};

enum qm_hw_ver {
	QM_HW_UNKNOWN = -1,
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};

enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

enum qm_debug_file {
	CURRENT_QM,
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,
};

enum qm_vf_state {
	QM_READY = 0,
	QM_NOT_READY,
};

struct dfx_diff_registers {
	u32 *regs;
	u32 reg_offset;
	u32 reg_len;
};

struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
	unsigned int *qm_last_words;
	/* ACC engines recording last regs */
	unsigned int *last_words;
	struct dfx_diff_registers *qm_diff_regs;
	struct dfx_diff_registers *acc_diff_regs;
};

struct qm_shaper_factor {
	u32 func_qos;
	u64 cir_b;
	u64 cir_u;
	u64 cir_s;
	u64 cbs_s;
};

struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};

struct hisi_qm;

struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	u32 ecc_2bits_mask;
	u32 dev_ce_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*open_sva_prefetch)(struct hisi_qm *qm);
	void (*close_sva_prefetch)(struct hisi_qm *qm);
	void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
	void (*show_last_dfx_regs)(struct hisi_qm *qm);
	void (*err_info_init)(struct hisi_qm *qm);
};

struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(struct hisi_qm *qm);
	void (*unregister_from_crypto)(struct hisi_qm *qm);
};

struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	void __iomem *db_io_base;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 max_qp_num;
	u32 vfs_num;
	u32 db_interval;
	struct list_head list;
	struct hisi_qm_list *qm_list;

	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;

	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_info err_info;
	struct hisi_qm_err_status err_status;
	unsigned long misc_ctl; /* driver removing and reset sched */

	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;

	struct mutex mailbox_lock;

	const struct hisi_qm_hw_ops *ops;

	struct qm_debug debug;

	u32 error_mask;

	struct workqueue_struct *wq;
	struct work_struct work;
	struct work_struct rst_work;
	struct work_struct cmd_process;

	const char *algs;
	bool use_sva;
	bool is_frozen;

	/* doorbell isolation enable */
	bool use_db_isolation;
	resource_size_t phys_base;
	resource_size_t db_phys_base;
	struct uacce_device *uacce;
	int mode;
	struct qm_shaper_factor *factor;
	u32 mb_qos;
	u32 type_rate;
};

struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct hisi_qp {
	u32 qp_id;
	u8 alg_type;
	u8 req_type;

	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;

	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void *qp_ctx;
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);

	struct hisi_qm *qm;
	bool is_resetting;
	bool is_in_kernel;
	u16 pasid;
	struct uacce_queue *uacce_q;
};

static inline int q_num_set(const char *val, const struct kernel_param *kp,
			    unsigned int device)
{
	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
					      device, NULL);
	u32 n, q_num;
	int ret;

	if (!val)
		return -EINVAL;

	if (!pdev) {
		q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2);
		pr_info("No device found currently, suppose queue number is %u\n",
			q_num);
	} else {
		if (pdev->revision == QM_HW_V1)
			q_num = QM_QNUM_V1;
		else
			q_num = QM_QNUM_V2;

		/* drop the reference taken by pci_get_device() */
		pci_dev_put(pdev);
	}

	ret = kstrtou32(val, 10, &n);
	if (ret || n < QM_MIN_QNUM || n > q_num)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret < 0)
		return ret;

	if (n > QM_MAX_VFS_NUM_V2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int mode_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || (n != UACCE_MODE_SVA &&
			 n != UACCE_MODE_NOUACCE))
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
	return mode_set(val, kp);
}
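
/*
 * Illustrative sketch, not from this header: drivers typically expose the
 * helpers above as module parameters through module_param_cb(). The names
 * pf_q_num and PCI_DEVICE_ID_EXAMPLE_PF are placeholders, not real symbols.
 *
 *	static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 *	{
 *		return q_num_set(val, kp, PCI_DEVICE_ID_EXAMPLE_PF);
 *	}
 *
 *	static const struct kernel_param_ops pf_q_num_ops = {
 *		.set = pf_q_num_set,
 *		.get = param_get_int,
 *	};
 *
 *	static u32 pf_q_num = 64;
 *	module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
 *	MODULE_PARM_DESC(pf_q_num, "Number of queues assigned to the PF");
 */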

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
	INIT_LIST_HEAD(&qm_list->list);
	mutex_init(&qm_list->lock);
}

int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
int hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
int hisi_qm_diff_regs_init(struct hisi_qm *qm,
		struct dfx_diff_registers *dregs, int reg_len);
void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len);
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
		struct dfx_diff_registers *dregs, int regs_len);

pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);

int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op);

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
	struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
	u32 index, dma_addr_t *hw_sgl_dma);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
struct pci_driver *hisi_hpre_get_pf_driver(void);
struct pci_driver *hisi_zip_get_pf_driver(void);
#endif
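
For orientation, the sketch below shows one way a consumer driver might exercise the queue-pair API declared above: allocate a single QP from a registered QM list, start it, post one submission-queue entry, and tear it down. It is a minimal illustration rather than code from this repository; example_qm_list and sqe are assumed to be prepared by the caller, alg_type 0 is arbitrary, and NUMA_NO_NODE comes from <linux/numa.h>.

static int example_send_one(struct hisi_qm_list *example_qm_list, const void *sqe)
{
	struct hisi_qp *qp;
	int ret;

	/* Allocate one queue pair from whichever QM in the list fits best. */
	ret = hisi_qm_alloc_qps_node(example_qm_list, 1, 0, NUMA_NO_NODE, &qp);
	if (ret)
		return ret;

	/* Bring the QP up; in-kernel users typically pass 0 as the argument. */
	ret = hisi_qm_start_qp(qp, 0);
	if (ret)
		goto out_free;

	/* Copy the SQE into the submission queue and ring its doorbell. */
	ret = hisi_qp_send(qp, sqe);

	hisi_qm_stop_qp(qp);
out_free:
	hisi_qm_free_qps(&qp, 1);
	return ret;
}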