cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hisi_acc_vfio_pci.c (34158B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2021, HiSilicon Ltd.
      4 */
      5
      6#include <linux/device.h>
      7#include <linux/eventfd.h>
      8#include <linux/file.h>
      9#include <linux/hisi_acc_qm.h>
     10#include <linux/interrupt.h>
     11#include <linux/module.h>
     12#include <linux/pci.h>
     13#include <linux/vfio.h>
     14#include <linux/vfio_pci_core.h>
     15#include <linux/anon_inodes.h>
     16
     17#include "hisi_acc_vfio_pci.h"
     18
      19/* Return 0 when the VM acc device is ready, -ETIMEDOUT on hardware timeout */
     20static int qm_wait_dev_not_ready(struct hisi_qm *qm)
     21{
     22	u32 val;
     23
     24	return readl_relaxed_poll_timeout(qm->io_base + QM_VF_STATE,
     25				val, !(val & 0x1), MB_POLL_PERIOD_US,
     26				MB_POLL_TIMEOUT_US);
     27}
     28
     29/*
     30 * Each state Reg is checked 100 times,
     31 * with a delay of 100 microseconds after each check
     32 */
     33static u32 qm_check_reg_state(struct hisi_qm *qm, u32 regs)
     34{
     35	int check_times = 0;
     36	u32 state;
     37
     38	state = readl(qm->io_base + regs);
     39	while (state && check_times < ERROR_CHECK_TIMEOUT) {
     40		udelay(CHECK_DELAY_TIME);
     41		state = readl(qm->io_base + regs);
     42		check_times++;
     43	}
     44
     45	return state;
     46}
     47
     48static int qm_read_regs(struct hisi_qm *qm, u32 reg_addr,
     49			u32 *data, u8 nums)
     50{
     51	int i;
     52
     53	if (nums < 1 || nums > QM_REGS_MAX_LEN)
     54		return -EINVAL;
     55
     56	for (i = 0; i < nums; i++) {
     57		data[i] = readl(qm->io_base + reg_addr);
     58		reg_addr += QM_REG_ADDR_OFFSET;
     59	}
     60
     61	return 0;
     62}
     63
     64static int qm_write_regs(struct hisi_qm *qm, u32 reg,
     65			 u32 *data, u8 nums)
     66{
     67	int i;
     68
     69	if (nums < 1 || nums > QM_REGS_MAX_LEN)
     70		return -EINVAL;
     71
     72	for (i = 0; i < nums; i++)
     73		writel(data[i], qm->io_base + reg + i * QM_REG_ADDR_OFFSET);
     74
     75	return 0;
     76}
     77
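        /*
         * Read this VF's SQC VFT entry via the mailbox: the queue base is
         * returned through @base and the queue pair count (or a mailbox
         * error) is the return value.
         */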
     78static int qm_get_vft(struct hisi_qm *qm, u32 *base)
     79{
     80	u64 sqc_vft;
     81	u32 qp_num;
     82	int ret;
     83
     84	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
     85	if (ret)
     86		return ret;
     87
     88	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
     89		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
     90		  QM_XQC_ADDR_OFFSET);
     91	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
     92	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
     93		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
     94
     95	return qp_num;
     96}
     97
     98static int qm_get_sqc(struct hisi_qm *qm, u64 *addr)
     99{
    100	int ret;
    101
    102	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, 0, 0, 1);
    103	if (ret)
    104		return ret;
    105
    106	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
    107		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
    108		  QM_XQC_ADDR_OFFSET);
    109
    110	return 0;
    111}
    112
    113static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
    114{
    115	int ret;
    116
    117	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, 0, 0, 1);
    118	if (ret)
    119		return ret;
    120
    121	*addr = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
    122		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) <<
    123		  QM_XQC_ADDR_OFFSET);
    124
    125	return 0;
    126}
    127
    128static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
    129{
    130	struct device *dev = &qm->pdev->dev;
    131	int ret;
    132
    133	ret = qm_read_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
    134	if (ret) {
    135		dev_err(dev, "failed to read QM_VF_AEQ_INT_MASK\n");
    136		return ret;
    137	}
    138
    139	ret = qm_read_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
    140	if (ret) {
    141		dev_err(dev, "failed to read QM_VF_EQ_INT_MASK\n");
    142		return ret;
    143	}
    144
    145	ret = qm_read_regs(qm, QM_IFC_INT_SOURCE_V,
    146			   &vf_data->ifc_int_source, 1);
    147	if (ret) {
    148		dev_err(dev, "failed to read QM_IFC_INT_SOURCE_V\n");
    149		return ret;
    150	}
    151
    152	ret = qm_read_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
    153	if (ret) {
    154		dev_err(dev, "failed to read QM_IFC_INT_MASK\n");
    155		return ret;
    156	}
    157
    158	ret = qm_read_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
    159	if (ret) {
    160		dev_err(dev, "failed to read QM_IFC_INT_SET_V\n");
    161		return ret;
    162	}
    163
    164	ret = qm_read_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
    165	if (ret) {
    166		dev_err(dev, "failed to read QM_PAGE_SIZE\n");
    167		return ret;
    168	}
    169
    170	/* QM_EQC_DW has 7 regs */
    171	ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
    172	if (ret) {
    173		dev_err(dev, "failed to read QM_EQC_DW\n");
    174		return ret;
    175	}
    176
    177	/* QM_AEQC_DW has 7 regs */
    178	ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
    179	if (ret) {
    180		dev_err(dev, "failed to read QM_AEQC_DW\n");
    181		return ret;
    182	}
    183
    184	return 0;
    185}
    186
    187static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
    188{
    189	struct device *dev = &qm->pdev->dev;
    190	int ret;
    191
    192	/* check VF state */
    193	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
    194		dev_err(&qm->pdev->dev, "QM device is not ready to write\n");
    195		return -EBUSY;
    196	}
    197
    198	ret = qm_write_regs(qm, QM_VF_AEQ_INT_MASK, &vf_data->aeq_int_mask, 1);
    199	if (ret) {
    200		dev_err(dev, "failed to write QM_VF_AEQ_INT_MASK\n");
    201		return ret;
    202	}
    203
    204	ret = qm_write_regs(qm, QM_VF_EQ_INT_MASK, &vf_data->eq_int_mask, 1);
    205	if (ret) {
    206		dev_err(dev, "failed to write QM_VF_EQ_INT_MASK\n");
    207		return ret;
    208	}
    209
    210	ret = qm_write_regs(qm, QM_IFC_INT_SOURCE_V,
    211			    &vf_data->ifc_int_source, 1);
    212	if (ret) {
    213		dev_err(dev, "failed to write QM_IFC_INT_SOURCE_V\n");
    214		return ret;
    215	}
    216
    217	ret = qm_write_regs(qm, QM_IFC_INT_MASK, &vf_data->ifc_int_mask, 1);
    218	if (ret) {
    219		dev_err(dev, "failed to write QM_IFC_INT_MASK\n");
    220		return ret;
    221	}
    222
    223	ret = qm_write_regs(qm, QM_IFC_INT_SET_V, &vf_data->ifc_int_set, 1);
    224	if (ret) {
    225		dev_err(dev, "failed to write QM_IFC_INT_SET_V\n");
    226		return ret;
    227	}
    228
    229	ret = qm_write_regs(qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
    230	if (ret) {
    231		dev_err(dev, "failed to write QM_QUE_ISO_CFG_V\n");
    232		return ret;
    233	}
    234
    235	ret = qm_write_regs(qm, QM_PAGE_SIZE, &vf_data->page_size, 1);
    236	if (ret) {
    237		dev_err(dev, "failed to write QM_PAGE_SIZE\n");
    238		return ret;
    239	}
    240
    241	/* QM_EQC_DW has 7 regs */
    242	ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
    243	if (ret) {
    244		dev_err(dev, "failed to write QM_EQC_DW\n");
    245		return ret;
    246	}
    247
    248	/* QM_AEQC_DW has 7 regs */
    249	ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
    250	if (ret) {
    251		dev_err(dev, "failed to write QM_AEQC_DW\n");
    252		return ret;
    253	}
    254
    255	return 0;
    256}
    257
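        /*
         * Ring a doorbell on the VF: pack the queue number, command, index
         * and priority into a single 64-bit value and write it to the SQ/CQ
         * or EQ/AEQ doorbell region, depending on the command.
         */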
    258static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
    259		  u16 index, u8 priority)
    260{
    261	u64 doorbell;
    262	u64 dbase;
    263	u16 randata = 0;
    264
    265	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
    266		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
    267	else
    268		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;
    269
    270	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
    271		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
    272		   ((u64)index << QM_DB_INDEX_SHIFT_V2)	 |
    273		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
    274
    275	writeq(doorbell, qm->io_base + dbase);
    276}
    277
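        /*
         * Read the given VF's SQC VFT entry through the PF's VFT
         * configuration registers: the queue base is returned through @rbase
         * and the queue pair count (or a negative errno on timeout) is the
         * return value.
         */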
    278static int pf_qm_get_qp_num(struct hisi_qm *qm, int vf_id, u32 *rbase)
    279{
    280	unsigned int val;
    281	u64 sqc_vft;
    282	u32 qp_num;
    283	int ret;
    284
    285	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
    286					 val & BIT(0), MB_POLL_PERIOD_US,
    287					 MB_POLL_TIMEOUT_US);
    288	if (ret)
    289		return ret;
    290
    291	writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
     292	/* 0 means SQC VFT */
    293	writel(0x0, qm->io_base + QM_VFT_CFG_TYPE);
    294	writel(vf_id, qm->io_base + QM_VFT_CFG);
    295
    296	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
    297	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
    298
    299	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
    300					 val & BIT(0), MB_POLL_PERIOD_US,
    301					 MB_POLL_TIMEOUT_US);
    302	if (ret)
    303		return ret;
    304
    305	sqc_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
    306		  ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) <<
    307		  QM_XQC_ADDR_OFFSET);
    308	*rbase = QM_SQC_VFT_BASE_MASK_V2 &
    309		  (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
    310	qp_num = (QM_SQC_VFT_NUM_MASK_V2 &
    311		  (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
    312
    313	return qp_num;
    314}
    315
    316static void qm_dev_cmd_init(struct hisi_qm *qm)
    317{
    318	/* Clear VF communication status registers. */
    319	writel(0x1, qm->io_base + QM_IFC_INT_SOURCE_V);
    320
    321	/* Enable pf and vf communication. */
    322	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
    323}
    324
    325static int vf_qm_cache_wb(struct hisi_qm *qm)
    326{
    327	unsigned int val;
    328
    329	writel(0x1, qm->io_base + QM_CACHE_WB_START);
    330	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
    331				       val, val & BIT(0), MB_POLL_PERIOD_US,
    332				       MB_POLL_TIMEOUT_US)) {
    333		dev_err(&qm->pdev->dev, "vf QM writeback sqc cache fail\n");
    334		return -EINVAL;
    335	}
    336
    337	return 0;
    338}
    339
     340static struct hisi_acc_vf_core_device *hisi_acc_drvdata(struct pci_dev *pdev)
    341{
    342	struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev);
    343
    344	return container_of(core_device, struct hisi_acc_vf_core_device,
    345			    core_device);
    346}
    347
    348static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev,
    349			    struct hisi_qm *qm)
    350{
    351	int i;
    352
    353	for (i = 0; i < qm->qp_num; i++)
    354		qm_db(qm, i, QM_DOORBELL_CMD_SQ, 0, 1);
    355}
    356
    357static int vf_qm_func_stop(struct hisi_qm *qm)
    358{
    359	return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
    360}
    361
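        /*
         * Validate the incoming match data (magic, device ID, queue pair
         * count and isolation state) against the destination VF before any
         * device state is loaded, and record the source's QM state.
         */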
    362static int vf_qm_check_match(struct hisi_acc_vf_core_device *hisi_acc_vdev,
    363			     struct hisi_acc_vf_migration_file *migf)
    364{
    365	struct acc_vf_data *vf_data = &migf->vf_data;
    366	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
    367	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
    368	struct device *dev = &vf_qm->pdev->dev;
    369	u32 que_iso_state;
    370	int ret;
    371
    372	if (migf->total_length < QM_MATCH_SIZE)
    373		return -EINVAL;
    374
    375	if (vf_data->acc_magic != ACC_DEV_MAGIC) {
    376		dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
    377		return -EINVAL;
    378	}
    379
    380	if (vf_data->dev_id != hisi_acc_vdev->vf_dev->device) {
    381		dev_err(dev, "failed to match VF devices\n");
    382		return -EINVAL;
    383	}
    384
    385	/* vf qp num check */
    386	ret = qm_get_vft(vf_qm, &vf_qm->qp_base);
    387	if (ret <= 0) {
    388		dev_err(dev, "failed to get vft qp nums\n");
    389		return -EINVAL;
    390	}
    391
    392	if (ret != vf_data->qp_num) {
    393		dev_err(dev, "failed to match VF qp num\n");
    394		return -EINVAL;
    395	}
    396
    397	vf_qm->qp_num = ret;
    398
    399	/* vf isolation state check */
    400	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &que_iso_state, 1);
    401	if (ret) {
    402		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V\n");
    403		return ret;
    404	}
    405
    406	if (vf_data->que_iso_cfg != que_iso_state) {
    407		dev_err(dev, "failed to match isolation state\n");
     408		return -EINVAL;
    409	}
    410
    411	ret = qm_write_regs(vf_qm, QM_VF_STATE, &vf_data->vf_qm_state, 1);
    412	if (ret) {
    413		dev_err(dev, "failed to write QM_VF_STATE\n");
    414		return ret;
    415	}
    416
    417	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
    418	return 0;
    419}
    420
    421static int vf_qm_get_match_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
    422				struct acc_vf_data *vf_data)
    423{
    424	struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
    425	struct device *dev = &pf_qm->pdev->dev;
    426	int vf_id = hisi_acc_vdev->vf_id;
    427	int ret;
    428
    429	vf_data->acc_magic = ACC_DEV_MAGIC;
    430	/* save device id */
    431	vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
    432
     433	/* Save the VF qp num obtained from the PF */
    434	ret = pf_qm_get_qp_num(pf_qm, vf_id, &vf_data->qp_base);
    435	if (ret <= 0) {
    436		dev_err(dev, "failed to get vft qp nums!\n");
    437		return -EINVAL;
    438	}
    439
    440	vf_data->qp_num = ret;
    441
     442	/* Save the VF isolation state obtained from the PF */
    443	ret = qm_read_regs(pf_qm, QM_QUE_ISO_CFG_V, &vf_data->que_iso_cfg, 1);
    444	if (ret) {
    445		dev_err(dev, "failed to read QM_QUE_ISO_CFG_V!\n");
    446		return ret;
    447	}
    448
    449	return 0;
    450}
    451
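        /*
         * Restore the saved QM state on the destination VF: queue context
         * DMA addresses, QM registers and the SQC/CQC base tables (via
         * mailbox), then re-enable PF/VF communication. Nothing is restored
         * if only the match data was transferred.
         */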
    452static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
    453			   struct hisi_acc_vf_migration_file *migf)
    454{
    455	struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
    456	struct device *dev = &qm->pdev->dev;
    457	struct acc_vf_data *vf_data = &migf->vf_data;
    458	int ret;
    459
    460	/* Return if only match data was transferred */
    461	if (migf->total_length == QM_MATCH_SIZE)
    462		return 0;
    463
    464	if (migf->total_length < sizeof(struct acc_vf_data))
    465		return -EINVAL;
    466
    467	qm->eqe_dma = vf_data->eqe_dma;
    468	qm->aeqe_dma = vf_data->aeqe_dma;
    469	qm->sqc_dma = vf_data->sqc_dma;
    470	qm->cqc_dma = vf_data->cqc_dma;
    471
    472	qm->qp_base = vf_data->qp_base;
    473	qm->qp_num = vf_data->qp_num;
    474
    475	ret = qm_set_regs(qm, vf_data);
    476	if (ret) {
    477		dev_err(dev, "Set VF regs failed\n");
    478		return ret;
    479	}
    480
    481	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
    482	if (ret) {
    483		dev_err(dev, "Set sqc failed\n");
    484		return ret;
    485	}
    486
    487	ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
    488	if (ret) {
    489		dev_err(dev, "Set cqc failed\n");
    490		return ret;
    491	}
    492
    493	qm_dev_cmd_init(qm);
    494	return 0;
    495}
    496
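        /*
         * Save the VF QM state into the migration data: the match data is
         * always filled in; the full register set and the EQE/AEQE/SQC/CQC
         * DMA addresses are captured only when the VF QM is marked QM_READY.
         */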
    497static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
    498			    struct hisi_acc_vf_migration_file *migf)
    499{
    500	struct acc_vf_data *vf_data = &migf->vf_data;
    501	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
    502	struct device *dev = &vf_qm->pdev->dev;
    503	int ret;
    504
    505	ret = vf_qm_get_match_data(hisi_acc_vdev, vf_data);
    506	if (ret)
    507		return ret;
    508
    509	if (unlikely(qm_wait_dev_not_ready(vf_qm))) {
    510		/* Update state and return with match data */
    511		vf_data->vf_qm_state = QM_NOT_READY;
    512		hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
    513		migf->total_length = QM_MATCH_SIZE;
    514		return 0;
    515	}
    516
    517	vf_data->vf_qm_state = QM_READY;
    518	hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
    519
    520	ret = vf_qm_cache_wb(vf_qm);
    521	if (ret) {
    522		dev_err(dev, "failed to writeback QM Cache!\n");
    523		return ret;
    524	}
    525
    526	ret = qm_get_regs(vf_qm, vf_data);
    527	if (ret)
    528		return -EINVAL;
    529
     530	/* Every reg is 32 bits; the DMA address is 64 bits. */
    531	vf_data->eqe_dma = vf_data->qm_eqc_dw[2];
    532	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
    533	vf_data->eqe_dma |= vf_data->qm_eqc_dw[1];
    534	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2];
    535	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
    536	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
    537
     538	/* Get the SQC and CQC addresses through the SQC_BT/CQC_BT mailbox commands */
    539	ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
    540	if (ret) {
    541		dev_err(dev, "failed to read SQC addr!\n");
    542		return -EINVAL;
    543	}
    544
    545	ret = qm_get_cqc(vf_qm, &vf_data->cqc_dma);
    546	if (ret) {
    547		dev_err(dev, "failed to read CQC addr!\n");
    548		return -EINVAL;
    549	}
    550
    551	migf->total_length = sizeof(struct acc_vf_data);
    552	return 0;
    553}
    554
    555/* Check the PF's RAS state and Function INT state */
    556static int
    557hisi_acc_check_int_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    558{
    559	struct hisi_qm *vfqm = &hisi_acc_vdev->vf_qm;
    560	struct hisi_qm *qm = hisi_acc_vdev->pf_qm;
    561	struct pci_dev *vf_pdev = hisi_acc_vdev->vf_dev;
    562	struct device *dev = &qm->pdev->dev;
    563	u32 state;
    564
    565	/* Check RAS state */
    566	state = qm_check_reg_state(qm, QM_ABNORMAL_INT_STATUS);
    567	if (state) {
    568		dev_err(dev, "failed to check QM RAS state!\n");
    569		return -EBUSY;
    570	}
    571
    572	/* Check Function Communication state between PF and VF */
    573	state = qm_check_reg_state(vfqm, QM_IFC_INT_STATUS);
    574	if (state) {
    575		dev_err(dev, "failed to check QM IFC INT state!\n");
    576		return -EBUSY;
    577	}
    578	state = qm_check_reg_state(vfqm, QM_IFC_INT_SET_V);
    579	if (state) {
    580		dev_err(dev, "failed to check QM IFC INT SET state!\n");
    581		return -EBUSY;
    582	}
    583
    584	/* Check submodule task state */
    585	switch (vf_pdev->device) {
    586	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
    587		state = qm_check_reg_state(qm, SEC_CORE_INT_STATUS);
    588		if (state) {
    589			dev_err(dev, "failed to check QM SEC Core INT state!\n");
    590			return -EBUSY;
    591		}
    592		return 0;
    593	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
    594		state = qm_check_reg_state(qm, HPRE_HAC_INT_STATUS);
    595		if (state) {
    596			dev_err(dev, "failed to check QM HPRE HAC INT state!\n");
    597			return -EBUSY;
    598		}
    599		return 0;
    600	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
    601		state = qm_check_reg_state(qm, HZIP_CORE_INT_STATUS);
    602		if (state) {
    603			dev_err(dev, "failed to check QM ZIP Core INT state!\n");
    604			return -EBUSY;
    605		}
    606		return 0;
    607	default:
    608		dev_err(dev, "failed to detect acc module type!\n");
    609		return -EINVAL;
    610	}
    611}
    612
    613static void hisi_acc_vf_disable_fd(struct hisi_acc_vf_migration_file *migf)
    614{
    615	mutex_lock(&migf->lock);
    616	migf->disabled = true;
    617	migf->total_length = 0;
    618	migf->filp->f_pos = 0;
    619	mutex_unlock(&migf->lock);
    620}
    621
    622static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    623{
    624	if (hisi_acc_vdev->resuming_migf) {
    625		hisi_acc_vf_disable_fd(hisi_acc_vdev->resuming_migf);
    626		fput(hisi_acc_vdev->resuming_migf->filp);
    627		hisi_acc_vdev->resuming_migf = NULL;
    628	}
    629
    630	if (hisi_acc_vdev->saving_migf) {
    631		hisi_acc_vf_disable_fd(hisi_acc_vdev->saving_migf);
    632		fput(hisi_acc_vdev->saving_migf->filp);
    633		hisi_acc_vdev->saving_migf = NULL;
    634	}
    635}
    636
    637/*
    638 * This function is called in all state_mutex unlock cases to
     639 * handle a 'deferred_reset' if one exists.
    640 */
    641static void
    642hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    643{
    644again:
    645	spin_lock(&hisi_acc_vdev->reset_lock);
    646	if (hisi_acc_vdev->deferred_reset) {
    647		hisi_acc_vdev->deferred_reset = false;
    648		spin_unlock(&hisi_acc_vdev->reset_lock);
    649		hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
    650		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
    651		hisi_acc_vf_disable_fds(hisi_acc_vdev);
    652		goto again;
    653	}
    654	mutex_unlock(&hisi_acc_vdev->state_mutex);
    655	spin_unlock(&hisi_acc_vdev->reset_lock);
    656}
    657
    658static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    659{
    660	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
    661
    662	if (hisi_acc_vdev->vf_qm_state != QM_READY)
    663		return;
    664
    665	vf_qm_fun_reset(hisi_acc_vdev, vf_qm);
    666}
    667
    668static int hisi_acc_vf_load_state(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    669{
    670	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
    671	struct hisi_acc_vf_migration_file *migf = hisi_acc_vdev->resuming_migf;
    672	int ret;
    673
    674	/* Check dev compatibility */
    675	ret = vf_qm_check_match(hisi_acc_vdev, migf);
    676	if (ret) {
    677		dev_err(dev, "failed to match the VF!\n");
    678		return ret;
    679	}
    680	/* Recover data to VF */
    681	ret = vf_qm_load_data(hisi_acc_vdev, migf);
    682	if (ret) {
    683		dev_err(dev, "failed to recover the VF!\n");
    684		return ret;
    685	}
    686
    687	return 0;
    688}
    689
    690static int hisi_acc_vf_release_file(struct inode *inode, struct file *filp)
    691{
    692	struct hisi_acc_vf_migration_file *migf = filp->private_data;
    693
    694	hisi_acc_vf_disable_fd(migf);
    695	mutex_destroy(&migf->lock);
    696	kfree(migf);
    697	return 0;
    698}
    699
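        /*
         * Userspace streams the source's acc_vf_data into this fd while the
         * device is RESUMING; the buffered data is applied to the VF when
         * the device transitions to STOP.
         */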
    700static ssize_t hisi_acc_vf_resume_write(struct file *filp, const char __user *buf,
    701					size_t len, loff_t *pos)
    702{
    703	struct hisi_acc_vf_migration_file *migf = filp->private_data;
    704	loff_t requested_length;
    705	ssize_t done = 0;
    706	int ret;
    707
    708	if (pos)
    709		return -ESPIPE;
    710	pos = &filp->f_pos;
    711
    712	if (*pos < 0 ||
    713	    check_add_overflow((loff_t)len, *pos, &requested_length))
    714		return -EINVAL;
    715
    716	if (requested_length > sizeof(struct acc_vf_data))
    717		return -ENOMEM;
    718
    719	mutex_lock(&migf->lock);
    720	if (migf->disabled) {
    721		done = -ENODEV;
    722		goto out_unlock;
    723	}
    724
    725	ret = copy_from_user(&migf->vf_data, buf, len);
    726	if (ret) {
    727		done = -EFAULT;
    728		goto out_unlock;
    729	}
    730	*pos += len;
    731	done = len;
    732	migf->total_length += len;
    733out_unlock:
    734	mutex_unlock(&migf->lock);
    735	return done;
    736}
    737
    738static const struct file_operations hisi_acc_vf_resume_fops = {
    739	.owner = THIS_MODULE,
    740	.write = hisi_acc_vf_resume_write,
    741	.release = hisi_acc_vf_release_file,
    742	.llseek = no_llseek,
    743};
    744
    745static struct hisi_acc_vf_migration_file *
    746hisi_acc_vf_pci_resume(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    747{
    748	struct hisi_acc_vf_migration_file *migf;
    749
    750	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
    751	if (!migf)
    752		return ERR_PTR(-ENOMEM);
    753
    754	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_resume_fops, migf,
    755					O_WRONLY);
    756	if (IS_ERR(migf->filp)) {
    757		int err = PTR_ERR(migf->filp);
    758
    759		kfree(migf);
    760		return ERR_PTR(err);
    761	}
    762
    763	stream_open(migf->filp->f_inode, migf->filp);
    764	mutex_init(&migf->lock);
    765	return migf;
    766}
    767
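        /*
         * Userspace reads the saved acc_vf_data (up to total_length bytes)
         * from this fd while the device is in STOP_COPY.
         */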
    768static ssize_t hisi_acc_vf_save_read(struct file *filp, char __user *buf, size_t len,
    769				     loff_t *pos)
    770{
    771	struct hisi_acc_vf_migration_file *migf = filp->private_data;
    772	ssize_t done = 0;
    773	int ret;
    774
    775	if (pos)
    776		return -ESPIPE;
    777	pos = &filp->f_pos;
    778
    779	mutex_lock(&migf->lock);
    780	if (*pos > migf->total_length) {
    781		done = -EINVAL;
    782		goto out_unlock;
    783	}
    784
    785	if (migf->disabled) {
    786		done = -ENODEV;
    787		goto out_unlock;
    788	}
    789
    790	len = min_t(size_t, migf->total_length - *pos, len);
    791	if (len) {
    792		ret = copy_to_user(buf, &migf->vf_data, len);
    793		if (ret) {
    794			done = -EFAULT;
    795			goto out_unlock;
    796		}
    797		*pos += len;
    798		done = len;
    799	}
    800out_unlock:
    801	mutex_unlock(&migf->lock);
    802	return done;
    803}
    804
    805static const struct file_operations hisi_acc_vf_save_fops = {
    806	.owner = THIS_MODULE,
    807	.read = hisi_acc_vf_save_read,
    808	.release = hisi_acc_vf_release_file,
    809	.llseek = no_llseek,
    810};
    811
    812static struct hisi_acc_vf_migration_file *
    813hisi_acc_vf_stop_copy(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    814{
    815	struct hisi_acc_vf_migration_file *migf;
    816	int ret;
    817
    818	migf = kzalloc(sizeof(*migf), GFP_KERNEL);
    819	if (!migf)
    820		return ERR_PTR(-ENOMEM);
    821
    822	migf->filp = anon_inode_getfile("hisi_acc_vf_mig", &hisi_acc_vf_save_fops, migf,
    823					O_RDONLY);
    824	if (IS_ERR(migf->filp)) {
    825		int err = PTR_ERR(migf->filp);
    826
    827		kfree(migf);
    828		return ERR_PTR(err);
    829	}
    830
    831	stream_open(migf->filp->f_inode, migf->filp);
    832	mutex_init(&migf->lock);
    833
    834	ret = vf_qm_state_save(hisi_acc_vdev, migf);
    835	if (ret) {
    836		fput(migf->filp);
    837		return ERR_PTR(ret);
    838	}
    839
    840	return migf;
    841}
    842
    843static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    844{
    845	struct device *dev = &hisi_acc_vdev->vf_dev->dev;
    846	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
    847	int ret;
    848
    849	ret = vf_qm_func_stop(vf_qm);
    850	if (ret) {
    851		dev_err(dev, "failed to stop QM VF function!\n");
    852		return ret;
    853	}
    854
    855	ret = hisi_acc_check_int_state(hisi_acc_vdev);
    856	if (ret) {
    857		dev_err(dev, "failed to check QM INT state!\n");
    858		return ret;
    859	}
    860	return 0;
    861}
    862
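        /*
         * Handle a single arc of the migration state machine. Only the arcs
         * generated by vfio_mig_get_next_state() for STOP_COPY-only
         * migration are expected here; a saving or resuming file is returned
         * where the arc requires one.
         */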
    863static struct file *
    864hisi_acc_vf_set_device_state(struct hisi_acc_vf_core_device *hisi_acc_vdev,
    865			     u32 new)
    866{
    867	u32 cur = hisi_acc_vdev->mig_state;
    868	int ret;
    869
    870	if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
    871		ret = hisi_acc_vf_stop_device(hisi_acc_vdev);
    872		if (ret)
    873			return ERR_PTR(ret);
    874		return NULL;
    875	}
    876
    877	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
    878		struct hisi_acc_vf_migration_file *migf;
    879
    880		migf = hisi_acc_vf_stop_copy(hisi_acc_vdev);
    881		if (IS_ERR(migf))
    882			return ERR_CAST(migf);
    883		get_file(migf->filp);
    884		hisi_acc_vdev->saving_migf = migf;
    885		return migf->filp;
    886	}
    887
    888	if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP)) {
    889		hisi_acc_vf_disable_fds(hisi_acc_vdev);
    890		return NULL;
    891	}
    892
    893	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
    894		struct hisi_acc_vf_migration_file *migf;
    895
    896		migf = hisi_acc_vf_pci_resume(hisi_acc_vdev);
    897		if (IS_ERR(migf))
    898			return ERR_CAST(migf);
    899		get_file(migf->filp);
    900		hisi_acc_vdev->resuming_migf = migf;
    901		return migf->filp;
    902	}
    903
    904	if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
    905		ret = hisi_acc_vf_load_state(hisi_acc_vdev);
    906		if (ret)
    907			return ERR_PTR(ret);
    908		hisi_acc_vf_disable_fds(hisi_acc_vdev);
    909		return NULL;
    910	}
    911
    912	if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
    913		hisi_acc_vf_start_device(hisi_acc_vdev);
    914		return NULL;
    915	}
    916
    917	/*
    918	 * vfio_mig_get_next_state() does not use arcs other than the above
    919	 */
    920	WARN_ON(true);
    921	return ERR_PTR(-EINVAL);
    922}
    923
    924static struct file *
    925hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
    926				   enum vfio_device_mig_state new_state)
    927{
    928	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
    929			struct hisi_acc_vf_core_device, core_device.vdev);
    930	enum vfio_device_mig_state next_state;
    931	struct file *res = NULL;
    932	int ret;
    933
    934	mutex_lock(&hisi_acc_vdev->state_mutex);
    935	while (new_state != hisi_acc_vdev->mig_state) {
    936		ret = vfio_mig_get_next_state(vdev,
    937					      hisi_acc_vdev->mig_state,
    938					      new_state, &next_state);
    939		if (ret) {
    940			res = ERR_PTR(-EINVAL);
    941			break;
    942		}
    943
    944		res = hisi_acc_vf_set_device_state(hisi_acc_vdev, next_state);
    945		if (IS_ERR(res))
    946			break;
    947		hisi_acc_vdev->mig_state = next_state;
    948		if (WARN_ON(res && new_state != hisi_acc_vdev->mig_state)) {
    949			fput(res);
    950			res = ERR_PTR(-EINVAL);
    951			break;
    952		}
    953	}
    954	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
    955	return res;
    956}
    957
    958static int
    959hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
    960				   enum vfio_device_mig_state *curr_state)
    961{
    962	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(vdev,
    963			struct hisi_acc_vf_core_device, core_device.vdev);
    964
    965	mutex_lock(&hisi_acc_vdev->state_mutex);
    966	*curr_state = hisi_acc_vdev->mig_state;
    967	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
    968	return 0;
    969}
    970
    971static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
    972{
     973	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
    974
    975	if (hisi_acc_vdev->core_device.vdev.migration_flags !=
    976				VFIO_MIGRATION_STOP_COPY)
    977		return;
    978
    979	/*
    980	 * As the higher VFIO layers are holding locks across reset and using
    981	 * those same locks with the mm_lock we need to prevent ABBA deadlock
    982	 * with the state_mutex and mm_lock.
    983	 * In case the state_mutex was taken already we defer the cleanup work
    984	 * to the unlock flow of the other running context.
    985	 */
    986	spin_lock(&hisi_acc_vdev->reset_lock);
    987	hisi_acc_vdev->deferred_reset = true;
    988	if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
    989		spin_unlock(&hisi_acc_vdev->reset_lock);
    990		return;
    991	}
    992	spin_unlock(&hisi_acc_vdev->reset_lock);
    993	hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
    994}
    995
    996static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
    997{
    998	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
    999	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
   1000	struct pci_dev *vf_dev = vdev->pdev;
   1001
   1002	/*
   1003	 * ACC VF dev BAR2 region consists of both functional register space
   1004	 * and migration control register space. For migration to work, we
   1005	 * need access to both. Hence, we map the entire BAR2 region here.
   1006	 * But unnecessarily exposing the migration BAR region to the Guest
   1007	 * has the potential to prevent/corrupt the Guest migration. Hence,
   1008	 * we restrict access to the migration control space from
    1009	 * the Guest (please see the mmap/ioctl/read/write override functions).
   1010	 *
   1011	 * Please note that it is OK to expose the entire VF BAR if migration
   1012	 * is not supported or required as this cannot affect the ACC PF
   1013	 * configurations.
   1014	 *
   1015	 * Also the HiSilicon ACC VF devices supported by this driver on
   1016	 * HiSilicon hardware platforms are integrated end point devices
   1017	 * and the platform lacks the capability to perform any PCIe P2P
   1018	 * between these devices.
   1019	 */
   1020
   1021	vf_qm->io_base =
   1022		ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
   1023			pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
   1024	if (!vf_qm->io_base)
   1025		return -EIO;
   1026
   1027	vf_qm->fun_type = QM_HW_VF;
   1028	vf_qm->pdev = vf_dev;
   1029	mutex_init(&vf_qm->mailbox_lock);
   1030
   1031	return 0;
   1032}
   1033
   1034static struct hisi_qm *hisi_acc_get_pf_qm(struct pci_dev *pdev)
   1035{
   1036	struct hisi_qm	*pf_qm;
   1037	struct pci_driver *pf_driver;
   1038
   1039	if (!pdev->is_virtfn)
   1040		return NULL;
   1041
   1042	switch (pdev->device) {
   1043	case PCI_DEVICE_ID_HUAWEI_SEC_VF:
   1044		pf_driver = hisi_sec_get_pf_driver();
   1045		break;
   1046	case PCI_DEVICE_ID_HUAWEI_HPRE_VF:
   1047		pf_driver = hisi_hpre_get_pf_driver();
   1048		break;
   1049	case PCI_DEVICE_ID_HUAWEI_ZIP_VF:
   1050		pf_driver = hisi_zip_get_pf_driver();
   1051		break;
   1052	default:
   1053		return NULL;
   1054	}
   1055
   1056	if (!pf_driver)
   1057		return NULL;
   1058
   1059	pf_qm = pci_iov_get_pf_drvdata(pdev, pf_driver);
   1060
   1061	return !IS_ERR(pf_qm) ? pf_qm : NULL;
   1062}
   1063
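        /*
         * The second half of BAR2 holds the migration control registers.
         * Reject accesses that start in that half and clamp ones that would
         * cross into it, so the guest only ever touches the functional half.
         */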
   1064static int hisi_acc_pci_rw_access_check(struct vfio_device *core_vdev,
   1065					size_t count, loff_t *ppos,
   1066					size_t *new_count)
   1067{
   1068	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
   1069	struct vfio_pci_core_device *vdev =
   1070		container_of(core_vdev, struct vfio_pci_core_device, vdev);
   1071
   1072	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
   1073		loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
   1074		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
   1075
   1076		/* Check if access is for migration control region */
   1077		if (pos >= end)
   1078			return -EINVAL;
   1079
   1080		*new_count = min(count, (size_t)(end - pos));
   1081	}
   1082
   1083	return 0;
   1084}
   1085
   1086static int hisi_acc_vfio_pci_mmap(struct vfio_device *core_vdev,
   1087				  struct vm_area_struct *vma)
   1088{
   1089	struct vfio_pci_core_device *vdev =
   1090		container_of(core_vdev, struct vfio_pci_core_device, vdev);
   1091	unsigned int index;
   1092
   1093	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
   1094	if (index == VFIO_PCI_BAR2_REGION_INDEX) {
   1095		u64 req_len, pgoff, req_start;
   1096		resource_size_t end = pci_resource_len(vdev->pdev, index) / 2;
   1097
   1098		req_len = vma->vm_end - vma->vm_start;
   1099		pgoff = vma->vm_pgoff &
   1100			((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
   1101		req_start = pgoff << PAGE_SHIFT;
   1102
   1103		if (req_start + req_len > end)
   1104			return -EINVAL;
   1105	}
   1106
   1107	return vfio_pci_core_mmap(core_vdev, vma);
   1108}
   1109
   1110static ssize_t hisi_acc_vfio_pci_write(struct vfio_device *core_vdev,
   1111				       const char __user *buf, size_t count,
   1112				       loff_t *ppos)
   1113{
   1114	size_t new_count = count;
   1115	int ret;
   1116
   1117	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
   1118	if (ret)
   1119		return ret;
   1120
   1121	return vfio_pci_core_write(core_vdev, buf, new_count, ppos);
   1122}
   1123
   1124static ssize_t hisi_acc_vfio_pci_read(struct vfio_device *core_vdev,
   1125				      char __user *buf, size_t count,
   1126				      loff_t *ppos)
   1127{
   1128	size_t new_count = count;
   1129	int ret;
   1130
   1131	ret = hisi_acc_pci_rw_access_check(core_vdev, count, ppos, &new_count);
   1132	if (ret)
   1133		return ret;
   1134
   1135	return vfio_pci_core_read(core_vdev, buf, new_count, ppos);
   1136}
   1137
   1138static long hisi_acc_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
   1139				    unsigned long arg)
   1140{
   1141	if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
   1142		struct vfio_pci_core_device *vdev =
   1143			container_of(core_vdev, struct vfio_pci_core_device, vdev);
   1144		struct pci_dev *pdev = vdev->pdev;
   1145		struct vfio_region_info info;
   1146		unsigned long minsz;
   1147
   1148		minsz = offsetofend(struct vfio_region_info, offset);
   1149
   1150		if (copy_from_user(&info, (void __user *)arg, minsz))
   1151			return -EFAULT;
   1152
   1153		if (info.argsz < minsz)
   1154			return -EINVAL;
   1155
   1156		if (info.index == VFIO_PCI_BAR2_REGION_INDEX) {
   1157			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
   1158
   1159			/*
   1160			 * ACC VF dev BAR2 region consists of both functional
   1161			 * register space and migration control register space.
   1162			 * Report only the functional region to Guest.
   1163			 */
   1164			info.size = pci_resource_len(pdev, info.index) / 2;
   1165
   1166			info.flags = VFIO_REGION_INFO_FLAG_READ |
   1167					VFIO_REGION_INFO_FLAG_WRITE |
   1168					VFIO_REGION_INFO_FLAG_MMAP;
   1169
   1170			return copy_to_user((void __user *)arg, &info, minsz) ?
   1171					    -EFAULT : 0;
   1172		}
   1173	}
   1174	return vfio_pci_core_ioctl(core_vdev, cmd, arg);
   1175}
   1176
   1177static int hisi_acc_vfio_pci_open_device(struct vfio_device *core_vdev)
   1178{
   1179	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
   1180			struct hisi_acc_vf_core_device, core_device.vdev);
   1181	struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
   1182	int ret;
   1183
   1184	ret = vfio_pci_core_enable(vdev);
   1185	if (ret)
   1186		return ret;
   1187
   1188	if (core_vdev->ops->migration_set_state) {
   1189		ret = hisi_acc_vf_qm_init(hisi_acc_vdev);
   1190		if (ret) {
   1191			vfio_pci_core_disable(vdev);
   1192			return ret;
   1193		}
   1194		hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
   1195	}
   1196
   1197	vfio_pci_core_finish_enable(vdev);
   1198	return 0;
   1199}
   1200
   1201static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
   1202{
   1203	struct hisi_acc_vf_core_device *hisi_acc_vdev = container_of(core_vdev,
   1204			struct hisi_acc_vf_core_device, core_device.vdev);
   1205	struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
   1206
   1207	iounmap(vf_qm->io_base);
   1208	vfio_pci_core_close_device(core_vdev);
   1209}
   1210
   1211static const struct vfio_device_ops hisi_acc_vfio_pci_migrn_ops = {
   1212	.name = "hisi-acc-vfio-pci-migration",
   1213	.open_device = hisi_acc_vfio_pci_open_device,
   1214	.close_device = hisi_acc_vfio_pci_close_device,
   1215	.ioctl = hisi_acc_vfio_pci_ioctl,
   1216	.device_feature = vfio_pci_core_ioctl_feature,
   1217	.read = hisi_acc_vfio_pci_read,
   1218	.write = hisi_acc_vfio_pci_write,
   1219	.mmap = hisi_acc_vfio_pci_mmap,
   1220	.request = vfio_pci_core_request,
   1221	.match = vfio_pci_core_match,
   1222	.migration_set_state = hisi_acc_vfio_pci_set_device_state,
   1223	.migration_get_state = hisi_acc_vfio_pci_get_device_state,
   1224};
   1225
   1226static const struct vfio_device_ops hisi_acc_vfio_pci_ops = {
   1227	.name = "hisi-acc-vfio-pci",
   1228	.open_device = hisi_acc_vfio_pci_open_device,
   1229	.close_device = vfio_pci_core_close_device,
   1230	.ioctl = vfio_pci_core_ioctl,
   1231	.device_feature = vfio_pci_core_ioctl_feature,
   1232	.read = vfio_pci_core_read,
   1233	.write = vfio_pci_core_write,
   1234	.mmap = vfio_pci_core_mmap,
   1235	.request = vfio_pci_core_request,
   1236	.match = vfio_pci_core_match,
   1237};
   1238
   1239static int
   1240hisi_acc_vfio_pci_migrn_init(struct hisi_acc_vf_core_device *hisi_acc_vdev,
   1241			     struct pci_dev *pdev, struct hisi_qm *pf_qm)
   1242{
   1243	int vf_id;
   1244
   1245	vf_id = pci_iov_vf_id(pdev);
   1246	if (vf_id < 0)
   1247		return vf_id;
   1248
   1249	hisi_acc_vdev->vf_id = vf_id + 1;
   1250	hisi_acc_vdev->core_device.vdev.migration_flags =
   1251					VFIO_MIGRATION_STOP_COPY;
   1252	hisi_acc_vdev->pf_qm = pf_qm;
   1253	hisi_acc_vdev->vf_dev = pdev;
   1254	mutex_init(&hisi_acc_vdev->state_mutex);
   1255
   1256	return 0;
   1257}
   1258
   1259static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
   1260{
   1261	struct hisi_acc_vf_core_device *hisi_acc_vdev;
   1262	struct hisi_qm *pf_qm;
   1263	int ret;
   1264
   1265	hisi_acc_vdev = kzalloc(sizeof(*hisi_acc_vdev), GFP_KERNEL);
   1266	if (!hisi_acc_vdev)
   1267		return -ENOMEM;
   1268
   1269	pf_qm = hisi_acc_get_pf_qm(pdev);
   1270	if (pf_qm && pf_qm->ver >= QM_HW_V3) {
   1271		ret = hisi_acc_vfio_pci_migrn_init(hisi_acc_vdev, pdev, pf_qm);
   1272		if (!ret) {
   1273			vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
   1274						  &hisi_acc_vfio_pci_migrn_ops);
   1275		} else {
   1276			pci_warn(pdev, "migration support failed, continue with generic interface\n");
   1277			vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
   1278						  &hisi_acc_vfio_pci_ops);
   1279		}
   1280	} else {
   1281		vfio_pci_core_init_device(&hisi_acc_vdev->core_device, pdev,
   1282					  &hisi_acc_vfio_pci_ops);
   1283	}
   1284
   1285	dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device);
   1286	ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device);
   1287	if (ret)
   1288		goto out_free;
   1289	return 0;
   1290
   1291out_free:
   1292	vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
   1293	kfree(hisi_acc_vdev);
   1294	return ret;
   1295}
   1296
   1297static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev)
   1298{
    1299	struct hisi_acc_vf_core_device *hisi_acc_vdev = hisi_acc_drvdata(pdev);
   1300
   1301	vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device);
   1302	vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device);
   1303	kfree(hisi_acc_vdev);
   1304}
   1305
   1306static const struct pci_device_id hisi_acc_vfio_pci_table[] = {
   1307	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
   1308	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
   1309	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) },
   1310	{ }
   1311};
   1312
   1313MODULE_DEVICE_TABLE(pci, hisi_acc_vfio_pci_table);
   1314
   1315static const struct pci_error_handlers hisi_acc_vf_err_handlers = {
   1316	.reset_done = hisi_acc_vf_pci_aer_reset_done,
   1317	.error_detected = vfio_pci_core_aer_err_detected,
   1318};
   1319
   1320static struct pci_driver hisi_acc_vfio_pci_driver = {
   1321	.name = KBUILD_MODNAME,
   1322	.id_table = hisi_acc_vfio_pci_table,
   1323	.probe = hisi_acc_vfio_pci_probe,
   1324	.remove = hisi_acc_vfio_pci_remove,
   1325	.err_handler = &hisi_acc_vf_err_handlers,
   1326	.driver_managed_dma = true,
   1327};
   1328
   1329module_pci_driver(hisi_acc_vfio_pci_driver);
   1330
   1331MODULE_LICENSE("GPL v2");
   1332MODULE_AUTHOR("Liu Longfang <liulongfang@huawei.com>");
   1333MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
   1334MODULE_DESCRIPTION("HiSilicon VFIO PCI - VFIO PCI driver with live migration support for HiSilicon ACC device family");