cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

otx2_cptvf_main.c (10281B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
#include "cn10k_cpt.h"
#include <rvu_reg.h>

#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"

static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);

	/* Enable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}

static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Disable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);
}

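/*
 * Allocate all of the VF's MSI-X vectors and register the PF<=>VF mailbox
 * interrupt handler. If the PF does not acknowledge the initial ready
 * message, the probe is deferred so it can be retried once the PF is up.
 */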
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
	int ret, irq;
	int num_vec;

	num_vec = pci_msix_vec_count(cptvf->pdev);
	if (num_vec <= 0)
		return -EINVAL;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cptvf->pdev->dev,
			"Request for %d msix vectors failed\n", num_vec);
		return ret;
	}
	irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
	/* Register VF<=>PF mailbox interrupt handler */
	ret = devm_request_irq(&cptvf->pdev->dev, irq,
			       otx2_cptvf_pfvf_mbox_intr, 0,
			       "CPTPFVF Mbox", cptvf);
	if (ret)
		return ret;
	/* Enable PF-VF mailbox interrupts */
	cptvf_enable_pfvf_mbox_intrs(cptvf);

	ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
	if (ret) {
		dev_warn(&cptvf->pdev->dev,
			 "PF not responding to mailbox, deferring probe\n");
		cptvf_disable_pfvf_mbox_intrs(cptvf);
		return -EPROBE_DEFER;
	}
	return 0;
}

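/*
 * Set up the PF<=>VF mailbox: a dedicated workqueue for deferred message
 * handling, the mailbox memory region (BAR2 on cn10k, BAR4 otherwise) and
 * the bounce buffer used for mailbox messages.
 */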
static int cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;
	int ret;

	cptvf->pfvf_mbox_wq = alloc_workqueue("cpt_pfvf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptvf->pfvf_mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		cptvf->pfvf_mbox_base = cptvf->reg_base +
					CN10K_CPT_VF_MBOX_REGION;
	} else {
		offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
		size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
		/* Map PF-VF mailbox memory */
		cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
							size);
		if (!cptvf->pfvf_mbox_base) {
			dev_err(&pdev->dev, "Unable to map BAR4\n");
			ret = -ENOMEM;
			goto free_wqe;
		}
	}

	ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
			     pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
	if (ret)
		goto free_wqe;

	ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
	if (ret)
		goto destroy_mbox;

	INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
	return 0;

destroy_mbox:
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	return ret;
}

static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
}

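/*
 * Instruction completion is post-processed in tasklet context; each LF
 * owns one otx2_cptlf_wqe whose tasklet runs otx2_cpt_post_process().
 */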
static void cptlf_work_handler(unsigned long data)
{
	otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}

static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		if (!lfs->lf[i].wqe)
			continue;

		tasklet_kill(&lfs->lf[i].wqe->work);
		kfree(lfs->lf[i].wqe);
		lfs->lf[i].wqe = NULL;
	}
}

static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_wqe *wqe;
	int i, ret = 0;

	for (i = 0; i < lfs->lfs_num; i++) {
		wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
		if (!wqe) {
			ret = -ENOMEM;
			goto cleanup_tasklet;
		}

		tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
		wqe->lfs = lfs;
		wqe->lf_num = i;
		lfs->lf[i].wqe = wqe;
	}
	return 0;

cleanup_tasklet:
	cleanup_tasklet_work(lfs);
	return ret;
}

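/*
 * Each LF keeps a pending queue of otx2_cpt_pending_entry records, one per
 * in-flight instruction, protected by a per-queue spinlock.
 */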
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		kfree(lfs->lf[i].pqueue.head);
		lfs->lf[i].pqueue.head = NULL;
	}
}

static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int size, ret, i;

	if (!lfs->lfs_num)
		return -EINVAL;

	for (i = 0; i < lfs->lfs_num; i++) {
		lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
		size = lfs->lf[i].pqueue.qlen *
		       sizeof(struct otx2_cpt_pending_entry);

		lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
		if (!lfs->lf[i].pqueue.head) {
			ret = -ENOMEM;
			goto error;
		}

		/* Initialize spin lock */
		spin_lock_init(&lfs->lf[i].pqueue.lock);
	}
	return 0;

error:
	free_pending_queues(lfs);
	return ret;
}

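/*
 * lf_sw_init()/lf_sw_cleanup() tie the two pieces of per-LF software state
 * together: the pending queues and the completion tasklets.
 */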
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
	cleanup_tasklet_work(lfs);
	free_pending_queues(lfs);
}

static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
	int ret;

	ret = alloc_pending_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating pending queues failed\n");
		return ret;
	}
	ret = init_tasklet_work(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Tasklet work init failed\n");
		goto pending_queues_free;
	}
	return 0;

pending_queues_free:
	free_pending_queues(lfs);
	return ret;
}

static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
	atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);

	/* Remove interrupts affinity */
	otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queue */
	otx2_cptlf_disable_iqueues(lfs);
	/* Unregister crypto algorithms */
	otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
	/* Unregister LFs interrupts */
	otx2_cptlf_unregister_interrupts(lfs);
	/* Cleanup LFs software side */
	lf_sw_cleanup(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
}

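/*
 * Bring up the CPT LFs: query the PF for the kernel-crypto engine group
 * and the per-VF LF limit, attach and initialize the LFs, wire up their
 * interrupts and finally register the crypto algorithms.
 */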
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	struct device *dev = &cptvf->pdev->dev;
	int ret, lfs_num;
	u8 eng_grp_msk;

	/* Get engine group number for symmetric crypto */
	cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
	if (ret)
		return ret;

	if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev, "Engine group for kernel crypto not available\n");
		ret = -ENOENT;
		return ret;
	}
	eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

	ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
	if (ret)
		return ret;

	lfs->reg_base = cptvf->reg_base;
	lfs->pdev = cptvf->pdev;
	lfs->mbox = &cptvf->pfvf_mbox;

	lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
		  num_online_cpus();
	ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
			      lfs_num);
	if (ret)
		return ret;

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Initialize LFs software side */
	ret = lf_sw_init(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register LFs interrupts */
	ret = otx2_cptlf_register_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	/* Set interrupts affinity */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_intr;

	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	/* Register crypto algorithms */
	ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
	if (ret) {
		dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
		goto disable_irqs;
	}
	return 0;

disable_irqs:
	otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
	otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
	lf_sw_cleanup(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);

	return ret;
}

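/*
 * Probe: map the VF's BARs, initialize the PF<=>VF mailbox and its
 * interrupt, then bring up the CPT LFs.
 */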
static int otx2_cptvf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptvf_dev *cptvf;
	int ret;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map VF's configuration registers */
	ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPTVF_DRV_NAME);
	if (ret) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);

	ret = cn10k_cptvf_lmtst_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Initialize PF<=>VF mailbox */
	ret = cptvf_pfvf_mbox_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Register interrupts */
	ret = cptvf_register_interrupts(cptvf);
	if (ret)
		goto destroy_pfvf_mbox;

	/* Initialize CPT LFs */
	ret = cptvf_lf_init(cptvf);
	if (ret)
		goto unregister_interrupts;

	return 0;

unregister_interrupts:
	cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
	cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

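/* Teardown mirrors probe: shut down the LFs, then the mailbox and its IRQ. */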
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT VF device.\n");
		return;
	}
	cptvf_lf_shutdown(&cptvf->lfs);
	/* Disable PF-VF mailbox interrupt */
	cptvf_disable_pfvf_mbox_intrs(cptvf);
	/* Destroy PF-VF mbox */
	cptvf_pfvf_mbox_destroy(cptvf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
	{PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver otx2_cptvf_pci_driver = {
	.name = OTX2_CPTVF_DRV_NAME,
	.id_table = otx2_cptvf_id_table,
	.probe = otx2_cptvf_probe,
	.remove = otx2_cptvf_remove,
};

module_pci_driver(otx2_cptvf_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);