cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

adf_vf_isr.c (8145B)


// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_VINTSOU_OFFSET	0x204
#define ADF_VINTMSK_OFFSET	0x208
#define ADF_VINTSOU_BUN		BIT(0)
#define ADF_VINTSOU_PF2VF	BIT(1)
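
/*
 * These CSRs sit in the VF's PMISC BAR: VINTSOU reports pending interrupt
 * sources (bit 0 = ring bundle, bit 1 = PF-to-VF message) and VINTMSK
 * masks them. adf_enable_pf2vf_interrupts() below clears the mask (0x0);
 * adf_disable_pf2vf_interrupts() writes 0x2, masking only the PF2VF
 * source while leaving bundle interrupts enabled.
 */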

static struct workqueue_struct *adf_vf_stop_wq;

struct adf_vf_stop_data {
	struct adf_accel_dev *accel_dev;
	struct work_struct work;
};

void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);

	ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x0);
}

void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);

	ADF_CSR_WR(pmisc_addr, ADF_VINTMSK_OFFSET, 0x2);
}
EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);

static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
					 PCI_IRQ_MSI);
	if (unlikely(stat < 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to enable MSI interrupt: %d\n", stat);
		return stat;
	}

	return 0;
}

static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	pci_free_irq_vectors(pdev);
}

static void adf_dev_stop_async(struct work_struct *work)
{
	struct adf_vf_stop_data *stop_data =
		container_of(work, struct adf_vf_stop_data, work);
	struct adf_accel_dev *accel_dev = stop_data->accel_dev;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	/* Re-enable PF2VF interrupts */
	adf_enable_pf2vf_interrupts(accel_dev);
	kfree(stop_data);
}

int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;

	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't schedule stop for vf_%d\n",
			accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);

	return 0;
}
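
/*
 * A PF "restarting" message is handled in atomic context (hence the
 * GFP_ATOMIC above), where stopping the device would be unsafe, so the
 * actual restarting-notify/stop/shutdown sequence is deferred to
 * adf_dev_stop_async() on the adf_vf_stop_wq workqueue; the work item
 * re-enables PF2VF interrupts and frees itself once the device is down.
 */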

static void adf_pf2vf_bh_handler(void *data)
{
	struct adf_accel_dev *accel_dev = data;
	bool ret;

	ret = adf_recv_and_handle_pf2vf_msg(accel_dev);
	if (ret)
		/* Re-enable PF2VF interrupts */
		adf_enable_pf2vf_interrupts(accel_dev);
}

static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
		     (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);

	mutex_init(&accel_dev->vf.vf2pf_lock);
	return 0;
}

static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
{
	tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
	tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
	mutex_destroy(&accel_dev->vf.vf2pf_lock);
}

static irqreturn_t adf_isr(int irq, void *privdata)
{
	struct adf_accel_dev *accel_dev = privdata;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
	struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	void __iomem *pmisc_bar_addr = pmisc->virt_addr;
	bool handled = false;
	u32 v_int, v_mask;

	/* Read VF INT source CSR to determine the source of VF interrupt */
	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTSOU_OFFSET);

	/* Read VF INT mask CSR to determine which sources are masked */
	v_mask = ADF_CSR_RD(pmisc_bar_addr, ADF_VINTMSK_OFFSET);

	/*
	 * Recompute v_int ignoring sources that are masked. This is to
	 * avoid rescheduling the tasklet for interrupts already handled
	 */
	v_int &= ~v_mask;

	/* Check for PF2VF interrupt */
	if (v_int & ADF_VINTSOU_PF2VF) {
		/* Disable PF to VF interrupt */
		adf_disable_pf2vf_interrupts(accel_dev);

		/* Schedule tasklet to handle interrupt BH */
		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
		handled = true;
	}

	/* Check bundle interrupt */
	if (v_int & ADF_VINTSOU_BUN) {
		struct adf_etr_data *etr_data = accel_dev->transport;
		struct adf_etr_bank_data *bank = &etr_data->banks[0];

		/* Disable Flag and Coalesce Ring Interrupts */
		csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
						    bank->bank_number, 0);
		tasklet_hi_schedule(&bank->resp_handler);
		handled = true;
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	unsigned int cpu;
	int ret;

	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
			  (void *)accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
			accel_dev->vf.irq_name);
		return ret;
	}
	cpu = accel_dev->accel_id % num_online_cpus();
	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
	accel_dev->vf.irq_enabled = true;

	return ret;
}
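
/*
 * The affinity hint above distributes VF interrupts round-robin across the
 * online CPUs, keyed on accel_id, so that many VFs on one host do not all
 * fire on the same CPU. It is only a hint: the IRQ core and userspace
 * irqbalance remain free to place the interrupt elsewhere.
 */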

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;

	tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
		     (unsigned long)priv_data->banks);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;

	tasklet_disable(&priv_data->banks[0].resp_handler);
	tasklet_kill(&priv_data->banks[0].resp_handler);
}

/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device virtual function.
 */
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	if (accel_dev->vf.irq_enabled) {
		irq_set_affinity_hint(pdev->irq, NULL);
		free_irq(pdev->irq, accel_dev);
	}
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_free);

/**
 * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	if (adf_enable_msi(accel_dev))
		goto err_out;

	if (adf_setup_pf2vf_bh(accel_dev))
		goto err_disable_msi;

	if (adf_setup_bh(accel_dev))
		goto err_cleanup_pf2vf_bh;

	if (adf_request_msi_irq(accel_dev))
		goto err_cleanup_bh;

	return 0;

err_cleanup_bh:
	adf_cleanup_bh(accel_dev);

err_cleanup_pf2vf_bh:
	adf_cleanup_pf2vf_bh(accel_dev);

err_disable_msi:
	adf_disable_msi(accel_dev);

err_out:
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
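
/*
 * For illustration, a VF driver's setup path might pair these exports
 * roughly as follows (adf_my_vf_init() is a hypothetical caller; the real
 * ones live in the per-device VF drivers, not in this file):
 *
 *	int adf_my_vf_init(struct adf_accel_dev *accel_dev)
 *	{
 *		int ret = adf_vf_isr_resource_alloc(accel_dev);
 *
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *
 * adf_vf_isr_resource_alloc() unwinds its own partial setup on failure,
 * so callers only need adf_vf_isr_resource_free() after a successful call.
 */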

/**
 * adf_flush_vf_wq() - Flush workqueue for VF
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function disables the PF/VF interrupts on the VF so that no new messages
 * are received and flushes the workqueue 'adf_vf_stop_wq'.
 *
 * Return: void.
 */
void adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
	adf_disable_pf2vf_interrupts(accel_dev);

	flush_workqueue(adf_vf_stop_wq);
}
EXPORT_SYMBOL_GPL(adf_flush_vf_wq);

/**
 * adf_init_vf_wq() - Init workqueue for VF
 *
 * Function initializes the workqueue 'adf_vf_stop_wq' for the VF.
 *
 * Return: 0 on success, error code otherwise.
 */
int __init adf_init_vf_wq(void)
{
	adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);

	return !adf_vf_stop_wq ? -EFAULT : 0;
}
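
/*
 * adf_init_vf_wq() runs once at module init (note the __init annotation),
 * before any VF device is probed, and adf_exit_vf_wq() below is its
 * module-exit counterpart; the NULL check keeps teardown safe even if the
 * workqueue was never allocated.
 */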

void adf_exit_vf_wq(void)
{
	if (adf_vf_stop_wq)
		destroy_workqueue(adf_vf_stop_wq);

	adf_vf_stop_wq = NULL;
}