cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

adf_sriov.c (6498B)
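PF-side SR-IOV support for the Intel QAT (adf) driver: per-VF state setup,
VF2PF message handling via a dedicated workqueue, and the PCI
sriov_configure entry point.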


// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2021 Intel Corporation */
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pfvf_pf_msg.h"

#define ADF_VF2PF_RATELIMIT_INTERVAL	8
#define ADF_VF2PF_RATELIMIT_BURST	130
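/*
 * ratelimit_state_init() below takes its interval in jiffies, so each VF
 * may send bursts of up to ADF_VF2PF_RATELIMIT_BURST messages per
 * ADF_VF2PF_RATELIMIT_INTERVAL jiffies before being rate limited
 * (enforcement happens elsewhere in the driver).
 */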

static struct workqueue_struct *pf2vf_resp_wq;

struct adf_pf2vf_resp {
	struct work_struct pf2vf_resp_work;
	struct adf_accel_vf_info *vf_info;
};

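/*
 * Deferred handler for a single VF2PF message. Each message gets its own
 * heap-allocated adf_pf2vf_resp work item, which frees itself once the
 * message has been processed; the interrupt for the originating VF is only
 * re-armed if the message was successfully received and handled.
 */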
static void adf_iov_send_resp(struct work_struct *work)
{
	struct adf_pf2vf_resp *pf2vf_resp =
		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
	struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	u32 vf_nr = vf_info->vf_nr;
	bool ret;

	ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
	if (ret)
		/* re-enable interrupt on PF from this VF */
		adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);

	kfree(pf2vf_resp);
}

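/*
 * Presumably invoked from the PF's VF2PF interrupt handler, hence
 * GFP_ATOMIC; if the allocation fails, the message is dropped and the
 * VF's interrupt is left masked.
 */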
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info)
{
	struct adf_pf2vf_resp *pf2vf_resp;

	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
	if (!pf2vf_resp)
		return;

	pf2vf_resp->vf_info = vf_info;
	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}

static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_vf_info *vf_info;
	int i;

	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
	     i++, vf_info++) {
		/* This ptr will be populated when VFs are created */
		vf_info->accel_dev = accel_dev;
		vf_info->vf_nr = i;
		vf_info->vf_compat_ver = 0;

		mutex_init(&vf_info->pf2vf_lock);
		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
				     ADF_VF2PF_RATELIMIT_INTERVAL,
				     ADF_VF2PF_RATELIMIT_BURST);
	}

	/* Set Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, true);

     75	/* Enable VF to PF interrupts for all VFs */
     76	adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
     77
     78	/*
     79	 * Due to the hardware design, when SR-IOV and the ring arbiter
     80	 * are enabled all the VFs supported in hardware must be enabled in
     81	 * order for all the hardware resources (i.e. bundles) to be usable.
     82	 * When SR-IOV is enabled, each of the VFs will own one bundle.
     83	 */
     84	return pci_enable_sriov(pdev, totalvfs);
     85}
     86
/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev:  Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 *
 * Return: void
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
	struct adf_accel_vf_info *vf;
	int i;

	if (!accel_dev->pf.vf_info)
		return;

	adf_pf2vf_notify_restarting(accel_dev);
	pci_disable_sriov(accel_to_pci_dev(accel_dev));

	/* Disable VF to PF interrupts */
	adf_disable_all_vf2pf_interrupts(accel_dev);

	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
	if (hw_data->configure_iov_threads)
		hw_data->configure_iov_threads(accel_dev, false);

	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
		mutex_destroy(&vf->pf2vf_lock);

	kfree(accel_dev->pf.vf_info);
	accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);

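/*
 * Save the ServicesEnabled key before stopping the device, then restore it
 * after shutdown: bringing the device down apparently discards its
 * configuration table, and the service configuration must survive the
 * restart triggered by enabling SR-IOV.
 */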
static int adf_sriov_prepare_restart(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev:  Pointer to PCI device.
 * @numvfs: Number of virtual functions (VFs) to enable.
 *
 * Note that the @numvfs parameter is ignored and all VFs supported by the
 * device are enabled due to the design of the hardware.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
	int totalvfs = pci_sriov_get_totalvfs(pdev);
	unsigned long val;
	int ret;

	if (!accel_dev) {
		dev_err(&pdev->dev, "Failed to find accel_dev\n");
		return -EFAULT;
	}

	if (!device_iommu_mapped(&pdev->dev))
		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");

	if (accel_dev->pf.vf_info) {
		dev_info(&pdev->dev, "Already enabled for this device\n");
		return -EINVAL;
	}

	if (adf_dev_started(accel_dev)) {
		if (adf_devmgr_in_reset(accel_dev) ||
		    adf_dev_in_use(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Device busy\n");
			return -EBUSY;
		}

		ret = adf_sriov_prepare_restart(accel_dev);
		if (ret)
			return ret;
	}

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		return -EFAULT;
	val = 0;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		return -EFAULT;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

	/* Allocate memory for VF info structs */
	accel_dev->pf.vf_info = kcalloc(totalvfs,
					sizeof(struct adf_accel_vf_info),
					GFP_KERNEL);
	if (!accel_dev->pf.vf_info)
		return -ENOMEM;

	if (adf_dev_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	if (adf_dev_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			accel_dev->accel_id);
		return -EFAULT;
	}

	ret = adf_enable_sriov(accel_dev);
	if (ret)
		return ret;

	return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
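
/*
 * Usage sketch (assumption, not part of this file): adf_sriov_configure()
 * is meant to be wired up as each QAT PCI driver's SR-IOV callback, e.g.
 *
 *	static struct pci_driver adf_driver = {
 *		...
 *		.sriov_configure = adf_sriov_configure,
 *	};
 *
 * so the PCI core invokes it when a VF count is written to
 * /sys/bus/pci/devices/<bdf>/sriov_numvfs. With this implementation, a
 * second write (including 0) is rejected with -EINVAL once SR-IOV is
 * enabled, since accel_dev->pf.vf_info is already set.
 */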

int __init adf_init_pf_wq(void)
{
	/* Workqueue for PF2VF responses */
	pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

	return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
	if (pf2vf_resp_wq) {
		destroy_workqueue(pf2vf_resp_wq);
		pf2vf_resp_wq = NULL;
	}
}
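
/*
 * These are presumably called once from the common module's init/exit
 * paths: the __init annotation requires adf_init_pf_wq() to run at module
 * load, before any PF can schedule VF2PF work on pf2vf_resp_wq.
 */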