cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

adf_dh895xcc_hw_data.c (8257B)


      1// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
      2/* Copyright(c) 2014 - 2021 Intel Corporation */
      3#include <adf_accel_devices.h>
      4#include <adf_common_drv.h>
      5#include <adf_gen2_hw_data.h>
      6#include <adf_gen2_pfvf.h>
      7#include "adf_dh895xcc_hw_data.h"
      8#include "icp_qat_hw.h"
      9
     10#define ADF_DH895XCC_VF_MSK	0xFFFFFFFF
     11
/*
 * Worker thread to service arbiter mappings.
 * One 32-bit entry per accel engine (ADF_DH895XCC_MAX_ACCELENGINES);
 * presumably each nibble assigns a worker thread to an arbiter —
 * consumed via adf_get_arbiter_mapping() below; TODO confirm encoding
 * against the HW arbiter programming in the common gen2 code.
 */
static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
	0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
	0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
};
     18
/*
 * Device class descriptor shared by all DH895xCC devices; the instance
 * counter is bumped in adf_init_hw_data_dh895xcc() and dropped in
 * adf_clean_hw_data_dh895xcc().
 */
static struct adf_hw_device_class dh895xcc_class = {
	.name = ADF_DH895XCC_DEVICE_NAME,
	.type = DEV_DH895XCC,
	.instances = 0
};
     24
     25static u32 get_accel_mask(struct adf_hw_device_data *self)
     26{
     27	u32 fuses = self->fuses;
     28
     29	return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
     30			 ADF_DH895XCC_ACCELERATORS_MASK;
     31}
     32
     33static u32 get_ae_mask(struct adf_hw_device_data *self)
     34{
     35	u32 fuses = self->fuses;
     36
     37	return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
     38}
     39
     40static u32 get_misc_bar_id(struct adf_hw_device_data *self)
     41{
     42	return ADF_DH895XCC_PMISC_BAR;
     43}
     44
     45static u32 get_etr_bar_id(struct adf_hw_device_data *self)
     46{
     47	return ADF_DH895XCC_ETR_BAR;
     48}
     49
     50static u32 get_sram_bar_id(struct adf_hw_device_data *self)
     51{
     52	return ADF_DH895XCC_SRAM_BAR;
     53}
     54
     55static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
     56{
     57	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
     58	u32 capabilities;
     59	u32 legfuses;
     60
     61	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
     62		       ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
     63		       ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
     64		       ICP_ACCEL_CAPABILITIES_CIPHER |
     65		       ICP_ACCEL_CAPABILITIES_COMPRESSION;
     66
     67	/* Read accelerator capabilities mask */
     68	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
     69
     70	/* A set bit in legfuses means the feature is OFF in this SKU */
     71	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
     72		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
     73		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
     74	}
     75	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
     76		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
     77	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
     78		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
     79		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
     80	}
     81	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
     82		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
     83
     84	return capabilities;
     85}
     86
     87static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
     88{
     89	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
     90	    >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
     91
     92	switch (sku) {
     93	case ADF_DH895XCC_FUSECTL_SKU_1:
     94		return DEV_SKU_1;
     95	case ADF_DH895XCC_FUSECTL_SKU_2:
     96		return DEV_SKU_2;
     97	case ADF_DH895XCC_FUSECTL_SKU_3:
     98		return DEV_SKU_3;
     99	case ADF_DH895XCC_FUSECTL_SKU_4:
    100		return DEV_SKU_4;
    101	default:
    102		return DEV_SKU_UNKNOWN;
    103	}
    104	return DEV_SKU_UNKNOWN;
    105}
    106
    107static const u32 *adf_get_arbiter_mapping(void)
    108{
    109	return thrd_to_arb_map;
    110}
    111
    112static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
    113{
    114	/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
    115	if (vf_mask & 0xFFFF) {
    116		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
    117			  & ~ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
    118		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
    119	}
    120
    121	/* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
    122	if (vf_mask >> 16) {
    123		u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
    124			  & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
    125		ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
    126	}
    127}
    128
    129static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
    130{
    131	u32 val;
    132
    133	/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
    134	val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
    135	      | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
    136	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
    137
    138	/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
    139	val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
    140	      | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
    141	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
    142}
    143
/*
 * Mask the VF2PF interrupt sources that are currently pending and
 * return them. ERRSOU3/ERRSOU5 report the raised sources; ERRMSK3/
 * ERRMSK5 mask them (a set mask bit disables the source). The CSR
 * write sequence below is order-sensitive — see the workaround note.
 */
static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
{
	u32 sources, pending, disabled;
	u32 errsou3, errmsk3;
	u32 errsou5, errmsk5;

	/* Get the interrupt sources triggered by VFs */
	errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
	errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
	sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
		  | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);

	if (!sources)
		return 0;

	/* Get the already disabled interrupts */
	errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
	errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
	disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
		   | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);

	/* Only report sources that are not already masked off */
	pending = sources & ~disabled;
	if (!pending)
		return 0;

	/* Due to HW limitations, when disabling the interrupts, we can't
	 * just disable the requested sources, as this would lead to missed
	 * interrupts if sources change just before writing to ERRMSK3 and
	 * ERRMSK5.
	 * To work around it, disable all and re-enable only the sources
	 * that are neither pending nor already disabled. Re-enabling will
	 * trigger a new interrupt for the sources that have changed in the
	 * meantime, if any.
	 */
	errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
	errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);

	/* Keep masked only what was pending or previously disabled */
	errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
	errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
	ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);

	/* Return the sources of the (new) interrupt(s) */
	return pending;
}
    191
    192static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
    193{
    194	adf_gen2_cfg_iov_thds(accel_dev, enable,
    195			      ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS,
    196			      ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS);
    197}
    198
    199void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
    200{
    201	hw_data->dev_class = &dh895xcc_class;
    202	hw_data->instance_id = dh895xcc_class.instances++;
    203	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
    204	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
    205	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
    206	hw_data->num_logical_accel = 1;
    207	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
    208	hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
    209	hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
    210	hw_data->ring_to_svc_map = ADF_GEN2_DEFAULT_RING_TO_SRV_MAP;
    211	hw_data->alloc_irq = adf_isr_resource_alloc;
    212	hw_data->free_irq = adf_isr_resource_free;
    213	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
    214	hw_data->get_accel_mask = get_accel_mask;
    215	hw_data->get_ae_mask = get_ae_mask;
    216	hw_data->get_accel_cap = get_accel_cap;
    217	hw_data->get_num_accels = adf_gen2_get_num_accels;
    218	hw_data->get_num_aes = adf_gen2_get_num_aes;
    219	hw_data->get_etr_bar_id = get_etr_bar_id;
    220	hw_data->get_misc_bar_id = get_misc_bar_id;
    221	hw_data->get_admin_info = adf_gen2_get_admin_info;
    222	hw_data->get_arb_info = adf_gen2_get_arb_info;
    223	hw_data->get_sram_bar_id = get_sram_bar_id;
    224	hw_data->get_sku = get_sku;
    225	hw_data->fw_name = ADF_DH895XCC_FW;
    226	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
    227	hw_data->init_admin_comms = adf_init_admin_comms;
    228	hw_data->exit_admin_comms = adf_exit_admin_comms;
    229	hw_data->configure_iov_threads = configure_iov_threads;
    230	hw_data->send_admin_init = adf_send_admin_init;
    231	hw_data->init_arb = adf_init_arb;
    232	hw_data->exit_arb = adf_exit_arb;
    233	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
    234	hw_data->enable_ints = adf_gen2_enable_ints;
    235	hw_data->reset_device = adf_reset_sbr;
    236	hw_data->disable_iov = adf_disable_sriov;
    237
    238	adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
    239	hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
    240	hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
    241	hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
    242	adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
    243}
    244
    245void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
    246{
    247	hw_data->dev_class->instances--;
    248}