cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

fixed_config.h (6999B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * Copyright (C) 2021 Google LLC
      4 * Author: Fuad Tabba <tabba@google.com>
      5 */
      6
      7#ifndef __ARM64_KVM_FIXED_CONFIG_H__
      8#define __ARM64_KVM_FIXED_CONFIG_H__
      9
     10#include <asm/sysreg.h>
     11
     12/*
     13 * This file contains definitions for features to be allowed or restricted for
     14 * guest virtual machines, depending on the mode KVM is running in and on the
     15 * type of guest that is running.
     16 *
     17 * The ALLOW masks represent a bitmask of feature fields that are allowed
     18 * without any restrictions as long as they are supported by the system.
     19 *
     20 * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for
     21 * features that are restricted to support at most the specified feature.
     22 *
      23 * If a feature field is not present in either, then it is not supported.
     24 *
     25 * The approach taken for protected VMs is to allow features that are:
     26 * - Needed by common Linux distributions (e.g., floating point)
     27 * - Trivial to support, e.g., supporting the feature does not introduce or
     28 * require tracking of additional state in KVM
     29 * - Cannot be trapped or prevent the guest from using anyway
     30 */
     31
     32/*
     33 * Allow for protected VMs:
     34 * - Floating-point and Advanced SIMD
     35 * - Data Independent Timing
     36 */
     37#define PVM_ID_AA64PFR0_ALLOW (\
     38	ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
     39	ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
     40	ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
     41	)
     42
     43/*
     44 * Restrict to the following *unsigned* features for protected VMs:
     45 * - AArch64 guests only (no support for AArch32 guests):
     46 *	AArch32 adds complexity in trap handling, emulation, condition codes,
     47 *	etc...
     48 * - RAS (v1)
     49 *	Supported by KVM
     50 */
     51#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
     52	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
     53	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
     54	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
     55	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
     56	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
     57	)
     58
     59/*
     60 * Allow for protected VMs:
     61 * - Branch Target Identification
     62 * - Speculative Store Bypassing
     63 */
     64#define PVM_ID_AA64PFR1_ALLOW (\
     65	ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
     66	ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
     67	)
     68
     69/*
     70 * Allow for protected VMs:
     71 * - Mixed-endian
     72 * - Distinction between Secure and Non-secure Memory
     73 * - Mixed-endian at EL0 only
     74 * - Non-context synchronizing exception entry and exit
     75 */
     76#define PVM_ID_AA64MMFR0_ALLOW (\
     77	ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
     78	ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
     79	ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
     80	ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
     81	)
     82
     83/*
     84 * Restrict to the following *unsigned* features for protected VMs:
     85 * - 40-bit IPA
     86 * - 16-bit ASID
     87 */
     88#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
     89	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
     90	FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
     91	)
     92
     93/*
     94 * Allow for protected VMs:
     95 * - Hardware translation table updates to Access flag and Dirty state
     96 * - Number of VMID bits from CPU
     97 * - Hierarchical Permission Disables
     98 * - Privileged Access Never
     99 * - SError interrupt exceptions from speculative reads
    100 * - Enhanced Translation Synchronization
    101 */
    102#define PVM_ID_AA64MMFR1_ALLOW (\
    103	ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \
    104	ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \
    105	ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \
    106	ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \
    107	ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \
    108	ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \
    109	)
    110
    111/*
    112 * Allow for protected VMs:
    113 * - Common not Private translations
    114 * - User Access Override
    115 * - IESB bit in the SCTLR_ELx registers
    116 * - Unaligned single-copy atomicity and atomic functions
    117 * - ESR_ELx.EC value on an exception by read access to feature ID space
    118 * - TTL field in address operations.
    119 * - Break-before-make sequences when changing translation block size
    120 * - E0PDx mechanism
    121 */
    122#define PVM_ID_AA64MMFR2_ALLOW (\
    123	ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \
    124	ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \
    125	ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \
    126	ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \
    127	ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \
    128	ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \
    129	ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \
    130	ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \
    131	)
    132
    133/*
    134 * No support for Scalable Vectors for protected VMs:
    135 *	Requires additional support from KVM, e.g., context-switching and
    136 *	trapping at EL2
    137 */
    138#define PVM_ID_AA64ZFR0_ALLOW (0ULL)
    139
    140/*
    141 * No support for debug, including breakpoints, and watchpoints for protected
    142 * VMs:
    143 *	The Arm architecture mandates support for at least the Armv8 debug
    144 *	architecture, which would include at least 2 hardware breakpoints and
    145 *	watchpoints. Providing that support to protected guests adds
    146 *	considerable state and complexity. Therefore, the reserved value of 0 is
    147 *	used for debug-related fields.
    148 */
    149#define PVM_ID_AA64DFR0_ALLOW (0ULL)
    150#define PVM_ID_AA64DFR1_ALLOW (0ULL)
    151
    152/*
    153 * No support for implementation defined features.
    154 */
    155#define PVM_ID_AA64AFR0_ALLOW (0ULL)
    156#define PVM_ID_AA64AFR1_ALLOW (0ULL)
    157
    158/*
    159 * No restrictions on instructions implemented in AArch64.
    160 */
    161#define PVM_ID_AA64ISAR0_ALLOW (\
    162	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_AES) | \
    163	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA1) | \
    164	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA2) | \
    165	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_CRC32) | \
    166	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC) | \
    167	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RDM) | \
    168	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SHA3) | \
    169	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM3) | \
    170	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_SM4) | \
    171	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_DP) | \
    172	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_FHM) | \
    173	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TS) | \
    174	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_TLB) | \
    175	ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_RNDR) \
    176	)
    177
    178#define PVM_ID_AA64ISAR1_ALLOW (\
    179	ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \
    180	ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \
    181	ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \
    182	ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \
    183	ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \
    184	ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \
    185	ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \
    186	ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \
    187	ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \
    188	ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \
    189	ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \
    190	ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \
    191	ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \
    192	ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \
    193	)
    194
    195#define PVM_ID_AA64ISAR2_ALLOW (\
    196	ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3) | \
    197	ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) \
    198	)
    199
    200u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
    201bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
    202bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
    203int kvm_check_pvm_sysreg_table(void);
    204
    205#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */