cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

reset.c (11044B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int kvm_sve_max_vl;

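/*
 * Probe the host's SVE support once at init time and record the largest
 * vector length that can be virtualised on every CPU in kvm_sve_max_vl.
 */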
int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

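/*
 * Mark SVE as requested for this vcpu and cap its vector length at the
 * host-wide limit; the backing storage itself is allocated at finalize time.
 */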
static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

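/*
 * Finalize an opt-in vcpu feature. Only KVM_ARM_VCPU_SVE needs explicit
 * finalization today: finalizing it twice fails with -EPERM, and any other
 * feature (or SVE on a vcpu without it) fails with -EINVAL.
 */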
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

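/* A vcpu is fully configured once every feature that needs it is finalized. */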
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

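/*
 * Tear down a vcpu: unshare its FP state and its struct from the hypervisor,
 * then unshare and free the SVE register buffer if one was allocated.
 */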
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
}

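/* Zero the SVE register backing store on reset (no-op for non-SVE vcpus). */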
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

/**
 * kvm_set_vm_width() - set the register width for the guest
 * @vcpu: Pointer to the vcpu being configured
 *
 * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
 * in the VM flags based on the vcpu's requested register width, the HW
 * capabilities and other options (such as MTE).
 * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
 * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	bool is32bit;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);

	lockdep_assert_held(&kvm->lock);

	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
		/*
		 * The guest's register width is already configured.
		 * Make sure that the vcpu is consistent with it.
		 */
		if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
			return 0;

		return -EINVAL;
	}

	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return -EINVAL;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(kvm) && is32bit)
		return -EINVAL;

	if (is32bit)
		set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);

	set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);

	return 0;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_set_vm_width(vcpu);
	if (!ret) {
		reset_state = vcpu->arch.reset_state;
		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
	}
	mutex_unlock(&vcpu->kvm->lock);

	if (ret)
		return ret;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	switch (vcpu->arch.target) {
	default:
		if (vcpu_el1_is_32bit(vcpu)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

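/* Report the host-wide IPA size limit computed by kvm_set_ipa_limit(). */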
u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

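/*
 * Derive the host-wide IPA limit from ID_AA64MMFR0_EL1.PARange and make sure
 * the kernel's PAGE_SIZE is usable as a Stage-2 granule on this system.
 */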
int kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits could not be supported
	 * on either 4K or 16K page size. Hence let's cap
	 * it to 48 bits, in case it's reported as larger
	 * on the system.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

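/*
 * Configure Stage-2 translation for a new VM: take the requested IPA size
 * from the KVM_VM_TYPE_ARM_IPA_SIZE field of the VM type (defaulting to
 * KVM_PHYS_SHIFT), validate it against kvm_ipa_limit, and encode the result
 * into kvm->arch.vtcr via kvm_get_vtcr().
 */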
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < ARM64_MIN_PARANGE_BITS)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}
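
For orientation, the SVE enable/finalize handshake implemented by kvm_reset_vcpu() and kvm_arm_vcpu_finalize() above is driven from userspace through the KVM_ARM_VCPU_INIT and KVM_ARM_VCPU_FINALIZE ioctls. The sketch below is not part of reset.c: the helper name vcpu_enable_sve, the assumption that the VM and vcpu file descriptors already exist, and the omitted error handling are illustrative choices, not the kernel's or any particular VMM's code.

/*
 * Hypothetical userspace sketch (not part of reset.c): how a VMM would
 * exercise the SVE enable/finalize path implemented in this file.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_enable_sve(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;
	int feature = KVM_ARM_VCPU_SVE;

	/* Ask KVM for the preferred target, then request the SVE feature bit. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;
	init.features[KVM_ARM_VCPU_SVE / 32] |= 1U << (KVM_ARM_VCPU_SVE % 32);

	/* KVM_ARM_VCPU_INIT ends up in kvm_reset_vcpu(), which calls
	 * kvm_vcpu_enable_sve() and sets KVM_ARM64_GUEST_HAS_SVE. */
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
		return -1;

	/* Optionally restrict vector lengths here by writing the
	 * KVM_REG_ARM64_SVE_VLS pseudo-register; it becomes read-only
	 * once the feature is finalized. */

	/* KVM_ARM_VCPU_FINALIZE reaches kvm_arm_vcpu_finalize() above and
	 * allocates vcpu->arch.sve_state via kvm_vcpu_finalize_sve(). */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
}

This mirrors the ordering the file enforces: enabling SVE only records the request, the vector-length configuration stays mutable until finalization, and finalizing a second time fails with -EPERM.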