cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pvtime.c (2980B)


// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

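/*
 * KVM support for paravirtualized (stolen) time on arm64, following the
 * Arm "Paravirtualized Time" specification (DEN 0057A).
 *
 * Accumulate the time this vCPU spent runnable but preempted
 * (current->sched_info.run_delay) into the stolen_time field of the
 * shared pvclock_vcpu_stolen_time structure at the IPA registered in
 * vcpu->arch.steal.base. Called from the vCPU run loop before entering
 * the guest; the SRCU read lock protects the memslot lookup behind
 * kvm_get_guest()/kvm_put_guest().
 */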
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	u64 last_steal = vcpu->arch.steal.last_steal;
	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	u64 steal = 0;
	int idx;

	if (base == GPA_INVALID)
		return;

	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_get_guest(kvm, base + offset, steal)) {
		steal = le64_to_cpu(steal);
		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.steal.last_steal - last_steal;
		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
	}
	srcu_read_unlock(&kvm->srcu, idx);
}

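/*
 * Handle the ARM_SMCCC_HV_PV_TIME_FEATURES hypercall: report the
 * stolen-time feature as supported only once userspace has registered
 * a base address for this vCPU.
 */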
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
	u32 feature = smccc_get_arg1(vcpu);
	long val = SMCCC_RET_NOT_SUPPORTED;

	switch (feature) {
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		if (vcpu->arch.steal.base != GPA_INVALID)
			val = SMCCC_RET_SUCCESS;
		break;
	}

	return val;
}

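/*
 * Handle the ARM_SMCCC_HV_PV_TIME_ST hypercall: zero the shared
 * structure and hand its IPA back to the guest. Stolen time is counted
 * from this point on, so the current run_delay is snapshotted here.
 */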
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
	struct pvclock_vcpu_stolen_time init_values = {};
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;

	if (base == GPA_INVALID)
		return base;

	/*
	 * Start counting stolen time from the time the guest requests
	 * the feature enabled.
	 */
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
	kvm_write_guest_lock(kvm, base, &init_values, sizeof(init_values));

	return base;
}

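/*
 * Stolen time relies on the scheduler's run-delay accounting, so the
 * feature is only available when sched_info is enabled.
 */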
bool kvm_arm_pvtime_supported(void)
{
	return !!sched_info_on();
}

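/*
 * KVM_SET_DEVICE_ATTR(KVM_ARM_VCPU_PVTIME_IPA): userspace registers
 * the guest physical address of the stolen-time structure. The IPA
 * must be 64-byte aligned, lie within a valid memslot, and may only
 * be set once per vCPU.
 */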
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;
	u64 ipa;
	int ret = 0;
	int idx;

	if (!kvm_arm_pvtime_supported() ||
	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	if (get_user(ipa, user))
		return -EFAULT;
	if (!IS_ALIGNED(ipa, 64))
		return -EINVAL;
	if (vcpu->arch.steal.base != GPA_INVALID)
		return -EEXIST;

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret)
		vcpu->arch.steal.base = ipa;

	return ret;
}

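/*
 * KVM_GET_DEVICE_ATTR(KVM_ARM_VCPU_PVTIME_IPA): return the currently
 * registered IPA (GPA_INVALID if none has been set).
 */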
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	u64 ipa;

	if (!kvm_arm_pvtime_supported() ||
	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	ipa = vcpu->arch.steal.base;

	if (put_user(ipa, user))
		return -EFAULT;
	return 0;
}

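/*
 * KVM_HAS_DEVICE_ATTR(KVM_ARM_VCPU_PVTIME_IPA): report whether the
 * stolen-time attribute is supported on this kernel.
 */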
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PVTIME_IPA:
		if (kvm_arm_pvtime_supported())
			return 0;
	}
	return -ENXIO;
}
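
For context, here is a minimal userspace sketch of how a VMM might drive the
attribute interface above to enable stolen-time reporting for one vCPU. The
ioctls (KVM_HAS_DEVICE_ATTR / KVM_SET_DEVICE_ATTR on the vCPU fd) and struct
kvm_device_attr come from the KVM UAPI headers; the vcpu_fd parameter and the
choice of PVTIME_IPA are illustrative assumptions, not part of this file.

/* Hypothetical sketch: enable ARM PV stolen time for one vCPU.
 * Assumes vcpu_fd is an open vCPU file descriptor and PVTIME_IPA is a
 * 64-byte aligned guest physical address backed by a registered memslot.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

#define PVTIME_IPA 0x90000000ULL /* example address, assumption */

static int enable_pvtime(int vcpu_fd)
{
	uint64_t ipa = PVTIME_IPA;
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
		.addr  = (uint64_t)&ipa,
	};

	/* Probe first: failure here maps to -ENXIO from
	 * kvm_arm_pvtime_has_attr(), e.g. when sched_info is off. */
	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;

	/* Register the IPA; kvm_arm_pvtime_set_attr() checks alignment,
	 * that it lies in a memslot, and that it was not set before. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}

After this succeeds, the guest can discover the feature via
ARM_SMCCC_HV_PV_TIME_FEATURES and obtain the structure's IPA via
ARM_SMCCC_HV_PV_TIME_ST, as handled by the hypercall functions above.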