cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vcpu_sbi_base.c (2646B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
      4 *
      5 * Authors:
      6 *     Atish Patra <atish.patra@wdc.com>
      7 */
      8
      9#include <linux/errno.h>
     10#include <linux/err.h>
     11#include <linux/kvm_host.h>
     12#include <linux/version.h>
     13#include <asm/csr.h>
     14#include <asm/sbi.h>
     15#include <asm/kvm_vcpu_timer.h>
     16#include <asm/kvm_vcpu_sbi.h>
     17
     18static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
     19				    unsigned long *out_val,
     20				    struct kvm_cpu_trap *trap, bool *exit)
     21{
     22	int ret = 0;
     23	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
     24	struct sbiret ecall_ret;
     25
     26	switch (cp->a6) {
     27	case SBI_EXT_BASE_GET_SPEC_VERSION:
     28		*out_val = (KVM_SBI_VERSION_MAJOR <<
     29			    SBI_SPEC_VERSION_MAJOR_SHIFT) |
     30			    KVM_SBI_VERSION_MINOR;
     31		break;
     32	case SBI_EXT_BASE_GET_IMP_ID:
     33		*out_val = KVM_SBI_IMPID;
     34		break;
     35	case SBI_EXT_BASE_GET_IMP_VERSION:
     36		*out_val = LINUX_VERSION_CODE;
     37		break;
     38	case SBI_EXT_BASE_PROBE_EXT:
     39		if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&
     40		     cp->a0 <= SBI_EXT_EXPERIMENTAL_END) ||
     41		    (cp->a0 >= SBI_EXT_VENDOR_START &&
     42		     cp->a0 <= SBI_EXT_VENDOR_END)) {
     43			/*
     44			 * For experimental/vendor extensions
     45			 * forward it to the userspace
     46			 */
     47			kvm_riscv_vcpu_sbi_forward(vcpu, run);
     48			*exit = true;
     49		} else
     50			*out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0;
     51		break;
     52	case SBI_EXT_BASE_GET_MVENDORID:
     53	case SBI_EXT_BASE_GET_MARCHID:
     54	case SBI_EXT_BASE_GET_MIMPID:
     55		ecall_ret = sbi_ecall(SBI_EXT_BASE, cp->a6, 0, 0, 0, 0, 0, 0);
     56		if (!ecall_ret.error)
     57			*out_val = ecall_ret.value;
     58		/*TODO: We are unnecessarily converting the error twice */
     59		ret = sbi_err_map_linux_errno(ecall_ret.error);
     60		break;
     61	default:
     62		ret = -EOPNOTSUPP;
     63		break;
     64	}
     65
     66	return ret;
     67}
     68
/*
 * SBI Base extension descriptor: covers the single SBI_EXT_BASE extension
 * ID and dispatches to the in-kernel handler above.
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = {
	.extid_start = SBI_EXT_BASE,
	.extid_end = SBI_EXT_BASE,
	.handler = kvm_sbi_ext_base_handler,
};
     74
     75static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu,
     76					struct kvm_run *run,
     77					unsigned long *out_val,
     78					struct kvm_cpu_trap *utrap,
     79					bool *exit)
     80{
     81	/*
     82	 * Both SBI experimental and vendor extensions are
     83	 * unconditionally forwarded to userspace.
     84	 */
     85	kvm_riscv_vcpu_sbi_forward(vcpu, run);
     86	*exit = true;
     87	return 0;
     88}
     89
/*
 * Experimental SBI extension range: forwarded to userspace unchanged.
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = {
	.extid_start = SBI_EXT_EXPERIMENTAL_START,
	.extid_end = SBI_EXT_EXPERIMENTAL_END,
	.handler = kvm_sbi_ext_forward_handler,
};
     95
/*
 * Vendor-specific SBI extension range: forwarded to userspace unchanged.
 */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = {
	.extid_start = SBI_EXT_VENDOR_START,
	.extid_end = SBI_EXT_VENDOR_END,
	.handler = kvm_sbi_ext_forward_handler,
};