cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

main.c (2658B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>

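/* No arch-specific /dev/kvm ioctls are supported on RISC-V. */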
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

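/* Every hart that reaches this point is treated as compatible. */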
int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

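/*
 * Called on each hart when KVM hardware access is enabled: delegate the
 * exceptions a guest can handle itself (hedeleg) and all VS-level
 * interrupts (hideleg) to VS-mode, allow VS-mode access to the hardware
 * counters (hcounteren), and clear any pending virtual interrupts (hvip).
 */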
int kvm_arch_hardware_enable(void)
{
	unsigned long hideleg, hedeleg;

	hedeleg = 0;
	hedeleg |= (1UL << EXC_INST_MISALIGNED);
	hedeleg |= (1UL << EXC_BREAKPOINT);
	hedeleg |= (1UL << EXC_SYSCALL);
	hedeleg |= (1UL << EXC_INST_PAGE_FAULT);
	hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT);
	hedeleg |= (1UL << EXC_STORE_PAGE_FAULT);
	csr_write(CSR_HEDELEG, hedeleg);

	hideleg = 0;
	hideleg |= (1UL << IRQ_VS_SOFT);
	hideleg |= (1UL << IRQ_VS_TIMER);
	hideleg |= (1UL << IRQ_VS_EXT);
	csr_write(CSR_HIDELEG, hideleg);

	csr_write(CSR_HCOUNTEREN, -1UL);

	csr_write(CSR_HVIP, 0);

	return 0;
}

void kvm_arch_hardware_disable(void)
{
	/*
	 * After clearing the hideleg CSR, the host kernel will receive
	 * spurious interrupts if hvip CSR has pending interrupts and the
	 * corresponding enable bits in vsie CSR are asserted. To avoid it,
	 * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
	 */
	csr_write(CSR_VSIE, 0);
	csr_write(CSR_HVIP, 0);
	csr_write(CSR_HEDELEG, 0);
	csr_write(CSR_HIDELEG, 0);
}

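/*
 * One-time module initialization: require the hypervisor (H) ISA
 * extension and an SBI implementation of v0.2 or later that provides
 * the RFENCE extension, then detect the G-stage page table mode and
 * the number of usable VMID bits.
 */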
int kvm_arch_init(void *opaque)
{
	const char *str;

	if (!riscv_isa_extension_available(NULL, h)) {
		kvm_info("hypervisor extension not available\n");
		return -ENODEV;
	}

	if (sbi_spec_is_0_1()) {
		kvm_info("require SBI v0.2 or higher\n");
		return -ENODEV;
	}

	if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
		kvm_info("require SBI RFENCE extension\n");
		return -ENODEV;
	}

	kvm_riscv_gstage_mode_detect();

	kvm_riscv_gstage_vmid_detect();

	kvm_info("hypervisor extension available\n");

	switch (kvm_riscv_gstage_mode()) {
	case HGATP_MODE_SV32X4:
		str = "Sv32x4";
		break;
	case HGATP_MODE_SV39X4:
		str = "Sv39x4";
		break;
	case HGATP_MODE_SV48X4:
		str = "Sv48x4";
		break;
	case HGATP_MODE_SV57X4:
		str = "Sv57x4";
		break;
	default:
		return -ENODEV;
	}
	kvm_info("using %s G-stage page table format\n", str);

	kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());

	return 0;
}

void kvm_arch_exit(void)
{
}

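/* Register with the generic KVM core; each vCPU allocation is sizeof(struct kvm_vcpu). */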
static int riscv_kvm_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
module_init(riscv_kvm_init);