cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mmio.c (4229B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
      4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
      5 */
      6
      7#include <linux/kvm_host.h>
      8#include <asm/kvm_emulate.h>
      9#include <trace/events/kvm.h>
     10
     11#include "trace.h"
     12
     13void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
     14{
     15	void *datap = NULL;
     16	union {
     17		u8	byte;
     18		u16	hword;
     19		u32	word;
     20		u64	dword;
     21	} tmp;
     22
     23	switch (len) {
     24	case 1:
     25		tmp.byte	= data;
     26		datap		= &tmp.byte;
     27		break;
     28	case 2:
     29		tmp.hword	= data;
     30		datap		= &tmp.hword;
     31		break;
     32	case 4:
     33		tmp.word	= data;
     34		datap		= &tmp.word;
     35		break;
     36	case 8:
     37		tmp.dword	= data;
     38		datap		= &tmp.dword;
     39		break;
     40	}
     41
     42	memcpy(buf, datap, len);
     43}
     44
     45unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
     46{
     47	unsigned long data = 0;
     48	union {
     49		u16	hword;
     50		u32	word;
     51		u64	dword;
     52	} tmp;
     53
     54	switch (len) {
     55	case 1:
     56		data = *(u8 *)buf;
     57		break;
     58	case 2:
     59		memcpy(&tmp.hword, buf, len);
     60		data = tmp.hword;
     61		break;
     62	case 4:
     63		memcpy(&tmp.word, buf, len);
     64		data = tmp.word;
     65		break;
     66	case 8:
     67		memcpy(&tmp.dword, buf, len);
     68		data = tmp.dword;
     69		break;
     70	}
     71
     72	return data;
     73}
     74
/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 *
 * For a completed MMIO read, fetch the emulated data from vcpu->run,
 * apply sign/zero extension as dictated by the abort syndrome, and store
 * the result into the destination register.  In all cases the faulting
 * instruction is skipped so the guest does not re-execute it.
 *
 * Returns 0.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/* Detect an already handled MMIO return */
	if (unlikely(!vcpu->mmio_needed))
		return 0;

	vcpu->mmio_needed = 0;

	/* Only loads need to propagate data back into a guest register. */
	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
		struct kvm_run *run = vcpu->run;

		len = kvm_vcpu_dabt_get_as(vcpu);
		data = kvm_mmio_read_buf(run->mmio.data, len);

		if (kvm_vcpu_dabt_issext(vcpu) &&
		    len < sizeof(unsigned long)) {
			/*
			 * Sign-extend via the (x ^ m) - m trick: mask holds
			 * the sign bit of the access width, and the int ->
			 * unsigned long conversion of a negative mask
			 * propagates it through the upper bits.
			 */
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

		/* A 32-bit destination register (SF clear) keeps only
		 * the low 32 bits. */
		if (!kvm_vcpu_dabt_issf(vcpu))
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_incr_pc(vcpu);

	return 0;
}
    122
    123int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
    124{
    125	struct kvm_run *run = vcpu->run;
    126	unsigned long data;
    127	unsigned long rt;
    128	int ret;
    129	bool is_write;
    130	int len;
    131	u8 data_buf[8];
    132
    133	/*
    134	 * No valid syndrome? Ask userspace for help if it has
    135	 * volunteered to do so, and bail out otherwise.
    136	 */
    137	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
    138		if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
    139			     &vcpu->kvm->arch.flags)) {
    140			run->exit_reason = KVM_EXIT_ARM_NISV;
    141			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
    142			run->arm_nisv.fault_ipa = fault_ipa;
    143			return 0;
    144		}
    145
    146		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
    147		return -ENOSYS;
    148	}
    149
    150	/*
    151	 * Prepare MMIO operation. First decode the syndrome data we get
    152	 * from the CPU. Then try if some in-kernel emulation feels
    153	 * responsible, otherwise let user space do its magic.
    154	 */
    155	is_write = kvm_vcpu_dabt_iswrite(vcpu);
    156	len = kvm_vcpu_dabt_get_as(vcpu);
    157	rt = kvm_vcpu_dabt_get_rd(vcpu);
    158
    159	if (is_write) {
    160		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
    161					       len);
    162
    163		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
    164		kvm_mmio_write_buf(data_buf, len, data);
    165
    166		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
    167				       data_buf);
    168	} else {
    169		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
    170			       fault_ipa, NULL);
    171
    172		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
    173				      data_buf);
    174	}
    175
    176	/* Now prepare kvm_run for the potential return to userland. */
    177	run->mmio.is_write	= is_write;
    178	run->mmio.phys_addr	= fault_ipa;
    179	run->mmio.len		= len;
    180	vcpu->mmio_needed	= 1;
    181
    182	if (!ret) {
    183		/* We handled the access successfully in the kernel. */
    184		if (!is_write)
    185			memcpy(run->mmio.data, data_buf, len);
    186		vcpu->stat.mmio_exit_kernel++;
    187		kvm_handle_mmio_return(vcpu);
    188		return 1;
    189	}
    190
    191	if (is_write)
    192		memcpy(run->mmio.data, data_buf, len);
    193	vcpu->stat.mmio_exit_user++;
    194	run->exit_reason	= KVM_EXIT_MMIO;
    195	return 0;
    196}