cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

perf_test_util.c (2802B)


// SPDX-License-Identifier: GPL-2.0
/*
 * x86_64-specific extensions to perf_test_util.c.
 *
 * Copyright (C) 2022, Google, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "perf_test_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "vmx.h"

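/* L2 entry point: run the common guest workload, then VMCALL back out to L1. */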
void perf_test_l2_guest_code(uint64_t vcpu_id)
{
	perf_test_guest_code(vcpu_id);
	vmcall();
}

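/*
 * Assembly trampoline for L2: the vcpu_id sits at the top of the stack that
 * L1 hands to L2, so load it into %rdi (first argument in the SysV ABI) and
 * call the C entry point, which never returns; ud2 traps if it somehow does.
 */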
extern char perf_test_l2_guest_entry[];
__asm__(
"perf_test_l2_guest_entry:"
"	mov (%rsp), %rdi;"
"	call perf_test_l2_guest_code;"
"	ud2;"
);

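/*
 * L1 entry point: enter VMX operation, point the VMCS at the L2 trampoline
 * with vcpu_id on top of the L2 stack, launch L2, and verify that it exited
 * with a VMCALL before signalling completion.
 */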
static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	unsigned long *rsp;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));
	GUEST_ASSERT(ept_1g_pages_supported());

	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
	*rsp = vcpu_id;
	prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}

uint64_t perf_test_nested_pages(int nr_vcpus)
{
	/*
	 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
	 * pages and 4-level paging, plus a few pages per-vCPU for data
	 * structures such as the VMCS.
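	 *
	 * (With 1G mappings the leaf entries live in the PDPTs: 1 PML4 page
	 * plus 512 PDPT pages is 513, and 512 PDPTs * 512 GiB each = 256 TiB.)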
	 */
	return 513 + 10 * nr_vcpus;
}

void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
{
	uint64_t start, end;

	prepare_eptp(vmx, vm, 0);

	/*
	 * Identity map the first 4G and the test region with 1G pages so that
	 * KVM can shadow the EPT12 with the maximum huge page size supported
	 * by the backing source.
	 */
	nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);

	start = align_down(perf_test_args.gpa, PG_SIZE_1G);
	end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G);
	nested_identity_map_1g(vmx, vm, start, end - start);
}

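/*
 * Configure every vCPU for nested operation: allocate per-vCPU VMX state,
 * share a single EPT hierarchy across all vCPUs, and redirect RIP to the L1
 * entry point.
 */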
void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus)
{
	struct vmx_pages *vmx, *vmx0 = NULL;
	struct kvm_regs regs;
	vm_vaddr_t vmx_gva;
	int vcpu_id;

	nested_vmx_check_supported();

	for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
		vmx = vcpu_alloc_vmx(vm, &vmx_gva);

		if (vcpu_id == 0) {
			perf_test_setup_ept(vmx, vm);
			vmx0 = vmx;
		} else {
			/* Share the same EPT table across all vCPUs. */
			vmx->eptp = vmx0->eptp;
			vmx->eptp_hva = vmx0->eptp_hva;
			vmx->eptp_gpa = vmx0->eptp_gpa;
		}

		/*
		 * Override the vCPU to run perf_test_l1_guest_code() which will
		 * bounce it into L2 before calling perf_test_guest_code().
		 */
		vcpu_regs_get(vm, vcpu_id, &regs);
		regs.rip = (unsigned long) perf_test_l1_guest_code;
		vcpu_regs_set(vm, vcpu_id, &regs);
		vcpu_args_set(vm, vcpu_id, 2, vmx_gva, vcpu_id);
	}
}