fix_hypercall_test.c (3763B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 0

static bool ud_expected;

/*
 * The #UD handler fires only when the FIX_HYPERCALL_INSN quirk is disabled,
 * i.e. when KVM injects #UD instead of patching the "foreign" hypercall
 * instruction.
 */
static void guest_ud_handler(struct ex_regs *regs)
{
	GUEST_ASSERT(ud_expected);
	GUEST_DONE();
}

/* Issue a KVM_HC_SCHED_YIELD hypercall using AMD's VMMCALL instruction. */
extern unsigned char svm_hypercall_insn;
static uint64_t svm_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "svm_hypercall_insn:\n\t"
		     "vmmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

/* Issue a KVM_HC_SCHED_YIELD hypercall using Intel's VMCALL instruction. */
extern unsigned char vmx_hypercall_insn;
static uint64_t vmx_do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("mov %1, %%rax\n\t"
		     "mov %2, %%rbx\n\t"
		     "vmx_hypercall_insn:\n\t"
		     "vmcall\n\t"
		     "mov %%rax, %0\n\t"
		     : "=r"(ret)
		     : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
		     : "rax", "rbx", "memory");

	return ret;
}

static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
{
	uint32_t exp = 0, obs = 0;

	memcpy(&exp, exp_insn, sizeof(exp));
	memcpy(&obs, obs_insn, sizeof(obs));

	GUEST_ASSERT_EQ(exp, obs);
}

/*
 * Execute the hypercall instruction of the *other* vendor.  With the quirk
 * enabled, KVM is expected to rewrite it to the native instruction; with the
 * quirk disabled, the guest takes a #UD instead.
 */
static void guest_main(void)
{
	unsigned char *native_hypercall_insn, *hypercall_insn;
	uint8_t apic_id;

	apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));

	if (is_intel_cpu()) {
		native_hypercall_insn = &vmx_hypercall_insn;
		hypercall_insn = &svm_hypercall_insn;
		svm_do_sched_yield(apic_id);
	} else if (is_amd_cpu()) {
		native_hypercall_insn = &svm_hypercall_insn;
		hypercall_insn = &vmx_hypercall_insn;
		vmx_do_sched_yield(apic_id);
	} else {
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	GUEST_ASSERT(!ud_expected);
	assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
	GUEST_DONE();
}

static void setup_ud_vector(struct kvm_vm *vm)
{
	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);
	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
}

static void enter_guest(struct kvm_vm *vm)
{
	struct kvm_run *run;
	struct ucall uc;

	run = vcpu_state(vm, VCPU_ID);

	vcpu_run(vm, VCPU_ID);
	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__, uc.args[1]);
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

/*
 * Quirk enabled (the default): KVM patches the foreign hypercall instruction
 * to the native one, so no #UD is expected.
 */
static void test_fix_hypercall(void)
{
	struct kvm_vm *vm;

	vm = vm_create_default(VCPU_ID, 0, guest_main);
	setup_ud_vector(vm);

	ud_expected = false;
	sync_global_to_guest(vm, ud_expected);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vm);
}

/*
 * Quirk disabled via KVM_CAP_DISABLE_QUIRKS2: KVM injects #UD instead of
 * patching the hypercall instruction.
 */
static void test_fix_hypercall_disabled(void)
{
	struct kvm_enable_cap cap = {0};
	struct kvm_vm *vm;

	vm = vm_create_default(VCPU_ID, 0, guest_main);
	setup_ud_vector(vm);

	cap.cap = KVM_CAP_DISABLE_QUIRKS2;
	cap.args[0] = KVM_X86_QUIRK_FIX_HYPERCALL_INSN;
	vm_enable_cap(vm, &cap);

	ud_expected = true;
	sync_global_to_guest(vm, ud_expected);

	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vm);
}

int main(void)
{
	if (!(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
		print_skip("KVM_X86_QUIRK_FIX_HYPERCALL_INSN not supported");
		exit(KSFT_SKIP);
	}

	test_fix_hypercall();
	test_fix_hypercall_disabled();
}