vmx_set_nested_state_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_set_nested_state_test
 *
 * Copyright (C) 2019, Google LLC.
 *
 * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <errno.h>
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/*
 * Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
 * changes this should be updated.
 */
#define VMCS12_REVISION 0x11e57ed0
#define VCPU_ID 5

bool have_evmcs;

void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
{
	vcpu_nested_state_set(vm, VCPU_ID, state, false);
}

void test_nested_state_expect_errno(struct kvm_vm *vm,
				    struct kvm_nested_state *state,
				    int expected_errno)
{
	int rv;

	rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
	TEST_ASSERT(rv == -1 && errno == expected_errno,
		    "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
		    strerror(expected_errno), expected_errno, rv, strerror(errno),
		    errno);
}

void test_nested_state_expect_einval(struct kvm_vm *vm,
				     struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vm, state, EINVAL);
}

void test_nested_state_expect_efault(struct kvm_vm *vm,
				     struct kvm_nested_state *state)
{
	test_nested_state_expect_errno(vm, state, EFAULT);
}

void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
				u32 vmcs12_revision)
{
	/* Set revision_id in vmcs12 to vmcs12_revision. */
	memcpy(&state->data, &vmcs12_revision, sizeof(u32));
}

void set_default_state(struct kvm_nested_state *state)
{
	memset(state, 0, sizeof(*state));
	state->flags = KVM_STATE_NESTED_RUN_PENDING |
		       KVM_STATE_NESTED_GUEST_MODE;
	state->format = 0;
	state->size = sizeof(*state);
}

void set_default_vmx_state(struct kvm_nested_state *state, int size)
{
	memset(state, 0, size);
	if (have_evmcs)
		state->flags = KVM_STATE_NESTED_EVMCS;
	state->format = 0;
	state->size = size;
	state->hdr.vmx.vmxon_pa = 0x1000;
	state->hdr.vmx.vmcs12_pa = 0x2000;
	state->hdr.vmx.smm.flags = 0;
	set_revision_id_for_vmcs12(state, VMCS12_REVISION);
}

void test_vmx_nested_state(struct kvm_vm *vm)
{
	/* Add a page for VMCS12. */
	const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
	struct kvm_nested_state *state =
		(struct kvm_nested_state *)malloc(state_sz);

	/* The format must be set to 0. 0 for VMX, 1 for SVM. */
	set_default_vmx_state(state, state_sz);
	state->format = 1;
	test_nested_state_expect_einval(vm, state);

	/*
	 * We cannot virtualize anything if the guest does not have VMX
	 * enabled.
	 */
	set_default_vmx_state(state, state_sz);
	test_nested_state_expect_einval(vm, state);

	/*
	 * We cannot virtualize anything if the guest does not have VMX
	 * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
	 * is set to -1ull, but the flags must be zero.
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	test_nested_state_expect_einval(vm, state);

	state->hdr.vmx.vmcs12_pa = -1ull;
	state->flags = KVM_STATE_NESTED_EVMCS;
	test_nested_state_expect_einval(vm, state);

	state->flags = 0;
	test_nested_state(vm, state);

	/* Enable VMX in the guest CPUID. */
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/*
	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
	 * setting the nested state but flags other than eVMCS must be clear.
	 * The eVMCS flag can be set if the enlightened VMCS capability has
	 * been enabled.
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	state->hdr.vmx.vmcs12_pa = -1ull;
	test_nested_state_expect_einval(vm, state);

	state->flags &= KVM_STATE_NESTED_EVMCS;
	if (have_evmcs) {
		test_nested_state_expect_einval(vm, state);
		vcpu_enable_evmcs(vm, VCPU_ID);
	}
	test_nested_state(vm, state);

	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
	state->hdr.vmx.smm.flags = 1;
	test_nested_state_expect_einval(vm, state);

	/* Invalid flags are rejected. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.flags = ~0;
	test_nested_state_expect_einval(vm, state);

	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	state->flags = 0;
	test_nested_state_expect_einval(vm, state);

	/* It is invalid to have vmxon_pa set to a non-page aligned address. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = 1;
	test_nested_state_expect_einval(vm, state);

	/*
	 * It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
	 * KVM_STATE_NESTED_GUEST_MODE set together.
	 */
	set_default_vmx_state(state, state_sz);
	state->flags = KVM_STATE_NESTED_GUEST_MODE |
		       KVM_STATE_NESTED_RUN_PENDING;
	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
	test_nested_state_expect_einval(vm, state);

	/*
	 * It is invalid to have any of the SMM flags set besides:
	 *	KVM_STATE_NESTED_SMM_GUEST_MODE
	 *	KVM_STATE_NESTED_SMM_VMXON
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
				     KVM_STATE_NESTED_SMM_VMXON);
	test_nested_state_expect_einval(vm, state);

	/* Outside SMM, SMM flags must be zero. */
	set_default_vmx_state(state, state_sz);
	state->flags = 0;
	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
	test_nested_state_expect_einval(vm, state);

	/*
	 * Size must be large enough to fit kvm_nested_state and vmcs12
	 * if VMCS12 physical address is set
	 */
	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	state->flags = 0;
	test_nested_state_expect_einval(vm, state);

	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	state->flags = 0;
	state->hdr.vmx.vmcs12_pa = -1;
	test_nested_state(vm, state);

	/*
	 * KVM_SET_NESTED_STATE succeeds with invalid VMCS
	 * contents but L2 not running.
	 */
	set_default_vmx_state(state, state_sz);
	state->flags = 0;
	test_nested_state(vm, state);

	/* Invalid flags are rejected, even if no VMCS loaded. */
	set_default_vmx_state(state, state_sz);
	state->size = sizeof(*state);
	state->flags = 0;
	state->hdr.vmx.vmcs12_pa = -1;
	state->hdr.vmx.flags = ~0;
	test_nested_state_expect_einval(vm, state);

	/* vmxon_pa cannot be the same address as vmcs_pa. */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = 0;
	state->hdr.vmx.vmcs12_pa = 0;
	test_nested_state_expect_einval(vm, state);

	/*
	 * Test that if we leave nesting the state reflects that when we get
	 * it again.
	 */
	set_default_vmx_state(state, state_sz);
	state->hdr.vmx.vmxon_pa = -1ull;
	state->hdr.vmx.vmcs12_pa = -1ull;
	state->flags = 0;
	test_nested_state(vm, state);
	vcpu_nested_state_get(vm, VCPU_ID, state);
	TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
		    "Size must be between %ld and %d. The size returned was %d.",
		    sizeof(*state), state_sz, state->size);
	TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
	TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");

	free(state);
}

void disable_vmx(struct kvm_vm *vm)
{
	struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
	int i;

	for (i = 0; i < cpuid->nent; ++i)
		if (cpuid->entries[i].function == 1 &&
		    cpuid->entries[i].index == 0)
			break;
	TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");

	cpuid->entries[i].ecx &= ~CPUID_VMX;
	vcpu_set_cpuid(vm, VCPU_ID, cpuid);
	cpuid->entries[i].ecx |= CPUID_VMX;
}

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_nested_state state;

	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);

	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		print_skip("KVM_CAP_NESTED_STATE not available");
		exit(KSFT_SKIP);
	}

	/*
	 * AMD currently does not implement set_nested_state, so for now we
	 * just early out.
	 */
	nested_vmx_check_supported();

	vm = vm_create_default(VCPU_ID, 0, 0);

	/*
	 * First run tests with VMX disabled to check error handling.
	 */
	disable_vmx(vm);

	/* Passing a NULL kvm_nested_state causes an EFAULT. */
	test_nested_state_expect_efault(vm, NULL);

	/* 'size' cannot be smaller than sizeof(kvm_nested_state). */
	set_default_state(&state);
	state.size = 0;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * Setting the flags 0xf fails the flags check. The only flags that
	 * can be used are:
	 *	KVM_STATE_NESTED_GUEST_MODE
	 *	KVM_STATE_NESTED_RUN_PENDING
	 *	KVM_STATE_NESTED_EVMCS
	 */
	set_default_state(&state);
	state.flags = 0xf;
	test_nested_state_expect_einval(vm, &state);

	/*
	 * If KVM_STATE_NESTED_RUN_PENDING is set then
	 * KVM_STATE_NESTED_GUEST_MODE has to be set as well.
	 */
	set_default_state(&state);
	state.flags = KVM_STATE_NESTED_RUN_PENDING;
	test_nested_state_expect_einval(vm, &state);

	test_vmx_nested_state(vm);

	kvm_vm_free(vm);
	return 0;
}