psci_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * psci_test - Tests of KVM's PSCI implementation, covering CPU_ON and
 * SYSTEM_SUSPEND.
 *
 * Copyright (c) 2021 Google LLC.
 *
 * The CPU_ON test checks that the observable state of a vCPU targeted by the
 * CPU_ON PSCI call matches what the caller requested. It is a regression test
 * for a race between KVM servicing the PSCI call and userspace reading the
 * vCPU's registers.
 */

#define _GNU_SOURCE

#include <linux/psci.h>

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

#define VCPU_ID_SOURCE 0
#define VCPU_ID_TARGET 1

#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
#define CPU_ON_CONTEXT_ID 0xdeadc0deul

/* Guest-side wrappers for the PSCI calls exercised by the tests. */
static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
			    uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
		  0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_affinity_info(uint64_t target_affinity,
				   uint64_t lowest_affinity_level)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
		  0, 0, 0, 0, 0, &res);

	return res.a0;
}

static uint64_t psci_features(uint32_t func_id)
{
	struct arm_smccc_res res;

	smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}

static void vcpu_power_off(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_STOPPED,
	};

	vcpu_set_mp_state(vm, vcpuid, &mp_state);
}

/* Create a VM with two PSCI-enabled vCPUs that both start at guest_code. */
static struct kvm_vm *setup_vm(void *guest_code)
{
	struct kvm_vcpu_init init;
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
	kvm_vm_elf_load(vm, program_invocation_name);
	ucall_init(vm, NULL);

	vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
	init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);

	aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_code);
	aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_code);

	return vm;
}

static void enter_guest(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	vcpu_run(vm, vcpuid);
	if (get_ucall(vm, vcpuid, &uc) == UCALL_ABORT)
		TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
			  uc.args[1]);
}

/* Check the target vCPU's PC and x0 against the requested entry point and context ID. */
static void assert_vcpu_reset(struct kvm_vm *vm, uint32_t vcpuid)
{
	uint64_t obs_pc, obs_x0;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &obs_pc);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[0]), &obs_x0);

	TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
		    "unexpected target cpu pc: %lx (expected: %lx)",
		    obs_pc, CPU_ON_ENTRY_ADDR);
	TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
		    "unexpected target context id: %lx (expected: %lx)",
		    obs_x0, CPU_ON_CONTEXT_ID);
}

static void guest_test_cpu_on(uint64_t target_cpu)
{
	uint64_t target_state;

	GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));

	/* Poll AFFINITY_INFO until the target is reported as powered on. */
	do {
		target_state = psci_affinity_info(target_cpu, 0);

		GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
			     (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
	} while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);

	GUEST_DONE();
}
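
/*
 * Host side of the CPU_ON test: power off the target vCPU, hand its MPIDR to
 * the source vCPU, run the source until the guest reports completion, then
 * verify the target's observable state from userspace.
 */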
static void host_test_cpu_on(void)
{
	uint64_t target_mpidr;
	struct kvm_vm *vm;
	struct ucall uc;

	vm = setup_vm(guest_test_cpu_on);

	/*
	 * Make sure the target vCPU is already powered off when executing the
	 * test.
	 */
	vcpu_power_off(vm, VCPU_ID_TARGET);

	get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
	vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK);
	enter_guest(vm, VCPU_ID_SOURCE);

	if (get_ucall(vm, VCPU_ID_SOURCE, &uc) != UCALL_DONE)
		TEST_FAIL("Unhandled ucall: %lu", uc.cmd);

	assert_vcpu_reset(vm, VCPU_ID_TARGET);
	kvm_vm_free(vm);
}

static void enable_system_suspend(struct kvm_vm *vm)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_SYSTEM_SUSPEND,
	};

	vm_enable_cap(vm, &cap);
}

static void guest_test_system_suspend(void)
{
	uint64_t ret;

	/* Assert that SYSTEM_SUSPEND is discoverable via PSCI_FEATURES. */
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
	GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));

	ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
	GUEST_SYNC(ret);
}

/*
 * Host side of the SYSTEM_SUSPEND test: the guest's SYSTEM_SUSPEND call should
 * exit to userspace as a KVM_SYSTEM_EVENT_SUSPEND system event.
 */
static void host_test_system_suspend(void)
{
	struct kvm_run *run;
	struct kvm_vm *vm;

	vm = setup_vm(guest_test_system_suspend);
	enable_system_suspend(vm);

	vcpu_power_off(vm, VCPU_ID_TARGET);
	run = vcpu_state(vm, VCPU_ID_SOURCE);

	enter_guest(vm, VCPU_ID_SOURCE);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
		    "Unhandled exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
		    "Unhandled system event: %u (expected: %u)",
		    run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);

	kvm_vm_free(vm);
}

int main(void)
{
	if (!kvm_check_cap(KVM_CAP_ARM_SYSTEM_SUSPEND)) {
		print_skip("KVM_CAP_ARM_SYSTEM_SUSPEND not supported");
		exit(KSFT_SKIP);
	}

	host_test_cpu_on();
	host_test_system_suspend();
	return 0;
}