hypercalls.c (12857B)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#define KVM_ARM_SMCCC_STD_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES				\
	GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES			\
	GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)

static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
	struct system_time_snapshot systime_snapshot;
	u64 cycles = ~0UL;
	u32 feature;

	/*
	 * System time and counter value must be captured at the same
	 * time to keep consistency and precision.
	 */
	ktime_get_snapshot(&systime_snapshot);

	/*
	 * This is only valid if the current clocksource is the
	 * architected counter, as this is the only one the guest
	 * can see.
	 */
	if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
		return;

	/*
	 * The guest selects one of the two reference counters
	 * (virtual or physical) with the first argument of the SMCCC
	 * call. In case the identifier is not supported, error out.
	 */
	feature = smccc_get_arg1(vcpu);
	switch (feature) {
	case KVM_PTP_VIRT_COUNTER:
		cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
		break;
	case KVM_PTP_PHYS_COUNTER:
		cycles = systime_snapshot.cycles;
		break;
	default:
		return;
	}

	/*
	 * This relies on the top bit of val[0] never being set for
	 * valid values of system time, because that is *really* far
	 * in the future (about 292 years from 1970, and at that stage
	 * nobody will give a damn about it).
	 */
	val[0] = upper_32_bits(systime_snapshot.real);
	val[1] = lower_32_bits(systime_snapshot.real);
	val[2] = upper_32_bits(cycles);
	val[3] = lower_32_bits(cycles);
}
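/*
 * For reference, a guest-side caller of this service might look like the
 * sketch below (modelled on the in-tree ptp_kvm driver; the actual flow
 * lives in drivers/ptp/ptp_kvm_arm.c and may differ, and the variable
 * names here are illustrative):
 *
 *	struct arm_smccc_res res;
 *
 *	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID,
 *			     KVM_PTP_VIRT_COUNTER, &res);
 *	if ((long)res.a0 < 0)
 *		return -EOPNOTSUPP;
 *	wall_clock = ((u64)res.a0 << 32) | res.a1;	// ns since epoch
 *	cycles     = ((u64)res.a2 << 32) | res.a3;	// counter snapshot
 *
 * The error check relies on the top bit of a0 never being set for valid
 * times, as explained above.
 */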
static bool kvm_hvc_call_default_allowed(u32 func_id)
{
	switch (func_id) {
	/*
	 * List of function-ids that are not gated with the bitmapped
	 * feature firmware registers, and are to be allowed for
	 * servicing the call by default.
	 */
	case ARM_SMCCC_VERSION_FUNC_ID:
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		return true;
	default:
		/* PSCI 0.2 and up is in the 0:0x1f range */
		if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
		    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
			return true;

		/*
		 * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
		 * its own function-id base and range
		 */
		if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
			return true;

		return false;
	}
}

static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

	switch (func_id) {
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
				&smccc_feat->std_bmap);
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
	case ARM_SMCCC_HV_PV_TIME_ST:
		return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				&smccc_feat->std_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
				&smccc_feat->vendor_hyp_bmap);
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
				&smccc_feat->vendor_hyp_bmap);
	default:
		return kvm_hvc_call_default_allowed(func_id);
	}
}
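/*
 * Concretely (illustrative, not exercised by this file):
 * PSCI_0_2_FN_PSCI_VERSION is 0x84000000, so ARM_SMCCC_OWNER_NUM() yields
 * ARM_SMCCC_OWNER_STANDARD (4) and ARM_SMCCC_FUNC_NUM() yields 0, which
 * falls in the 0:0x1f PSCI range and is therefore allowed by default:
 *
 *	BUILD_BUG_ON(ARM_SMCCC_OWNER_NUM(PSCI_0_2_FN_PSCI_VERSION) !=
 *		     ARM_SMCCC_OWNER_STANDARD);
 *	BUILD_BUG_ON(ARM_SMCCC_FUNC_NUM(PSCI_0_2_FN_PSCI_VERSION) > 0x1f);
 */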
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	u32 func_id = smccc_get_function(vcpu);
	u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
	u32 feature;
	gpa_t gpa;

	if (!kvm_hvc_call_allowed(vcpu, func_id))
		goto out;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val[0] = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (arm64_get_spectre_v2_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (arm64_get_spectre_v4_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				/*
				 * SSBS everywhere: Indicate no firmware
				 * support, as the SSBS support will be
				 * indicated to the guest and the default is
				 * safe.
				 *
				 * Otherwise, expose a permanent mitigation
				 * to the guest, and hide SSBS so that the
				 * guest stays protected.
				 */
				if (cpus_have_final_cap(ARM64_SSBS))
					break;
				fallthrough;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_3:
			switch (arm64_get_spectre_bhb_state()) {
			case SPECTRE_VULNERABLE:
				break;
			case SPECTRE_MITIGATED:
				val[0] = SMCCC_RET_SUCCESS;
				break;
			case SPECTRE_UNAFFECTED:
				val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
				     &smccc_feat->std_hyp_bmap))
				val[0] = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val[0] = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != GPA_INVALID)
			val[0] = gpa;
		break;
	case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
		val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
		val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
		val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
		val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
		val[0] = smccc_feat->vendor_hyp_bmap;
		break;
	case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
		kvm_ptp_get_time(vcpu, val);
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

out:
	smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
	return 1;
}

static const u64 kvm_arm_fw_reg_ids[] = {
	KVM_REG_ARM_PSCI_VERSION,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
	KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
	KVM_REG_ARM_STD_BMAP,
	KVM_REG_ARM_STD_HYP_BMAP,
	KVM_REG_ARM_VENDOR_HYP_BMAP,
};

void kvm_arm_init_hypercalls(struct kvm *kvm)
{
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

	smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
	smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
	smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
}

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
		if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
			return -EFAULT;
	}

	return 0;
}
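/*
 * From the VMM side, these pseudo-registers are reported by KVM_GET_REG_LIST
 * and accessed like any other register. A minimal (hypothetical) userspace
 * read of the PSCI version could look like:
 *
 *	u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_PSCI_VERSION,
 *		.addr = (u64)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
 *		err(1, "KVM_GET_ONE_REG");
 *
 * where vcpu_fd is a vcpu file descriptor obtained via KVM_CREATE_VCPU.
 */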
#define KVM_REG_FEATURE_LEVEL_MASK	GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (arm64_get_spectre_v2_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (arm64_get_spectre_v4_state()) {
		case SPECTRE_MITIGATED:
			/*
			 * As for the hypercall discovery, we pretend we
			 * don't have any FW mitigation if SSBS is there at
			 * all times.
			 */
			if (cpus_have_final_cap(ARM64_SSBS))
				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			fallthrough;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		}
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		switch (arm64_get_spectre_bhb_state()) {
		case SPECTRE_VULNERABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
		case SPECTRE_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
		case SPECTRE_UNAFFECTED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
	}

	return -EINVAL;
}
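/*
 * The returned levels are numerically ordered, e.g. for WORKAROUND_1:
 * NOT_AVAIL (0) < AVAIL (1) < NOT_REQUIRED (2), assuming those UAPI values.
 * That ordering is what allows the plain "<" comparisons in
 * kvm_arm_set_fw_reg() below, which boil down to:
 *
 *	// userspace may request the same or a *lower* level only
 *	if (get_kernel_wa_level(reg->id) < val)
 *		return -EINVAL;
 */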
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_STD_BMAP:
		val = READ_ONCE(smccc_feat->std_bmap);
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		val = READ_ONCE(smccc_feat->std_hyp_bmap);
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
	int ret = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
	unsigned long *fw_reg_bmap, fw_reg_features;

	switch (reg_id) {
	case KVM_REG_ARM_STD_BMAP:
		fw_reg_bmap = &smccc_feat->std_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
		break;
	case KVM_REG_ARM_STD_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->std_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
		break;
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
		fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
		break;
	default:
		return -ENOENT;
	}

	/* Check for unsupported bit */
	if (val & ~fw_reg_features)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
	    val != *fw_reg_bmap) {
		ret = -EBUSY;
		goto out;
	}

	WRITE_ONCE(*fw_reg_bmap, val);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
		case KVM_ARM_PSCI_1_1:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
			return -EINVAL;

		/*
		 * Map all the possible incoming states to the only two we
		 * really want to deal with.
		 */
		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
			break;
		default:
			return -EINVAL;
		}

		/*
		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
		 * other way around.
		 */
		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		return 0;
	case KVM_REG_ARM_STD_BMAP:
	case KVM_REG_ARM_STD_HYP_BMAP:
	case KVM_REG_ARM_VENDOR_HYP_BMAP:
		return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
	default:
		return -ENOENT;
	}

	return -EINVAL;
}
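/*
 * Userspace can use KVM_SET_ONE_REG on the bitmap registers handled above to
 * hide individual hypercall services before any vCPU has run. A hypothetical
 * VMM snippet disabling the TRNG service for a guest:
 *
 *	u64 val = 0;	// clear KVM_REG_ARM_STD_BIT_TRNG_V1_0
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_STD_BMAP,
 *		.addr = (u64)&val,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
 *		err(1, "KVM_SET_ONE_REG");
 *
 * Once a vCPU has run (KVM_ARCH_FLAG_HAS_RAN_ONCE is set), any write that
 * changes the value returns -EBUSY, as enforced in kvm_arm_set_fw_reg_bmap().
 */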