From 54effa653246c35997f5e990e0134be5be09f9d1 Mon Sep 17 00:00:00 2001
From: Quentin Perret
Date: Wed, 3 Feb 2021 14:19:30 +0000
Subject: asm-generic: export: Stub EXPORT_SYMBOL with __DISABLE_EXPORTS

It is currently possible to stub EXPORT_SYMBOL() macros in C code using
__DISABLE_EXPORTS, which is necessary to run in constrained environments
such as the EFI stub or the decompressor. But this currently doesn't
apply to exports from assembly, which can lead to somewhat confusing
situations.

Consolidate the __DISABLE_EXPORTS infrastructure by checking it from
asm-generic/export.h as well.

Signed-off-by: Quentin Perret
Acked-by: Will Deacon
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210203141931.615898-2-qperret@google.com
---
 include/asm-generic/export.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 365345f9a9e3..07a36a874dca 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -33,7 +33,7 @@
  */
 .macro ___EXPORT_SYMBOL name,val,sec
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
 	.section ___ksymtab\sec+\name,"a"
 	.balign KSYM_ALIGN
 __ksymtab_\name:
--
cgit v1.2.3-71-gd317


From 2c07ded06427dd3339278487a1413d5e478f05f9 Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Mon, 4 Jan 2021 09:17:49 -0600
Subject: KVM/SVM: add support for SEV attestation command

SEV FW version >= 0.23 adds a new command that can be used to query the
attestation report containing the SHA-256 digest of the guest memory
encrypted through the KVM_SEV_LAUNCH_UPDATE_{DATA, VMSA} commands and to
sign the report with the Platform Endorsement Key (PEK). See the SEV FW
API spec section 6.8 for more details.

Note there already exists a command (KVM_SEV_LAUNCH_MEASURE) that can be
used to get the SHA-256 digest. The main difference between
KVM_SEV_LAUNCH_MEASURE and KVM_SEV_ATTESTATION_REPORT is that the latter
can be called while the guest is running and the measurement value is
signed with the PEK.

Cc: James Bottomley
Cc: Tom Lendacky
Cc: David Rientjes
Cc: Paolo Bonzini
Cc: Sean Christopherson
Cc: Borislav Petkov
Cc: John Allen
Cc: Herbert Xu
Cc: linux-crypto@vger.kernel.org
Reviewed-by: Tom Lendacky
Acked-by: David Rientjes
Tested-by: James Bottomley
Signed-off-by: Brijesh Singh
Message-Id: <20210104151749.30248-1-brijesh.singh@amd.com>
Signed-off-by: Paolo Bonzini
---
 Documentation/virt/kvm/amd-memory-encryption.rst | 21 +++++++
 arch/x86/kvm/svm/sev.c                           | 71 ++++++++++++++++++++++++
 drivers/crypto/ccp/sev-dev.c                     |  1 +
 include/linux/psp-sev.h                          | 17 ++++++
 include/uapi/linux/kvm.h                         |  8 +++
 5 files changed, 118 insertions(+)

(limited to 'include')

diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst
index 09a8f2a34e39..469a6308765b 100644
--- a/Documentation/virt/kvm/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/amd-memory-encryption.rst
@@ -263,6 +263,27 @@ Returns: 0 on success, -negative on error
         __u32 trans_len;
 };
 
+10. KVM_SEV_GET_ATTESTATION_REPORT
+----------------------------------
+
+The KVM_SEV_GET_ATTESTATION_REPORT command can be used by the hypervisor to query the attestation
+report containing the SHA-256 digest of the guest memory and VMSA passed through the KVM_SEV_LAUNCH
+commands and signed with the PEK. The digest returned by the command should match the digest
+used by the guest owner with the KVM_SEV_LAUNCH_MEASURE command.
+ +Parameters (in): struct kvm_sev_attestation + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_attestation_report { + __u8 mnonce[16]; /* A random mnonce that will be placed in the report */ + + __u64 uaddr; /* userspace address where the report should be copied */ + __u32 len; + }; + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 48017fef1cd9..8dfe8988be8d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1041,6 +1041,74 @@ e_unpin_memory: return ret; } +static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + void __user *report = (void __user *)(uintptr_t)argp->data; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data; + struct kvm_sev_attestation_report params; + void __user *p; + void *blob = NULL; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (!data) + return -ENOMEM; + + /* User wants to query the blob length */ + if (!params.len) + goto cmd; + + p = (void __user *)(uintptr_t)params.uaddr; + if (p) { + if (params.len > SEV_FW_BLOB_MAX_SIZE) { + ret = -EINVAL; + goto e_free; + } + + ret = -ENOMEM; + blob = kmalloc(params.len, GFP_KERNEL); + if (!blob) + goto e_free; + + data->address = __psp_pa(blob); + data->len = params.len; + memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce)); + } +cmd: + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error); + /* + * If we query the session length, FW responded with expected data. + */ + if (!params.len) + goto done; + + if (ret) + goto e_free_blob; + + if (blob) { + if (copy_to_user(p, blob, params.len)) + ret = -EFAULT; + } + +done: + params.len = data->len; + if (copy_to_user(report, ¶ms, sizeof(params))) + ret = -EFAULT; +e_free_blob: + kfree(blob); +e_free: + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1091,6 +1159,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_LAUNCH_SECRET: r = sev_launch_secret(kvm, &sev_cmd); break; + case KVM_SEV_GET_ATTESTATION_REPORT: + r = sev_get_attestation_report(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 476113e12489..cb9b4c4e371e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -128,6 +128,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); + case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); default: return 0; } diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 49d155cd2dfe..b801ead1e2bb 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -66,6 +66,7 @@ enum sev_cmd { SEV_CMD_LAUNCH_MEASURE = 0x033, SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034, SEV_CMD_LAUNCH_FINISH = 0x035, + SEV_CMD_ATTESTATION_REPORT = 0x036, /* Guest migration commands (outgoing) */ SEV_CMD_SEND_START = 0x040, @@ -483,6 +484,22 @@ struct sev_data_dbg { u32 len; /* In */ } __packed; +/** + * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters 
+ *
+ * @handle: handle of the VM
+ * @mnonce: a random nonce that will be included in the report.
+ * @address: physical address where the report will be copied.
+ * @len: length of the physical buffer.
+ */
+struct sev_data_attestation_report {
+	u32 handle;			/* In */
+	u32 reserved;
+	u64 address;			/* In */
+	u8 mnonce[16];			/* In */
+	u32 len;			/* In/Out */
+} __packed;
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
 /**
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 374c67875cdb..07c194e2c302 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1593,6 +1593,8 @@ enum sev_cmd_id {
 	KVM_SEV_DBG_ENCRYPT,
 	/* Guest certificates commands */
 	KVM_SEV_CERT_EXPORT,
+	/* Attestation report */
+	KVM_SEV_GET_ATTESTATION_REPORT,
 
 	KVM_SEV_NR_MAX,
 };
@@ -1645,6 +1647,12 @@ struct kvm_sev_dbg {
 	__u32 len;
 };
 
+struct kvm_sev_attestation_report {
+	__u8 mnonce[16];
+	__u64 uaddr;
+	__u32 len;
+};
+
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
 #define KVM_DEV_ASSIGN_MASK_INTX	(1 << 2)
--
cgit v1.2.3-71-gd317


From fe6b6bc802b40081e8a7a1abe8d32b88d10a03e1 Mon Sep 17 00:00:00 2001
From: Chenyi Qiang
Date: Fri, 6 Nov 2020 17:03:14 +0800
Subject: KVM: VMX: Enable bus lock VM exit

A virtual machine can exploit bus locks to degrade the performance of
the system. A bus lock can be caused by a split locked access to
writeback (WB) memory or by using locks on uncacheable (UC) memory. A
bus lock is typically >1000 cycles slower than an atomic operation
within a cache line, and it also disrupts performance on other cores
(which must wait for the bus lock to be released before their memory
operations can complete).

To address the threat, a bus lock VM exit is introduced to notify the
VMM when a bus lock is acquired, allowing it to enforce throttling or
other policy-based mitigations.

A VMM can enable VM exits due to bus locks by setting the new "Bus Lock
Detection" VM-execution control (bit 30 of the Secondary Processor-based
VM-execution controls). If delivery of this VM exit is preempted by a
higher-priority VM exit (e.g. EPT misconfiguration, EPT violation, APIC
access VM exit, APIC write VM exit, exception bitmap exiting), bit 26 of
the exit reason field in the VMCS is set to 1.

In the current implementation, KVM exposes this capability through
KVM_CAP_X86_BUS_LOCK_EXIT. The user can query the supported mode bitmap
(i.e. off and exit) and must enable the feature explicitly (it is
disabled by default). If KVM detects a bus lock in the guest, it exits
to user space even when the current exit reason is otherwise handled by
KVM internally, and it sets the new KVM_RUN_X86_BUS_LOCK flag in
vcpu->run->flags to inform user space that a bus lock was detected in
the guest (see the usage sketch below).

Documentation for the bus lock VM exit is available in the latest
"Intel Architecture Instruction Set Extensions Programming Reference".
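For illustration, a VMM might probe and enable the capability roughly as
follows. This is only a sketch based on the interface added below: vm_fd
(the VM file descriptor), run (the mapped kvm_run structure) and
throttle_vcpu() are assumed or hypothetical, and error handling is
omitted.

	/* Probe the supported modes and opt in to bus lock exits. */
	int modes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_BUS_LOCK_EXIT);

	if (modes & KVM_BUS_LOCK_DETECTION_EXIT) {
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_X86_BUS_LOCK_EXIT,
			.args[0] = KVM_BUS_LOCK_DETECTION_EXIT,
		};
		ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}

	/* After KVM_RUN: a detected bus lock is flagged on the run page. */
	if (run->exit_reason == KVM_EXIT_X86_BUS_LOCK ||
	    (run->flags & KVM_RUN_X86_BUS_LOCK))
		throttle_vcpu();	/* hypothetical policy hook */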
Document Link: https://software.intel.com/content/www/us/en/develop/download/intel-architecture-instruction-set-extensions-programming-reference.html Co-developed-by: Xiaoyao Li Signed-off-by: Xiaoyao Li Signed-off-by: Chenyi Qiang Message-Id: <20201106090315.18606-4-chenyi.qiang@intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 7 +++++++ arch/x86/include/asm/vmx.h | 1 + arch/x86/include/asm/vmxfeatures.h | 1 + arch/x86/include/uapi/asm/kvm.h | 1 + arch/x86/include/uapi/asm/vmx.h | 4 +++- arch/x86/kvm/vmx/capabilities.h | 6 ++++++ arch/x86/kvm/vmx/vmx.c | 37 +++++++++++++++++++++++++++++++++++-- arch/x86/kvm/vmx/vmx.h | 2 +- arch/x86/kvm/x86.c | 23 +++++++++++++++++++++++ include/uapi/linux/kvm.h | 5 +++++ 10 files changed, 83 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ed575c5655dd..c0706022d22f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -52,6 +52,9 @@ #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \ KVM_DIRTY_LOG_INITIALLY_SET) +#define KVM_BUS_LOCK_DETECTION_VALID_MODE (KVM_BUS_LOCK_DETECTION_OFF | \ + KVM_BUS_LOCK_DETECTION_EXIT) + /* x86-specific vcpu->requests bit members */ #define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0) #define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1) @@ -996,6 +999,8 @@ struct kvm_arch { struct msr_bitmap_range ranges[16]; } msr_filter; + bool bus_lock_detection_enabled; + struct kvm_pmu_event_filter *pmu_event_filter; struct task_struct *nx_lpage_recovery_thread; @@ -1418,6 +1423,8 @@ extern u8 kvm_tsc_scaling_ratio_frac_bits; extern u64 kvm_max_tsc_scaling_ratio; /* 1ull << kvm_tsc_scaling_ratio_frac_bits */ extern u64 kvm_default_tsc_scaling_ratio; +/* bus lock detection supported? 
*/ +extern bool kvm_has_bus_lock_exit; extern u64 kvm_mce_cap_supported; diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 38ca445a8429..358707f60d99 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -73,6 +73,7 @@ #define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA) #define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING) #define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE) +#define SECONDARY_EXEC_BUS_LOCK_DETECTION VMCS_CONTROL_BIT(BUS_LOCK_DETECTION) #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index 9915990fd8cf..d9a74681a77d 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -83,5 +83,6 @@ #define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */ #define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */ #define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */ +#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */ #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 8e76d3701db3..5a3022c8af82 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -112,6 +112,7 @@ struct kvm_ioapic_state { #define KVM_NR_IRQCHIPS 3 #define KVM_RUN_X86_SMM (1 << 0) +#define KVM_RUN_X86_BUS_LOCK (1 << 1) /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index ada955c5ebb6..b8e650a985e3 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -89,6 +89,7 @@ #define EXIT_REASON_XRSTORS 64 #define EXIT_REASON_UMWAIT 67 #define EXIT_REASON_TPAUSE 68 +#define EXIT_REASON_BUS_LOCK 74 #define VMX_EXIT_REASONS \ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ @@ -150,7 +151,8 @@ { EXIT_REASON_XSAVES, "XSAVES" }, \ { EXIT_REASON_XRSTORS, "XRSTORS" }, \ { EXIT_REASON_UMWAIT, "UMWAIT" }, \ - { EXIT_REASON_TPAUSE, "TPAUSE" } + { EXIT_REASON_TPAUSE, "TPAUSE" }, \ + { EXIT_REASON_BUS_LOCK, "BUS_LOCK" } #define VMX_EXIT_REASON_FLAGS \ { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" } diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 3a1861403d73..5366ccdd134c 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -262,6 +262,12 @@ static inline bool cpu_has_vmx_tsc_scaling(void) SECONDARY_EXEC_TSC_SCALING; } +static inline bool cpu_has_vmx_bus_lock_detection(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_BUS_LOCK_DETECTION; +} + static inline bool cpu_has_vmx_apicv(void) { return cpu_has_vmx_apic_register_virt() && diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 880a2617820c..af8d3a2db3d2 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2428,7 +2428,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf, SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX | - SECONDARY_EXEC_ENABLE_VMFUNC; + SECONDARY_EXEC_ENABLE_VMFUNC | + SECONDARY_EXEC_BUS_LOCK_DETECTION; if (cpu_has_sgx()) opt2 |= SECONDARY_EXEC_ENCLS_EXITING; if (adjust_vmx_controls(min2, opt2, @@ -4269,6 +4270,9 @@ static void 
vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, ENABLE_USR_WAIT_PAUSE, false); + if (!vcpu->kvm->arch.bus_lock_detection_enabled) + exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION; + vmx->secondary_exec_control = exec_control; } @@ -5600,6 +5604,13 @@ static int handle_encls(struct kvm_vcpu *vcpu) return 1; } +static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu) +{ + vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; + vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; + return 0; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -5656,6 +5667,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_VMFUNC] = handle_vmx_instruction, [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, [EXIT_REASON_ENCLS] = handle_encls, + [EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit, }; static const int kvm_vmx_max_exit_handlers = @@ -5908,7 +5920,7 @@ void dump_vmcs(void) * The guest has exited. See if we can fix it or if we need userspace * assistance. */ -static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) { struct vcpu_vmx *vmx = to_vmx(vcpu); union vmx_exit_reason exit_reason = vmx->exit_reason; @@ -6061,6 +6073,25 @@ unexpected_vmexit: return 0; } +static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + int ret = __vmx_handle_exit(vcpu, exit_fastpath); + + /* + * Even when current exit reason is handled by KVM internally, we + * still need to exit to user space when bus lock detected to inform + * that there is a bus lock in guest. + */ + if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { + if (ret > 0) + vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK; + + vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK; + return 0; + } + return ret; +} + /* * Software based L1D cache flush which is used when microcode providing * the cache control MSR is not loaded. 
@@ -7812,6 +7843,8 @@ static __init int hardware_setup(void) kvm_tsc_scaling_ratio_frac_bits = 48; } + kvm_has_bus_lock_exit = cpu_has_vmx_bus_lock_detection(); + set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ if (enable_ept) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 4dd71b7494ea..cf335e94f198 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -83,7 +83,7 @@ union vmx_exit_reason { u32 reserved23 : 1; u32 reserved24 : 1; u32 reserved25 : 1; - u32 reserved26 : 1; + u32 bus_lock_detected : 1; u32 enclave_mode : 1; u32 smi_pending_mtf : 1; u32 smi_from_vmx_root : 1; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 90a2335498a2..8719877fe799 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -136,6 +136,8 @@ u64 __read_mostly kvm_max_tsc_scaling_ratio; EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio); u64 __read_mostly kvm_default_tsc_scaling_ratio; EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio); +bool __read_mostly kvm_has_bus_lock_exit; +EXPORT_SYMBOL_GPL(kvm_has_bus_lock_exit); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 __read_mostly tsc_tolerance_ppm = 250; @@ -3843,6 +3845,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_STEAL_TIME: r = sched_info_on(); break; + case KVM_CAP_X86_BUS_LOCK_EXIT: + if (kvm_has_bus_lock_exit) + r = KVM_BUS_LOCK_DETECTION_OFF | + KVM_BUS_LOCK_DETECTION_EXIT; + else + r = 0; + break; default: break; } @@ -5298,6 +5307,20 @@ split_irqchip_unlock: kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; break; + case KVM_CAP_X86_BUS_LOCK_EXIT: + r = -EINVAL; + if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE) + break; + + if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) && + (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)) + break; + + if (kvm_has_bus_lock_exit && + cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT) + kvm->arch.bus_lock_detection_enabled = true; + r = 0; + break; default: r = -EINVAL; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 07c194e2c302..dfe3ba5cf262 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -252,6 +252,7 @@ struct kvm_hyperv_exit { #define KVM_EXIT_X86_WRMSR 30 #define KVM_EXIT_DIRTY_RING_FULL 31 #define KVM_EXIT_AP_RESET_HOLD 32 +#define KVM_EXIT_X86_BUS_LOCK 33 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -1058,6 +1059,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 #define KVM_CAP_SYS_HYPERV_CPUID 191 #define KVM_CAP_DIRTY_LOG_RING 192 +#define KVM_CAP_X86_BUS_LOCK_EXIT 193 #ifdef KVM_CAP_IRQ_ROUTING @@ -1774,4 +1776,7 @@ struct kvm_dirty_gfn { __u64 offset; }; +#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) +#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) + #endif /* __LINUX_KVM_H */ -- cgit v1.2.3-71-gd317 From 26128cb6c7e6731fe644c687af97733adfdb5ee9 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:12 -0800 Subject: locking/rwlocks: Add contention detection for rwlocks rwlocks do not currently have any facility to detect contention like spinlocks do. In order to allow users of rwlocks to better manage latency, add contention detection for queued rwlocks. 
CC: Ingo Molnar CC: Will Deacon Acked-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-7-bgardon@google.com> Signed-off-by: Paolo Bonzini --- include/asm-generic/qrwlock.h | 24 ++++++++++++++++++------ include/linux/rwlock.h | 7 +++++++ 2 files changed, 25 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h index 84ce841ce735..0020d3b820a7 100644 --- a/include/asm-generic/qrwlock.h +++ b/include/asm-generic/qrwlock.h @@ -14,6 +14,7 @@ #include #include +#include /* * Writer states & reader shift and bias. @@ -116,15 +117,26 @@ static inline void queued_write_unlock(struct qrwlock *lock) smp_store_release(&lock->wlocked, 0); } +/** + * queued_rwlock_is_contended - check if the lock is contended + * @lock : Pointer to queue rwlock structure + * Return: 1 if lock contended, 0 otherwise + */ +static inline int queued_rwlock_is_contended(struct qrwlock *lock) +{ + return arch_spin_is_locked(&lock->wait_lock); +} + /* * Remapping rwlock architecture specific functions to the corresponding * queue rwlock functions. */ -#define arch_read_lock(l) queued_read_lock(l) -#define arch_write_lock(l) queued_write_lock(l) -#define arch_read_trylock(l) queued_read_trylock(l) -#define arch_write_trylock(l) queued_write_trylock(l) -#define arch_read_unlock(l) queued_read_unlock(l) -#define arch_write_unlock(l) queued_write_unlock(l) +#define arch_read_lock(l) queued_read_lock(l) +#define arch_write_lock(l) queued_write_lock(l) +#define arch_read_trylock(l) queued_read_trylock(l) +#define arch_write_trylock(l) queued_write_trylock(l) +#define arch_read_unlock(l) queued_read_unlock(l) +#define arch_write_unlock(l) queued_write_unlock(l) +#define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l) #endif /* __ASM_GENERIC_QRWLOCK_H */ diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 3dcd617e65ae..7ce9a51ae5c0 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -128,4 +128,11 @@ do { \ 1 : ({ local_irq_restore(flags); 0; }); \ }) +#ifdef arch_rwlock_is_contended +#define rwlock_is_contended(lock) \ + arch_rwlock_is_contended(&(lock)->raw_lock) +#else +#define rwlock_is_contended(lock) ((void)(lock), 0) +#endif /* arch_rwlock_is_contended */ + #endif /* __LINUX_RWLOCK_H */ -- cgit v1.2.3-71-gd317 From a09a689a534183c48f200bc2de1ae61ae9c462ad Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:13 -0800 Subject: sched: Add needbreak for rwlocks Contention awareness while holding a spin lock is essential for reducing latency when long running kernel operations can hold that lock. Add the same contention detection interface for read/write spin locks. CC: Ingo Molnar CC: Will Deacon Acked-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-8-bgardon@google.com> Signed-off-by: Paolo Bonzini --- include/linux/sched.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 6e3a5eeec509..5d1378e5a040 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1912,6 +1912,23 @@ static inline int spin_needbreak(spinlock_t *lock) #endif } +/* + * Check if a rwlock is contended. + * Returns non-zero if there is another task waiting on the rwlock. 
+ * Returns zero if the lock is not contended or the system / underlying + * rwlock implementation does not support contention detection. + * Technically does not depend on CONFIG_PREEMPTION, but a general need + * for low latency. + */ +static inline int rwlock_needbreak(rwlock_t *lock) +{ +#ifdef CONFIG_PREEMPTION + return rwlock_is_contended(lock); +#else + return 0; +#endif +} + static __always_inline bool need_resched(void) { return unlikely(tif_need_resched()); -- cgit v1.2.3-71-gd317 From f3d4b4b1dc1c5fb9ea17cac14133463bfe72f170 Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:14 -0800 Subject: sched: Add cond_resched_rwlock Safely rescheduling while holding a spin lock is essential for keeping long running kernel operations running smoothly. Add the facility to cond_resched rwlocks. CC: Ingo Molnar CC: Will Deacon Acked-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Paolo Bonzini Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-9-bgardon@google.com> Signed-off-by: Paolo Bonzini --- include/linux/sched.h | 12 ++++++++++++ kernel/sched/core.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 5d1378e5a040..3052d16da3cf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1883,12 +1883,24 @@ static inline int _cond_resched(void) { return 0; } }) extern int __cond_resched_lock(spinlock_t *lock); +extern int __cond_resched_rwlock_read(rwlock_t *lock); +extern int __cond_resched_rwlock_write(rwlock_t *lock); #define cond_resched_lock(lock) ({ \ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ }) +#define cond_resched_rwlock_read(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_rwlock_read(lock); \ +}) + +#define cond_resched_rwlock_write(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_rwlock_write(lock); \ +}) + static inline void cond_resched_rcu(void) { #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 15d2562118d1..ade357642279 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6695,6 +6695,46 @@ int __cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(__cond_resched_lock); +int __cond_resched_rwlock_read(rwlock_t *lock) +{ + int resched = should_resched(PREEMPT_LOCK_OFFSET); + int ret = 0; + + lockdep_assert_held_read(lock); + + if (rwlock_needbreak(lock) || resched) { + read_unlock(lock); + if (resched) + preempt_schedule_common(); + else + cpu_relax(); + ret = 1; + read_lock(lock); + } + return ret; +} +EXPORT_SYMBOL(__cond_resched_rwlock_read); + +int __cond_resched_rwlock_write(rwlock_t *lock) +{ + int resched = should_resched(PREEMPT_LOCK_OFFSET); + int ret = 0; + + lockdep_assert_held_write(lock); + + if (rwlock_needbreak(lock) || resched) { + write_unlock(lock); + if (resched) + preempt_schedule_common(); + else + cpu_relax(); + ret = 1; + write_lock(lock); + } + return ret; +} +EXPORT_SYMBOL(__cond_resched_rwlock_write); + /** * yield - yield the current processor to other threads. * -- cgit v1.2.3-71-gd317 From 531810caa9f4bc99ffbb90e09256792c56a6b07a Mon Sep 17 00:00:00 2001 From: Ben Gardon Date: Tue, 2 Feb 2021 10:57:24 -0800 Subject: KVM: x86/mmu: Use an rwlock for the x86 MMU Add a read / write lock to be used in place of the MMU spinlock on x86. 
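The core conversion pattern, repeated throughout the patch, is sketched
below using the rwlock_needbreak() and cond_resched_rwlock_write()
helpers introduced by the previous patches (sketch only; the real call
sites also commit zapped pages and flush TLBs before yielding):

	write_lock(&kvm->mmu_lock);
	/* ... walk or zap page tables ... */
	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
		cond_resched_rwlock_write(&kvm->mmu_lock);
	write_unlock(&kvm->mmu_lock);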
The rwlock will enable the TDP MMU to handle page faults, and other operations in parallel in future commits. Reviewed-by: Peter Feiner Signed-off-by: Ben Gardon Message-Id: <20210202185734.1680553-19-bgardon@google.com> [Introduce virt/kvm/mmu_lock.h - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/mmu/mmu.c | 90 ++++++++++++++++++++--------------------- arch/x86/kvm/mmu/page_track.c | 8 ++-- arch/x86/kvm/mmu/paging_tmpl.h | 8 ++-- arch/x86/kvm/mmu/tdp_mmu.c | 20 ++++----- arch/x86/kvm/x86.c | 4 +- include/linux/kvm_host.h | 5 +++ virt/kvm/dirty_ring.c | 5 ++- virt/kvm/kvm_main.c | 37 +++++++++-------- virt/kvm/mmu_lock.h | 23 +++++++++++ 10 files changed, 118 insertions(+), 84 deletions(-) create mode 100644 virt/kvm/mmu_lock.h (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fa7b2df6422b..c445a51244d3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -348,6 +348,8 @@ struct kvm_mmu_root_info { #define KVM_MMU_NUM_PREV_ROOTS 3 +#define KVM_HAVE_MMU_RWLOCK + struct kvm_mmu_page; /* diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 5b364ff9c115..329930d57774 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -2010,9 +2010,9 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu, flush |= kvm_sync_page(vcpu, sp, &invalid_list); mmu_pages_clear_parents(&parents); } - if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); - cond_resched_lock(&vcpu->kvm->mmu_lock); + cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); flush = false; } } @@ -2464,7 +2464,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu) */ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) { - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages - @@ -2475,7 +2475,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) @@ -2486,7 +2486,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) pgprintk("%s: looking for gfn %llx\n", __func__, gfn); r = 0; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for_each_gfn_indirect_valid_sp(kvm, sp, gfn) { pgprintk("%s: gfn %llx role %x\n", __func__, gfn, sp->role.word); @@ -2494,7 +2494,7 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return r; } @@ -3186,7 +3186,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, return; } - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) @@ -3209,7 +3209,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); @@ -3230,16 +3230,16 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, { struct kvm_mmu_page *sp; - 
spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (make_mmu_pages_available(vcpu)) { - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return INVALID_PAGE; } sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL); ++sp->root_count; - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return __pa(sp->spt); } @@ -3410,17 +3410,17 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) !smp_load_acquire(&sp->unsync_children)) return; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); mmu_sync_children(vcpu, sp); kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); return; } - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); for (i = 0; i < 4; ++i) { @@ -3434,7 +3434,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) } kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); @@ -3718,7 +3718,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, return r; r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; r = make_mmu_pages_available(vcpu); @@ -3733,7 +3733,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, prefault, is_tdp); out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } @@ -4959,7 +4959,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, */ mmu_topup_memory_caches(vcpu, true); - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); @@ -4991,7 +4991,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, } kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) @@ -5189,14 +5189,14 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, if (iterator.rmap) flush |= fn(kvm, iterator.rmap); - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { if (flush && lock_flush_tlb) { kvm_flush_remote_tlbs_with_address(kvm, start_gfn, iterator.gfn - start_gfn + 1); flush = false; } - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } } @@ -5346,7 +5346,7 @@ restart: * be in active use by the guest. 
*/ if (batch >= BATCH_ZAP_PAGES && - cond_resched_lock(&kvm->mmu_lock)) { + cond_resched_rwlock_write(&kvm->mmu_lock)) { batch = 0; goto restart; } @@ -5379,7 +5379,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) { lockdep_assert_held(&kvm->slots_lock); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); trace_kvm_mmu_zap_all_fast(kvm); /* @@ -5406,7 +5406,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm) if (kvm->arch.tdp_mmu_enabled) kvm_tdp_mmu_zap_all(kvm); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) @@ -5448,7 +5448,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) int i; bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { slots = __kvm_memslots(kvm, i); kvm_for_each_memslot(memslot, slots) { @@ -5472,7 +5472,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) kvm_flush_remote_tlbs(kvm); } - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } static bool slot_rmap_write_protect(struct kvm *kvm, @@ -5487,12 +5487,12 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect, start_level, KVM_MAX_HUGEPAGE_LEVEL, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); /* * We can flush all the TLBs out of the mmu lock without TLB @@ -5552,13 +5552,13 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *memslot) { /* FIXME: const-ify all uses of struct kvm_memory_slot. 
*/ - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot, kvm_mmu_zap_collapsible_spte, true); if (kvm->arch.tdp_mmu_enabled) kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, @@ -5581,11 +5581,11 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); /* * It's also safe to flush TLBs out of mmu lock here as currently this @@ -5603,12 +5603,12 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); @@ -5620,11 +5620,11 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm, { bool flush; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false); if (kvm->arch.tdp_mmu_enabled) flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); @@ -5637,14 +5637,14 @@ void kvm_mmu_zap_all(struct kvm *kvm) LIST_HEAD(invalid_list); int ign; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); restart: list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) { if (WARN_ON(sp->role.invalid)) continue; if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) goto restart; - if (cond_resched_lock(&kvm->mmu_lock)) + if (cond_resched_rwlock_write(&kvm->mmu_lock)) goto restart; } @@ -5653,7 +5653,7 @@ restart: if (kvm->arch.tdp_mmu_enabled) kvm_tdp_mmu_zap_all(kvm); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) @@ -5713,7 +5713,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) continue; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); if (kvm_has_zapped_obsolete_pages(kvm)) { kvm_mmu_commit_zap_page(kvm, @@ -5724,7 +5724,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan); unlock: - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, idx); /* @@ -5944,7 +5944,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) ulong to_zap; rcu_idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); ratio = READ_ONCE(nx_huge_pages_recovery_ratio); to_zap = ratio ? 
DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0; @@ -5969,14 +5969,14 @@ static void kvm_recover_nx_lpages(struct kvm *kvm) WARN_ON_ONCE(sp->lpage_disallowed); } - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { kvm_mmu_commit_zap_page(kvm, &invalid_list); - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); } } kvm_mmu_commit_zap_page(kvm, &invalid_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); srcu_read_unlock(&kvm->srcu, rcu_idx); } diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c index 8443a675715b..34bb0ec69bd8 100644 --- a/arch/x86/kvm/mmu/page_track.c +++ b/arch/x86/kvm/mmu/page_track.c @@ -184,9 +184,9 @@ kvm_page_track_register_notifier(struct kvm *kvm, head = &kvm->arch.track_notifier_head; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); hlist_add_head_rcu(&n->node, &head->track_notifier_list); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); } EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier); @@ -202,9 +202,9 @@ kvm_page_track_unregister_notifier(struct kvm *kvm, head = &kvm->arch.track_notifier_head; - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); hlist_del_rcu(&n->node); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); synchronize_srcu(&head->track_srcu); } EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 50e268eb8e1a..d9f66cc459e8 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -868,7 +868,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, } r = RET_PF_RETRY; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) goto out_unlock; @@ -881,7 +881,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT); out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); kvm_release_pfn_clean(pfn); return r; } @@ -919,7 +919,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) return; } - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { level = iterator.level; sptep = iterator.sptep; @@ -954,7 +954,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) break; } - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); } /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. 
*/ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 9e4009068920..f1fbed72e149 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -59,7 +59,7 @@ static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) static inline bool tdp_mmu_next_root_valid(struct kvm *kvm, struct kvm_mmu_page *root) { - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link)) return false; @@ -117,7 +117,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) { gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT); - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); WARN_ON(root->root_count); WARN_ON(!root->tdp_mmu_page); @@ -170,13 +170,13 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level); - spin_lock(&kvm->mmu_lock); + write_lock(&kvm->mmu_lock); /* Check for an existing root before allocating a new one. */ for_each_tdp_mmu_root(kvm, root) { if (root->role.word == role.word) { kvm_mmu_get_root(kvm, root); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return root; } } @@ -186,7 +186,7 @@ static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu) list_add(&root->link, &kvm->arch.tdp_mmu_roots); - spin_unlock(&kvm->mmu_lock); + write_unlock(&kvm->mmu_lock); return root; } @@ -421,7 +421,7 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *root = sptep_to_sp(root_pt); int as_id = kvm_mmu_page_as_id(root); - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte); @@ -492,13 +492,13 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, if (iter->next_last_level_gfn == iter->yielded_gfn) return false; - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { + if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { rcu_read_unlock(); if (flush) kvm_flush_remote_tlbs(kvm); - cond_resched_lock(&kvm->mmu_lock); + cond_resched_rwlock_write(&kvm->mmu_lock); rcu_read_lock(); WARN_ON(iter->gfn > iter->next_last_level_gfn); @@ -1103,7 +1103,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root; int root_as_id; - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root) { root_as_id = kvm_mmu_page_as_id(root); if (root_as_id != slot->as_id) @@ -1268,7 +1268,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, int root_as_id; bool spte_set = false; - lockdep_assert_held(&kvm->mmu_lock); + lockdep_assert_held_write(&kvm->mmu_lock); for_each_tdp_mmu_root(kvm, root) { root_as_id = kvm_mmu_page_as_id(root); if (root_as_id != slot->as_id) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 912fc418ce99..b3b1cce939ab 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7112,9 +7112,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (vcpu->arch.mmu->direct_map) { unsigned int indirect_shadow_pages; - spin_lock(&vcpu->kvm->mmu_lock); + write_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; - spin_unlock(&vcpu->kvm->mmu_lock); + write_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); diff --git a/include/linux/kvm_host.h 
b/include/linux/kvm_host.h index f3b1013fb22c..f417447129b9 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -451,7 +451,12 @@ struct kvm_memslots { }; struct kvm { +#ifdef KVM_HAVE_MMU_RWLOCK + rwlock_t mmu_lock; +#else spinlock_t mmu_lock; +#endif /* KVM_HAVE_MMU_RWLOCK */ + struct mutex slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index 790f17325f8d..7aafefc50aa7 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -9,6 +9,7 @@ #include #include #include +#include "mmu_lock.h" int __weak kvm_cpu_dirty_log_size(void) { @@ -60,9 +61,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) if (!memslot || (offset + __fls(mask)) >= memslot->npages) return; - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); } int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 335a1a2b8edc..48ccdf4e3d04 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -58,6 +58,7 @@ #include "coalesced_mmio.h" #include "async_pf.h" +#include "mmu_lock.h" #include "vfio.h" #define CREATE_TRACE_POINTS @@ -459,13 +460,15 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, int idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + + KVM_MMU_LOCK(kvm); + kvm->mmu_notifier_seq++; if (kvm_set_spte_hva(kvm, address, pte)) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); } @@ -476,7 +479,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, int need_tlb_flush = 0, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); /* * The count increase must become visible at unlock time as no * spte can be established without taking the mmu_lock and @@ -489,7 +492,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, if (need_tlb_flush || kvm->tlbs_dirty) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return 0; @@ -500,7 +503,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, { struct kvm *kvm = mmu_notifier_to_kvm(mn); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); /* * This sequence increase will notify the kvm page fault that * the page that is going to be mapped in the spte could have @@ -514,7 +517,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, * in conjunction with the smp_rmb in mmu_notifier_retry(). 
*/ kvm->mmu_notifier_count--; - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); BUG_ON(kvm->mmu_notifier_count < 0); } @@ -528,13 +531,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, int young, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); young = kvm_age_hva(kvm, start, end); if (young) kvm_flush_remote_tlbs(kvm); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; @@ -549,7 +552,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, int young, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); /* * Even though we do not flush TLB, this will still adversely * affect performance on pre-Haswell Intel EPT, where there is @@ -564,7 +567,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, * more sophisticated heuristic later. */ young = kvm_age_hva(kvm, start, end); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; @@ -578,9 +581,9 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn, int young, idx; idx = srcu_read_lock(&kvm->srcu); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); young = kvm_test_age_hva(kvm, address); - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); srcu_read_unlock(&kvm->srcu, idx); return young; @@ -745,7 +748,7 @@ static struct kvm *kvm_create_vm(unsigned long type) if (!kvm) return ERR_PTR(-ENOMEM); - spin_lock_init(&kvm->mmu_lock); + KVM_MMU_LOCK_INIT(kvm); mmgrab(current->mm); kvm->mm = current->mm; kvm_eventfd_init(kvm); @@ -1525,7 +1528,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); memset(dirty_bitmap_buffer, 0, n); - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; @@ -1541,7 +1544,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); } - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); } if (flush) @@ -1636,7 +1639,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm, if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) return -EFAULT; - spin_lock(&kvm->mmu_lock); + KVM_MMU_LOCK(kvm); for (offset = log->first_page, i = offset / BITS_PER_LONG, n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--; i++, offset += BITS_PER_LONG) { @@ -1659,7 +1662,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm, offset, mask); } } - spin_unlock(&kvm->mmu_lock); + KVM_MMU_UNLOCK(kvm); if (flush) kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); diff --git a/virt/kvm/mmu_lock.h b/virt/kvm/mmu_lock.h new file mode 100644 index 000000000000..9e1308f9734c --- /dev/null +++ b/virt/kvm/mmu_lock.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#ifndef KVM_MMU_LOCK_H +#define KVM_MMU_LOCK_H 1 + +/* + * Architectures can choose whether to use an rwlock or spinlock + * for the mmu_lock. These macros, for use in common code + * only, avoids using #ifdefs in places that must deal with + * multiple architectures. 
+ */ + +#ifdef KVM_HAVE_MMU_RWLOCK +#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock) +#else +#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock) +#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock) +#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock) +#endif /* KVM_HAVE_MMU_RWLOCK */ + +#endif -- cgit v1.2.3-71-gd317 From 23200b7a30de315d0e9a40663c905869d29d833c Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 13 Jun 2018 09:55:44 -0400 Subject: KVM: x86/xen: intercept xen hypercalls if enabled Add a new exit reason for emulator to handle Xen hypercalls. Since this means KVM owns the ABI, dispense with the facility for the VMM to provide its own copy of the hypercall pages; just fill them in directly using VMCALL/VMMCALL as we do for the Hyper-V hypercall page. This behaviour is enabled by a new INTERCEPT_HCALL flag in the KVM_XEN_HVM_CONFIG ioctl structure, and advertised by the same flag being returned from the KVM_CAP_XEN_HVM check. Rename xen_hvm_config() to kvm_xen_write_hypercall_page() and move it to the nascent xen.c while we're at it, and add a test case. Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 6 + arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/trace.h | 36 ++++++ arch/x86/kvm/x86.c | 49 +++---- arch/x86/kvm/xen.c | 142 +++++++++++++++++++++ arch/x86/kvm/xen.h | 21 +++ include/uapi/linux/kvm.h | 20 +++ tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/lib/kvm_util.c | 1 + .../testing/selftests/kvm/x86_64/xen_vmcall_test.c | 123 ++++++++++++++++++ 10 files changed, 369 insertions(+), 32 deletions(-) create mode 100644 arch/x86/kvm/xen.c create mode 100644 arch/x86/kvm/xen.h create mode 100644 tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bcbb32ef9f00..b4bcdebd6e4c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -531,6 +531,11 @@ struct kvm_vcpu_hv { cpumask_t tlb_flush; }; +/* Xen HVM per vcpu emulation context */ +struct kvm_vcpu_xen { + u64 hypercall_rip; +}; + struct kvm_vcpu_arch { /* * rip and regs accesses must go through @@ -729,6 +734,7 @@ struct kvm_vcpu_arch { unsigned long singlestep_rip; struct kvm_vcpu_hv hyperv; + struct kvm_vcpu_xen xen; cpumask_var_t wbinvd_dirty_mask; diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 4bd14ab01323..a50041235530 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -14,7 +14,7 @@ kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ $(KVM)/dirty_ring.o kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o -kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ +kvm-y += x86.o emulate.o i8259.o irq.o lapic.o xen.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 5ef238621881..a61c015870e3 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -92,6 +92,42 @@ TRACE_EVENT(kvm_hv_hypercall, __entry->outgpa) ); +/* + * Tracepoint for Xen hypercall. 
+ */
+TRACE_EVENT(kvm_xen_hypercall,
+	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+		 unsigned long a2, unsigned long a3, unsigned long a4,
+		 unsigned long a5),
+	TP_ARGS(nr, a0, a1, a2, a3, a4, a5),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, nr)
+		__field(unsigned long, a0)
+		__field(unsigned long, a1)
+		__field(unsigned long, a2)
+		__field(unsigned long, a3)
+		__field(unsigned long, a4)
+		__field(unsigned long, a5)
+	),
+
+	TP_fast_assign(
+		__entry->nr = nr;
+		__entry->a0 = a0;
+		__entry->a1 = a1;
+		__entry->a2 = a2;
+		__entry->a3 = a3;
+		__entry->a4 = a4;
+		__entry->a5 = a5;
+	),
+
+	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx a4 0x%lx a5 0x%lx",
+		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
+		  __entry->a3, __entry->a4, __entry->a5)
+);
+
+
 /*
  * Tracepoint for PIO.
  */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 34ca136a608e..5a41d465134d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -29,6 +29,7 @@
 #include "pmu.h"
 #include "hyperv.h"
 #include "lapic.h"
+#include "xen.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -2870,34 +2871,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	return 0;
 }
 
-static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
-{
-	struct kvm *kvm = vcpu->kvm;
-	int lm = is_long_mode(vcpu);
-	u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
-		: kvm->arch.xen_hvm_config.blob_addr_32;
-	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
-		: kvm->arch.xen_hvm_config.blob_size_32;
-	u32 page_num = data & ~PAGE_MASK;
-	u64 page_addr = data & PAGE_MASK;
-	u8 *page;
-
-	if (page_num >= blob_size)
-		return 1;
-
-	blob_addr += page_num * PAGE_SIZE;
-
-	page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
-	if (IS_ERR(page))
-		return PTR_ERR(page);
-
-	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
-		kfree(page);
-		return 1;
-	}
-	return 0;
-}
-
 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
 {
 	u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
@@ -3032,7 +3005,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u64 data = msr_info->data;
 
 	if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
-		return xen_hvm_config(vcpu, data);
+		return kvm_xen_write_hypercall_page(vcpu, data);
 
 	switch (msr) {
 	case MSR_AMD64_NB_CFG:
@@ -3741,7 +3714,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_PIT2:
 	case KVM_CAP_PIT_STATE2:
 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
-	case KVM_CAP_XEN_HVM:
 	case KVM_CAP_VCPU_EVENTS:
 	case KVM_CAP_HYPERV:
 	case KVM_CAP_HYPERV_VAPIC:
@@ -3781,6 +3753,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
 		r = 1;
 		break;
+	case KVM_CAP_XEN_HVM:
+		r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
+		    KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL;
+		break;
 	case KVM_CAP_SYNC_REGS:
 		r = KVM_SYNC_X86_VALID_FIELDS;
 		break;
@@ -5652,7 +5628,15 @@ set_pit2_out:
 		if (copy_from_user(&xhc, argp, sizeof(xhc)))
 			goto out;
 		r = -EINVAL;
-		if (xhc.flags)
+		if (xhc.flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
+			goto out;
+		/*
+		 * With hypercall interception the kernel generates its own
+		 * hypercall page so it must not be provided.
+ */ + if ((xhc.flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && + (xhc.blob_addr_32 || xhc.blob_addr_64 || + xhc.blob_size_32 || xhc.blob_size_64)) goto out; memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); r = 0; @@ -8143,6 +8127,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) unsigned long nr, a0, a1, a2, a3, ret; int op_64_bit; + if (kvm_xen_hypercall_enabled(vcpu->kvm)) + return kvm_xen_hypercall(vcpu); + if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c new file mode 100644 index 000000000000..62569ca43857 --- /dev/null +++ b/arch/x86/kvm/xen.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * KVM Xen emulation + */ + +#include "x86.h" +#include "xen.h" + +#include + +#include + +#include "trace.h" + +int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) +{ + struct kvm *kvm = vcpu->kvm; + u32 page_num = data & ~PAGE_MASK; + u64 page_addr = data & PAGE_MASK; + + /* + * If Xen hypercall intercept is enabled, fill the hypercall + * page with VMCALL/VMMCALL instructions since that's what + * we catch. Else the VMM has provided the hypercall pages + * with instructions of its own choosing, so use those. + */ + if (kvm_xen_hypercall_enabled(kvm)) { + u8 instructions[32]; + int i; + + if (page_num) + return 1; + + /* mov imm32, %eax */ + instructions[0] = 0xb8; + + /* vmcall / vmmcall */ + kvm_x86_ops.patch_hypercall(vcpu, instructions + 5); + + /* ret */ + instructions[8] = 0xc3; + + /* int3 to pad */ + memset(instructions + 9, 0xcc, sizeof(instructions) - 9); + + for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) { + *(u32 *)&instructions[1] = i; + if (kvm_vcpu_write_guest(vcpu, + page_addr + (i * sizeof(instructions)), + instructions, sizeof(instructions))) + return 1; + } + } else { + int lm = is_long_mode(vcpu); + u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 + : kvm->arch.xen_hvm_config.blob_addr_32; + u8 blob_size = lm ? 
kvm->arch.xen_hvm_config.blob_size_64 + : kvm->arch.xen_hvm_config.blob_size_32; + u8 *page; + + if (page_num >= blob_size) + return 1; + + blob_addr += page_num * PAGE_SIZE; + + page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { + kfree(page); + return 1; + } + } + return 0; +} + +static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) +{ + kvm_rax_write(vcpu, result); + return kvm_skip_emulated_instruction(vcpu); +} + +static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip))) + return 1; + + return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result); +} + +int kvm_xen_hypercall(struct kvm_vcpu *vcpu) +{ + bool longmode; + u64 input, params[6]; + + input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX); + + longmode = is_64_bit_mode(vcpu); + if (!longmode) { + params[0] = (u32)kvm_rbx_read(vcpu); + params[1] = (u32)kvm_rcx_read(vcpu); + params[2] = (u32)kvm_rdx_read(vcpu); + params[3] = (u32)kvm_rsi_read(vcpu); + params[4] = (u32)kvm_rdi_read(vcpu); + params[5] = (u32)kvm_rbp_read(vcpu); + } +#ifdef CONFIG_X86_64 + else { + params[0] = (u64)kvm_rdi_read(vcpu); + params[1] = (u64)kvm_rsi_read(vcpu); + params[2] = (u64)kvm_rdx_read(vcpu); + params[3] = (u64)kvm_r10_read(vcpu); + params[4] = (u64)kvm_r8_read(vcpu); + params[5] = (u64)kvm_r9_read(vcpu); + } +#endif + trace_kvm_xen_hypercall(input, params[0], params[1], params[2], + params[3], params[4], params[5]); + + vcpu->run->exit_reason = KVM_EXIT_XEN; + vcpu->run->xen.type = KVM_EXIT_XEN_HCALL; + vcpu->run->xen.u.hcall.longmode = longmode; + vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu); + vcpu->run->xen.u.hcall.input = input; + vcpu->run->xen.u.hcall.params[0] = params[0]; + vcpu->run->xen.u.hcall.params[1] = params[1]; + vcpu->run->xen.u.hcall.params[2] = params[2]; + vcpu->run->xen.u.hcall.params[3] = params[3]; + vcpu->run->xen.u.hcall.params[4] = params[4]; + vcpu->run->xen.u.hcall.params[5] = params[5]; + vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu); + vcpu->arch.complete_userspace_io = + kvm_xen_hypercall_complete_userspace; + + return 0; +} diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h new file mode 100644 index 000000000000..276ed59e476b --- /dev/null +++ b/arch/x86/kvm/xen.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2019 Oracle and/or its affiliates. All rights reserved. + * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * KVM Xen emulation + */ + +#ifndef __ARCH_X86_KVM_XEN_H__ +#define __ARCH_X86_KVM_XEN_H__ + +int kvm_xen_hypercall(struct kvm_vcpu *vcpu); +int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); + +static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) +{ + return kvm->arch.xen_hvm_config.flags & + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL; +} + +#endif /* __ARCH_X86_KVM_XEN_H__ */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index dfe3ba5cf262..c87defe5db4f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -216,6 +216,20 @@ struct kvm_hyperv_exit { } u; }; +struct kvm_xen_exit { +#define KVM_EXIT_XEN_HCALL 1 + __u32 type; + union { + struct { + __u32 longmode; + __u32 cpl; + __u64 input; + __u64 result; + __u64 params[6]; + } hcall; + } u; +}; + #define KVM_S390_GET_SKEYS_NONE 1 #define KVM_S390_SKEYS_MAX 1048576 @@ -253,6 +267,7 @@ struct kvm_hyperv_exit { #define KVM_EXIT_DIRTY_RING_FULL 31 #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 +#define KVM_EXIT_XEN 34 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -429,6 +444,8 @@ struct kvm_run { __u32 index; /* kernel -> user */ __u64 data; /* kernel <-> user */ } msr; + /* KVM_EXIT_XEN */ + struct kvm_xen_exit xen; /* Fix the size of the union. */ char padding[256]; }; @@ -1133,6 +1150,9 @@ struct kvm_x86_mce { #endif #ifdef KVM_CAP_XEN_HVM +#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) +#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) + struct kvm_xen_hvm_config { __u32 flags; __u32 msr; diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 9616f77cc5af..be9613dd6d9b 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -62,6 +62,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test +TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index fa5a90e6c6f0..d787cb802b4a 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1801,6 +1801,7 @@ static struct exit_reason { {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"}, {KVM_EXIT_X86_RDMSR, "RDMSR"}, {KVM_EXIT_X86_WRMSR, "WRMSR"}, + {KVM_EXIT_XEN, "XEN"}, #ifdef KVM_EXIT_MEMORY_NOT_PRESENT {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"}, #endif diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c new file mode 100644 index 000000000000..234c9da286aa --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * xen_vmcall_test + * + * Copyright © 2020 Amazon.com, Inc. or its affiliates. 
+ * + * Userspace hypercall testing + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 5 + +#define HCALL_REGION_GPA 0xc0000000ULL +#define HCALL_REGION_SLOT 10 + +static struct kvm_vm *vm; + +#define INPUTVALUE 17 +#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x) +#define RETVALUE 0xcafef00dfbfbffffUL + +#define XEN_HYPERCALL_MSR 0x40000000 + +static void guest_code(void) +{ + unsigned long rax = INPUTVALUE; + unsigned long rdi = ARGVALUE(1); + unsigned long rsi = ARGVALUE(2); + unsigned long rdx = ARGVALUE(3); + register unsigned long r10 __asm__("r10") = ARGVALUE(4); + register unsigned long r8 __asm__("r8") = ARGVALUE(5); + register unsigned long r9 __asm__("r9") = ARGVALUE(6); + + /* First a direct invocation of 'vmcall' */ + __asm__ __volatile__("vmcall" : + "=a"(rax) : + "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx), + "r"(r10), "r"(r8), "r"(r9)); + GUEST_ASSERT(rax == RETVALUE); + + /* Now fill in the hypercall page */ + __asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR), + "a" (HCALL_REGION_GPA & 0xffffffff), + "d" (HCALL_REGION_GPA >> 32)); + + /* And invoke the same hypercall that way */ + __asm__ __volatile__("call *%1" : "=a"(rax) : + "r"(HCALL_REGION_GPA + INPUTVALUE * 32), + "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx), + "r"(r10), "r"(r8), "r"(r9)); + GUEST_ASSERT(rax == RETVALUE); + + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + if (!(kvm_check_cap(KVM_CAP_XEN_HVM) & + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) { + print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available"); + exit(KSFT_SKIP); + } + + vm = vm_create_default(VCPU_ID, 0, (void *) guest_code); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + + struct kvm_xen_hvm_config hvmc = { + .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, + .msr = XEN_HYPERCALL_MSR, + }; + vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc); + + /* Map a region for the hypercall page */ + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + HCALL_REGION_GPA, HCALL_REGION_SLOT, + getpagesize(), 0); + virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 1, 0); + + for (;;) { + volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct ucall uc; + + vcpu_run(vm, VCPU_ID); + + if (run->exit_reason == KVM_EXIT_XEN) { + ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL); + ASSERT_EQ(run->xen.u.hcall.cpl, 0); + ASSERT_EQ(run->xen.u.hcall.longmode, 1); + ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE); + ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1)); + ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2)); + ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3)); + ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4)); + ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5)); + ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6)); + run->xen.u.hcall.result = RETVALUE; + continue; + } + + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_FAIL("%s", (const char *)uc.args[0]); + /* NOT REACHED */ + case UCALL_SYNC: + break; + case UCALL_DONE: + goto done; + default: + TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd); + } + } +done: + kvm_vm_free(vm); + return 0; +} -- cgit v1.2.3-71-gd317 From a76b9641ad1c0b045045727a6cbbeebf80b6b9bb Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Thu, 3 Dec 2020 15:52:25 +0000 Subject: KVM: x86/xen: add KVM_XEN_HVM_SET_ATTR/KVM_XEN_HVM_GET_ATTR This will be used to set up shared info pages etc. 
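A rough sketch of how a VMM is expected to drive this new ioctl pair (not code from the patch; the vm_fd variable, the error handling and the KVM_XEN_ATTR_TYPE_LONG_MODE attribute type, which only appears later in this series, are all assumptions):

#include <assert.h>
#include <err.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void set_and_check_long_mode(int vm_fd)
{
	struct kvm_xen_hvm_attr xha = {
		.type = KVM_XEN_ATTR_TYPE_LONG_MODE,	/* defined later in this series */
		.u.long_mode = 1,
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &xha))
		err(1, "KVM_XEN_HVM_SET_ATTR");

	/* GET_ATTR fills in the union for the requested type */
	memset(&xha.u, 0, sizeof(xha.u));
	if (ioctl(vm_fd, KVM_XEN_HVM_GET_ATTR, &xha) == 0)
		assert(xha.u.long_mode == 1);
}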
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ arch/x86/kvm/xen.c | 30 ++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 2 ++ include/uapi/linux/kvm.h | 11 +++++++++++ 4 files changed, 63 insertions(+) (limited to 'include') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ffc4ab3bd6ec..59bcba1ea59f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5630,6 +5630,26 @@ set_pit2_out: r = kvm_xen_hvm_config(kvm, &xhc); break; } + case KVM_XEN_HVM_GET_ATTR: { + struct kvm_xen_hvm_attr xha; + + r = -EFAULT; + if (copy_from_user(&xha, argp, sizeof(xha))) + goto out; + r = kvm_xen_hvm_get_attr(kvm, &xha); + if (!r && copy_to_user(argp, &xha, sizeof(xha))) + r = -EFAULT; + break; + } + case KVM_XEN_HVM_SET_ATTR: { + struct kvm_xen_hvm_attr xha; + + r = -EFAULT; + if (copy_from_user(&xha, argp, sizeof(xha))) + goto out; + r = kvm_xen_hvm_set_attr(kvm, &xha); + break; + } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 7d03d918e595..a3fd791b0354 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -18,6 +18,36 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); +int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&kvm->lock); + return r; +} + +int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&kvm->lock); + return r; +} + int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index ec3d8f6d0ef5..0e2467fcfb9f 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -13,6 +13,8 @@ extern struct static_key_false_deferred kvm_xen_enabled; +int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); +int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); int kvm_xen_hypercall(struct kvm_vcpu *vcpu); int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data); int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index c87defe5db4f..334796799dbc 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1587,6 +1587,17 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_DIRTY_LOG_RING */ #define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) +#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr) +#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr) + +struct kvm_xen_hvm_attr { + __u16 type; + __u16 pad[3]; + union { + __u64 pad[8]; + } u; +}; + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3-71-gd317 From a3833b81b05d0ae92ae085959dd8da136ec91868 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Dec 2020 16:20:32 +0000 Subject: KVM: x86/xen: latch long_mode when hypercall page is set up Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 6 ++++++ arch/x86/kvm/xen.c | 16 +++++++++++++++- include/uapi/linux/kvm.h | 3 +++ 3 files changed, 24 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b4bcdebd6e4c..016a005cc2ad 100644 ---
a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -905,6 +905,11 @@ struct msr_bitmap_range { unsigned long *bitmap; }; +/* Xen emulation context */ +struct kvm_xen { + bool long_mode; +}; + enum kvm_irqchip_mode { KVM_IRQCHIP_NONE, KVM_IRQCHIP_KERNEL, /* created with KVM_CREATE_IRQCHIP */ @@ -984,6 +989,7 @@ struct kvm_arch { struct hlist_head mask_notifier_list; struct kvm_hv hyperv; + struct kvm_xen xen; #ifdef CONFIG_KVM_MMU_AUDIT int audit_point; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index a3fd791b0354..55da739267b1 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -25,6 +25,13 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) mutex_unlock(&kvm->lock); switch (data->type) { + case KVM_XEN_ATTR_TYPE_LONG_MODE: + if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) + return -EINVAL; + + kvm->arch.xen.long_mode = !!data->u.long_mode; + r = 0; + break; default: break; } @@ -40,6 +47,10 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) mutex_lock(&kvm->lock); switch (data->type) { + case KVM_XEN_ATTR_TYPE_LONG_MODE: + data->u.long_mode = kvm->arch.xen.long_mode; + r = 0; + break; default: break; } @@ -53,6 +64,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) struct kvm *kvm = vcpu->kvm; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; + bool lm = is_long_mode(vcpu); + + /* Latch long_mode for shared_info pages etc. */ + vcpu->kvm->arch.xen.long_mode = lm; /* * If Xen hypercall intercept is enabled, fill the hypercall @@ -87,7 +102,6 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) return 1; } } else { - int lm = is_long_mode(vcpu); u64 blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 : kvm->arch.xen_hvm_config.blob_addr_32; u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 334796799dbc..11644954a2e2 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1594,10 +1594,13 @@ struct kvm_xen_hvm_attr { __u16 type; __u16 pad[3]; union { + __u8 long_mode; __u64 pad[8]; } u; }; +#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3-71-gd317 From 13ffb97a3b11998450d51457b6b3617781953f7c Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Fri, 15 Jun 2018 21:17:14 -0400 Subject: KVM: x86/xen: register shared_info page Add KVM_XEN_ATTR_TYPE_SHARED_INFO to allow hypervisor to know where the guest's shared info page is. 
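Concretely, the VMM registers the frame number of a page it has already backed with a memslot; a minimal sketch (vm_fd and the guest physical address are the VMM's choice, and the xen_shinfo_test selftest added later in this series does exactly this):

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define SHINFO_GPA	0xc0000000ULL	/* assumption: covered by a memslot */

static void register_shinfo(int vm_fd)
{
	struct kvm_xen_hvm_attr ha = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
		.u.shared_info.gfn = SHINFO_GPA / 4096,	/* gpa -> gfn */
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha))
		err(1, "KVM_XEN_HVM_SET_ATTR(SHARED_INFO)");
}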
Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/xen.c | 40 ++++++++++++++++++++++++++++++++++++---- include/uapi/linux/kvm.h | 4 ++++ 3 files changed, 42 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 016a005cc2ad..00d1233cfd55 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -908,6 +908,8 @@ struct msr_bitmap_range { /* Xen emulation context */ struct kvm_xen { bool long_mode; + bool shinfo_set; + struct gfn_to_hva_cache shinfo_cache; }; enum kvm_irqchip_mode { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 55da739267b1..924d4e853108 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -13,25 +13,49 @@ #include #include +#include #include "trace.h" DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); +static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) +{ + int ret; + int idx = srcu_read_lock(&kvm->srcu); + + ret = kvm_gfn_to_hva_cache_init(kvm, &kvm->arch.xen.shinfo_cache, + gfn_to_gpa(gfn), PAGE_SIZE); + if (!ret) { + kvm->arch.xen.shinfo_set = true; + } + + srcu_read_unlock(&kvm->srcu, idx); + return ret; +} + int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) { int r = -ENOENT; mutex_lock(&kvm->lock); switch (data->type) { case KVM_XEN_ATTR_TYPE_LONG_MODE: - if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) - return -EINVAL; + if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { + r = -EINVAL; + } else { + kvm->arch.xen.long_mode = !!data->u.long_mode; + r = 0; + } + break; - kvm->arch.xen.long_mode = !!data->u.long_mode; - r = 0; + case KVM_XEN_ATTR_TYPE_SHARED_INFO: + r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); break; + default: break; } @@ -51,6 +75,14 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) data->u.long_mode = kvm->arch.xen.long_mode; r = 0; break; + + case KVM_XEN_ATTR_TYPE_SHARED_INFO: + if (kvm->arch.xen.shinfo_set) { + data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); + r = 0; + } + break; + default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 11644954a2e2..f57f6e741a28 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1595,11 +1595,15 @@ struct kvm_xen_hvm_attr { __u16 pad[3]; union { __u8 long_mode; + struct { + __u64 gfn; + } shared_info; __u64 pad[8]; } u; }; #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 +#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3-71-gd317 From 42387042ba38cca8fb86bb3a7913e44cd3569750 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 3 Dec 2020 21:02:23 +0000 Subject: xen: add wc_sec_hi to struct shared_info Xen added this in 2015 (Xen 4.6). On x86_64 and Arm it fills what was previously a 32-bit hole in the generic shared_info structure; on i386 it had to go at the end of struct arch_shared_info.
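Without the extra field, the 32-bit wc.sec would wrap in 2106. A guest-side read might look like the following sketch (the mapped shinfo pointer and the rmb() barrier are assumptions, not part of the patch; per the pvclock protocol, wc.version is odd while the hypervisor is mid-update):

	uint32_t ver, nsec;
	uint64_t sec;

	do {
		ver = shinfo->wc.version;
		rmb();		/* read the version before the payload */
		sec = ((uint64_t)shinfo->wc_sec_hi << 32) | shinfo->wc.sec;
		nsec = shinfo->wc.nsec;
		rmb();		/* read the payload before the re-check */
	} while ((ver & 1) || ver != shinfo->wc.version);

On i386 the high word would instead be read from shinfo->arch.wc_sec_hi, matching the placement the patch below makes.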
Signed-off-by: David Woodhouse --- arch/x86/include/asm/xen/interface.h | 3 +++ include/xen/interface/xen.h | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index 9139b3e86316..baca0b00ef76 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h @@ -182,6 +182,9 @@ struct arch_shared_info { unsigned long p2m_cr3; /* cr3 value of the p2m address space */ unsigned long p2m_vaddr; /* virtual address of the p2m list */ unsigned long p2m_generation; /* generation count of p2m mapping */ +#ifdef CONFIG_X86_32 + uint32_t wc_sec_hi; +#endif }; #endif /* !__ASSEMBLY__ */ diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 8bfb242f433e..5ee37a296481 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -598,7 +598,9 @@ struct shared_info { * their gettimeofday() syscall on this wallclock-base value. */ struct pvclock_wall_clock wc; - +#ifndef CONFIG_X86_32 + uint32_t wc_sec_hi; +#endif struct arch_shared_info arch; }; -- cgit v1.2.3-71-gd317 From 3e3246158808d46b81edb8246214c0ab5a852594 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Tue, 2 Feb 2021 16:53:25 +0000 Subject: KVM: x86/xen: Add KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR This will be used for per-vCPU setup such as runstate and vcpu_info. Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 20 ++++++++++++++++++++ arch/x86/kvm/xen.c | 30 ++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 2 ++ include/uapi/linux/kvm.h | 13 +++++++++++++ 4 files changed, 65 insertions(+) (limited to 'include') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 040d3d470173..66cd574ee458 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5016,6 +5016,26 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_GET_SUPPORTED_HV_CPUID: r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); break; + case KVM_XEN_VCPU_GET_ATTR: { + struct kvm_xen_vcpu_attr xva; + + r = -EFAULT; + if (copy_from_user(&xva, argp, sizeof(xva))) + goto out; + r = kvm_xen_vcpu_get_attr(vcpu, &xva); + if (!r && copy_to_user(argp, &xva, sizeof(xva))) + r = -EFAULT; + break; + } + case KVM_XEN_VCPU_SET_ATTR: { + struct kvm_xen_vcpu_attr xva; + + r = -EFAULT; + if (copy_from_user(&xva, argp, sizeof(xva))) + goto out; + r = kvm_xen_vcpu_set_attr(vcpu, &xva); + break; + } default: r = -EINVAL; } diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index eab4dce93be1..5cbf6955e509 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -118,6 +118,36 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) return r; } +int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&vcpu->kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&vcpu->kvm->lock); + return r; +} + +int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) +{ + int r = -ENOENT; + + mutex_lock(&vcpu->kvm->lock); + + switch (data->type) { + default: + break; + } + + mutex_unlock(&vcpu->kvm->lock); + return r; +} + int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data) { struct kvm *kvm = vcpu->kvm; diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 12a3dc32e78e..fb85377fdbdc 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -13,6 +13,8 @@ extern struct static_key_false_deferred kvm_xen_enabled; +int kvm_xen_vcpu_set_attr(struct kvm_vcpu 
*vcpu, struct kvm_xen_vcpu_attr *data); +int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); int kvm_xen_hypercall(struct kvm_vcpu *vcpu); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index f57f6e741a28..e2b0cbde1908 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1587,6 +1587,7 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_DIRTY_LOG_RING */ #define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) +/* Per-VM Xen attributes */ #define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr) #define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr) @@ -1605,6 +1606,18 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 +/* Per-vCPU Xen attributes */ +#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) +#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) + +struct kvm_xen_vcpu_attr { + __u16 type; + __u16 pad[3]; + union { + __u64 pad[8]; + } u; +}; + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3-71-gd317 From 73e69a86347afe8156aa50c436fc192b280b0cd7 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Fri, 29 Jun 2018 10:52:52 -0400 Subject: KVM: x86/xen: register vcpu info The vcpu info supersedes the per vcpu area of the shared info page and the guest vcpus will use this instead. Signed-off-by: Joao Martins Signed-off-by: Ankur Arora Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/xen.c | 25 ++++++++++++++++++++++++- include/uapi/linux/kvm.h | 3 +++ 3 files changed, 29 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 00d1233cfd55..4e864f2bd59a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -534,6 +534,8 @@ struct kvm_vcpu_hv { /* Xen HVM per vcpu emulation context */ struct kvm_vcpu_xen { u64 hypercall_rip; + bool vcpu_info_set; + struct gfn_to_hva_cache vcpu_info_cache; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index 5cbf6955e509..2712f1aa9ac9 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -120,15 +120,31 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) { - int r = -ENOENT; + int idx, r = -ENOENT; mutex_lock(&vcpu->kvm->lock); + idx = srcu_read_lock(&vcpu->kvm->srcu); switch (data->type) { + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: + /* No compat necessary here. 
*/ + BUILD_BUG_ON(sizeof(struct vcpu_info) != + sizeof(struct compat_vcpu_info)); + + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.vcpu_info_cache, + data->u.gpa, + sizeof(struct vcpu_info)); + if (!r) + vcpu->arch.xen.vcpu_info_set = true; + break; + + default: break; } + srcu_read_unlock(&vcpu->kvm->srcu, idx); mutex_unlock(&vcpu->kvm->lock); return r; } @@ -140,6 +156,13 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) mutex_lock(&vcpu->kvm->lock); switch (data->type) { + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO: + if (vcpu->arch.xen.vcpu_info_set) { + data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; + r = 0; + } + break; + default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index e2b0cbde1908..2db0657b3337 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1614,10 +1614,13 @@ struct kvm_xen_vcpu_attr { __u16 type; __u16 pad[3]; union { + __u64 gpa; __u64 pad[8]; } u; }; +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ -- cgit v1.2.3-71-gd317 From f2340cd9e41dc463cb1189274f3db560c1dfa1f4 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Mon, 23 Jul 2018 11:20:57 -0400 Subject: KVM: x86/xen: register vcpu time info region Allow the Xen emulated guest the ability to register secondary vcpu time information. On Xen guests this is used in order to be mapped to userspace and hence allow vdso gettimeofday to work. Signed-off-by: Joao Martins Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/x86.c | 2 ++ arch/x86/kvm/xen.c | 18 ++++++++++++++++++ include/uapi/linux/kvm.h | 1 + 4 files changed, 23 insertions(+) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4e864f2bd59a..c6c84c4ef7b0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -535,7 +535,9 @@ struct kvm_vcpu_hv { struct kvm_vcpu_xen { u64 hypercall_rip; bool vcpu_info_set; + bool vcpu_time_info_set; struct gfn_to_hva_cache vcpu_info_cache; + struct gfn_to_hva_cache vcpu_time_info_cache; }; struct kvm_vcpu_arch { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a295fb8a8978..0f648434796f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2757,6 +2757,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) if (vcpu->xen.vcpu_info_set) kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache, offsetof(struct compat_vcpu_info, time)); + if (vcpu->xen.vcpu_time_info_set) + kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); if (v == kvm_get_vcpu(v->kvm, 0)) kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); return 0; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index c307f8b7a8a3..bd343222e740 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -141,6 +141,17 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) } break; + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + r = kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.xen.vcpu_time_info_cache, + data->u.gpa, + sizeof(struct pvclock_vcpu_time_info)); + if (!r) { + vcpu->arch.xen.vcpu_time_info_set = true; + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); + } + break; + default: break; } @@ -164,6 +175,13 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data) } break; + case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO: + if (vcpu->arch.xen.vcpu_time_info_set) { + 
data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; + r = 0; + } + break; + default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2db0657b3337..0f045ffd9cb6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1620,6 +1620,7 @@ struct kvm_xen_vcpu_attr { }; #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 +#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 /* Secure Encrypted Virtualization command */ enum sev_cmd_id { -- cgit v1.2.3-71-gd317 From 40da8ccd724f7ca2f08550a46268bc3a91cc8869 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 9 Dec 2020 20:08:30 +0000 Subject: KVM: x86/xen: Add event channel interrupt vector upcall It turns out that we can't handle event channels *entirely* in userspace by delivering them as ExtINT, because KVM is a bit picky about when it accepts ExtINT interrupts from a legacy PIC. The in-kernel local APIC has to have LVT0 configured in APIC_MODE_EXTINT and unmasked, which isn't necessarily the case for Xen guests especially on secondary CPUs. To cope with this, add kvm_xen_get_interrupt() which checks the evtchn_pending_upcall field in the Xen vcpu_info, and delivers the Xen upcall vector (configured by KVM_XEN_ATTR_TYPE_UPCALL_VECTOR) if it's set regardless of LAPIC LVT0 configuration. This gives us the minimum support we need for completely userspace-based implementation of event channels. This does mean that vcpu_enter_guest() needs to check for the evtchn_pending_upcall flag being set, because it can't rely on someone having set KVM_REQ_EVENT unless we were to add some way for userspace to do so manually. Signed-off-by: David Woodhouse --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/irq.c | 7 ++++++ arch/x86/kvm/x86.c | 3 ++- arch/x86/kvm/xen.c | 53 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/xen.h | 9 +++++++ include/uapi/linux/kvm.h | 2 ++ 6 files changed, 74 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c6c84c4ef7b0..b37afd856bab 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -913,6 +913,7 @@ struct msr_bitmap_range { struct kvm_xen { bool long_mode; bool shinfo_set; + u8 upcall_vector; struct gfn_to_hva_cache shinfo_cache; }; diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index a035cca82f8f..172b05343cfd 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -14,6 +14,7 @@ #include "irq.h" #include "i8254.h" #include "x86.h" +#include "xen.h" /* * check if there are pending timer events @@ -56,6 +57,9 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.injected; + if (kvm_xen_has_interrupt(v)) + return 1; + if (!kvm_apic_accept_pic_intr(v)) return 0; @@ -110,6 +114,9 @@ static int kvm_cpu_get_extint(struct kvm_vcpu *v) if (!lapic_in_kernel(v)) return v->arch.interrupt.nr; + if (kvm_xen_has_interrupt(v)) + return v->kvm->arch.xen.upcall_vector; + if (irqchip_split(v->kvm)) { int vector = v->arch.pending_external_vector; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0f648434796f..1cedbc3d2455 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8987,7 +8987,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) static_call(kvm_x86_msr_filter_changed)(vcpu); } - if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { + if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || + kvm_xen_has_interrupt(vcpu)) { ++vcpu->stat.req_event; kvm_apic_accept_events(vcpu); if 
(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index bd343222e740..39a7ffcdcf22 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -61,6 +61,44 @@ out: return ret; } +int __kvm_xen_has_interrupt(struct kvm_vcpu *v) +{ + u8 rc = 0; + + /* + * If the global upcall vector (HVMIRQ_callback_vector) is set and + * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending. + */ + struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache; + struct kvm_memslots *slots = kvm_memslots(v->kvm); + unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending); + + /* No need for compat handling here */ + BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) != + offsetof(struct compat_vcpu_info, evtchn_upcall_pending)); + BUILD_BUG_ON(sizeof(rc) != + sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending)); + BUILD_BUG_ON(sizeof(rc) != + sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending)); + + /* + * For efficiency, this mirrors the checks for using the valid + * cache in kvm_read_guest_offset_cached(), but just uses + * __get_user() instead. And falls back to the slow path. + */ + if (likely(slots->generation == ghc->generation && + !kvm_is_error_hva(ghc->hva) && ghc->memslot)) { + /* Fast path */ + __get_user(rc, (u8 __user *)ghc->hva + offset); + } else { + /* Slow path */ + kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset, + sizeof(rc)); + } + + return rc; +} + int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) { int r = -ENOENT; @@ -83,6 +121,16 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn); break; + + case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: + if (data->u.vector < 0x10) + r = -EINVAL; + else { + kvm->arch.xen.upcall_vector = data->u.vector; + r = 0; + } + break; + default: break; } @@ -110,6 +158,11 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) } break; + case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: + data->u.vector = kvm->arch.xen.upcall_vector; + r = 0; + break; + default: break; } diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index fb85377fdbdc..4b32489c0cec 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -13,6 +13,7 @@ extern struct static_key_false_deferred kvm_xen_enabled; +int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); @@ -29,6 +30,14 @@ static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm) KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL); } +static inline int kvm_xen_has_interrupt(struct kvm_vcpu *vcpu) +{ + if (static_branch_unlikely(&kvm_xen_enabled.key) && + vcpu->arch.xen.vcpu_info_set && vcpu->kvm->arch.xen.upcall_vector) + return __kvm_xen_has_interrupt(vcpu); + + return 0; +} /* 32-bit compatibility definitions, also used natively in 32-bit build */ #include diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0f045ffd9cb6..4d4cd001c908 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1596,6 +1596,7 @@ struct kvm_xen_hvm_attr { __u16 pad[3]; union { __u8 long_mode; + __u8 vector; struct { __u64 gfn; } shared_info; @@ -1605,6 +1606,7 @@ struct kvm_xen_hvm_attr { #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 +#define 
KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 /* Per-vCPU Xen attributes */ #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) -- cgit v1.2.3-71-gd317 From 8d4e7e80838f45d3466d36d4fcb890424825faa9 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 4 Dec 2020 01:02:04 +0000 Subject: KVM: x86: declare Xen HVM shared info capability and add test case Instead of adding a plethora of new KVM_CAP_XEN_FOO capabilities, just add bits to the return value of KVM_CAP_XEN_HVM. Signed-off-by: David Woodhouse --- arch/x86/kvm/x86.c | 3 +- include/uapi/linux/kvm.h | 3 + tools/testing/selftests/kvm/Makefile | 1 + .../testing/selftests/kvm/x86_64/xen_shinfo_test.c | 169 +++++++++++++++++++++ 4 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c (limited to 'include') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1cedbc3d2455..838ce5e9814b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3769,7 +3769,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; case KVM_CAP_XEN_HVM: r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR | - KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL; + KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL | + KVM_XEN_HVM_CONFIG_SHARED_INFO; break; case KVM_CAP_SYNC_REGS: r = KVM_SYNC_X86_VALID_FIELDS; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4d4cd001c908..63f8f6e95648 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1152,6 +1152,7 @@ struct kvm_x86_mce { #ifdef KVM_CAP_XEN_HVM #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) +#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) struct kvm_xen_hvm_config { __u32 flags; @@ -1604,6 +1605,7 @@ struct kvm_xen_hvm_attr { } u; }; +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 #define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 @@ -1621,6 +1623,7 @@ struct kvm_xen_vcpu_attr { } u; }; +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index be9613dd6d9b..8c8eda429576 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -62,6 +62,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test TEST_GEN_PROGS_x86_64 += x86_64/debug_regs TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test +TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c new file mode 100644 index 000000000000..bdb3feb86b5b --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * svm_vmcall_test + * + * Copyright © 2021 Amazon.com, Inc. or its affiliates. 
+ * + * Xen shared_info / pvclock testing + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#include +#include + +#define VCPU_ID 5 + +#define SHINFO_REGION_GPA 0xc0000000ULL +#define SHINFO_REGION_SLOT 10 +#define PAGE_SIZE 4096 + +#define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE) + +static struct kvm_vm *vm; + +#define XEN_HYPERCALL_MSR 0x40000000 + +struct pvclock_vcpu_time_info { + u32 version; + u32 pad0; + u64 tsc_timestamp; + u64 system_time; + u32 tsc_to_system_mul; + s8 tsc_shift; + u8 flags; + u8 pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +struct pvclock_wall_clock { + u32 version; + u32 sec; + u32 nsec; +} __attribute__((__packed__)); + +static void guest_code(void) +{ + GUEST_DONE(); +} + +static int cmp_timespec(struct timespec *a, struct timespec *b) +{ + if (a->tv_sec > b->tv_sec) + return 1; + else if (a->tv_sec < b->tv_sec) + return -1; + else if (a->tv_nsec > b->tv_nsec) + return 1; + else if (a->tv_nsec < b->tv_nsec) + return -1; + else + return 0; +} + +int main(int argc, char *argv[]) +{ + struct timespec min_ts, max_ts, vm_ts; + + if (!(kvm_check_cap(KVM_CAP_XEN_HVM) & + KVM_XEN_HVM_CONFIG_SHARED_INFO) ) { + print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available"); + exit(KSFT_SKIP); + } + + clock_gettime(CLOCK_REALTIME, &min_ts); + + vm = vm_create_default(VCPU_ID, 0, (void *) guest_code); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + + /* Map a region for the shared_info page */ + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + SHINFO_REGION_GPA, SHINFO_REGION_SLOT, + 2 * getpagesize(), 0); + virt_map(vm, SHINFO_REGION_GPA, SHINFO_REGION_GPA, 2, 0); + + struct kvm_xen_hvm_config hvmc = { + .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL, + .msr = XEN_HYPERCALL_MSR, + }; + vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc); + + struct kvm_xen_hvm_attr lm = { + .type = KVM_XEN_ATTR_TYPE_LONG_MODE, + .u.long_mode = 1, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &lm); + + struct kvm_xen_hvm_attr ha = { + .type = KVM_XEN_ATTR_TYPE_SHARED_INFO, + .u.shared_info.gfn = SHINFO_REGION_GPA / PAGE_SIZE, + }; + vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha); + + struct kvm_xen_vcpu_attr vi = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, + .u.gpa = SHINFO_REGION_GPA + 40, + }; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi); + + struct kvm_xen_vcpu_attr pvclock = { + .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO, + .u.gpa = PVTIME_ADDR, + }; + vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock); + + for (;;) { + volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID); + struct ucall uc; + + vcpu_run(vm, VCPU_ID); + + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_FAIL("%s", (const char *)uc.args[0]); + /* NOT REACHED */ + case UCALL_SYNC: + break; + case UCALL_DONE: + goto done; + default: + TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd); + } + } + + done: + clock_gettime(CLOCK_REALTIME, &max_ts); + + /* + * Just a *really* basic check that things are being put in the + * right place. The actual calculations are much the same for + * Xen as they are for the KVM variants, so no need to check. 
 */ + struct pvclock_wall_clock *wc; + struct pvclock_vcpu_time_info *ti, *ti2; + + wc = addr_gva2hva(vm, SHINFO_REGION_GPA + 0xc00); + ti = addr_gva2hva(vm, SHINFO_REGION_GPA + 0x40 + 0x20); + ti2 = addr_gva2hva(vm, PVTIME_ADDR); + + vm_ts.tv_sec = wc->sec; + vm_ts.tv_nsec = wc->nsec; + TEST_ASSERT(wc->version && !(wc->version & 1), + "Bad wallclock version %x", wc->version); + TEST_ASSERT(cmp_timespec(&min_ts, &vm_ts) <= 0, "VM time too old"); + TEST_ASSERT(cmp_timespec(&max_ts, &vm_ts) >= 0, "VM time too new"); + + TEST_ASSERT(ti->version && !(ti->version & 1), + "Bad time_info version %x", ti->version); + TEST_ASSERT(ti2->version && !(ti2->version & 1), + "Bad time_info version %x", ti2->version); + + kvm_vm_free(vm); + return 0; +} -- cgit v1.2.3-71-gd317 From 9fd6dad1261a541b3f5fa7dc5b152222306e6702 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 5 Feb 2021 05:07:11 -0500 Subject: mm: provide a saner PTE walking API for modules Currently, the follow_pfn function is exported for modules but follow_pte is not. However, follow_pfn is very easy to misuse, because it does not provide protections (so most of its callers assume the page is writable!) and because it returns after having already unlocked the page table lock. Provide instead a simplified version of follow_pte that does not have the pmdpp and range arguments. The older version survives as follow_invalidate_pte() for use by fs/dax.c. Reviewed-by: Jason Gunthorpe Signed-off-by: Paolo Bonzini --- arch/s390/pci/pci_mmio.c | 4 ++-- fs/dax.c | 5 +++-- include/linux/mm.h | 6 ++++-- mm/memory.c | 41 ++++++++++++++++++++++++++++++++++++----- virt/kvm/kvm_main.c | 4 ++-- 5 files changed, 47 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 18f2d10c3176..474617b88648 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -170,7 +170,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl); if (ret) goto out_unlock_mmap; @@ -311,7 +311,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, if (!(vma->vm_flags & VM_WRITE)) goto out_unlock_mmap; - ret = follow_pte(vma->vm_mm, mmio_addr, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl); if (ret) goto out_unlock_mmap; diff --git a/fs/dax.c b/fs/dax.c index 26d5dcd2d69e..b3d27fdc6775 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -810,11 +810,12 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, address = pgoff_address(index, vma); /* - * Note because we provide range to follow_pte it will call + * follow_invalidate_pte() will use the range to call * mmu_notifier_invalidate_range_start() on our behalf before * taking any lock.
*/ - if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl)) + if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep, + &pmdp, &ptl)) continue; /* diff --git a/include/linux/mm.h b/include/linux/mm.h index ecdf8a8cd6ae..24b292fce8e5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1658,9 +1658,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp); int follow_pte(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, - spinlock_t **ptlp); + pte_t **ptepp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, diff --git a/mm/memory.c b/mm/memory.c index feff48e1465a..985dac0958dc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4709,9 +4709,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) } #endif /* __PAGETABLE_PMD_FOLDED */ -int follow_pte(struct mm_struct *mm, unsigned long address, - struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, - spinlock_t **ptlp) +int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, + struct mmu_notifier_range *range, pte_t **ptepp, + pmd_t **pmdpp, spinlock_t **ptlp) { pgd_t *pgd; p4d_t *p4d; @@ -4776,6 +4776,34 @@ out: return -EINVAL; } +/** + * follow_pte - look up PTE at a user virtual address + * @mm: the mm_struct of the target address space + * @address: user virtual address + * @ptepp: location to store found PTE + * @ptlp: location to store the lock for the PTE + * + * On a successful return, the pointer to the PTE is stored in @ptepp; + * the corresponding lock is taken and its location is stored in @ptlp. + * The contents of the PTE are only stable until @ptlp is released; + * any further use, if any, must be protected against invalidation + * with MMU notifiers. + * + * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore + * should be taken for read. + * + * KVM uses this function. While it is arguably less bad than ``follow_pfn``, + * it is not a good general-purpose API. + * + * Return: zero on success, -ve otherwise. + */ +int follow_pte(struct mm_struct *mm, unsigned long address, + pte_t **ptepp, spinlock_t **ptlp) +{ + return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp); +} +EXPORT_SYMBOL_GPL(follow_pte); + /** * follow_pfn - look up PFN at a user virtual address * @vma: memory mapping @@ -4784,6 +4812,9 @@ out: * * Only IO mappings and raw PFN mappings are allowed. * + * This function does not allow the caller to read the permissions + * of the PTE. Do not use it. + * * Return: zero and the pfn at @pfn on success, -ve otherwise. 
*/ int follow_pfn(struct vm_area_struct *vma, unsigned long address, @@ -4796,7 +4827,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address, if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return ret; - ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl); + ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; *pfn = pte_pfn(*ptep); @@ -4817,7 +4848,7 @@ int follow_phys(struct vm_area_struct *vma, if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out; - if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl)) + if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; pte = *ptep; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 48ccdf4e3d04..ee4ac2618ec5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1911,7 +1911,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, spinlock_t *ptl; int r; - r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl); + r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); if (r) { /* * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does @@ -1926,7 +1926,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, if (r) return r; - r = follow_pte(vma->vm_mm, addr, NULL, &ptep, NULL, &ptl); + r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); if (r) return r; } -- cgit v1.2.3-71-gd317 From 4fc096a99e01dd06dc55bef76ade7f8d76653245 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Thu, 28 Jan 2021 13:01:31 -0500 Subject: KVM: Raise the maximum number of user memslots Current KVM_USER_MEM_SLOTS limits are arch specific (512 on Power, 509 on x86, 32 on s390, 16 on MIPS) but they don't really need to be. Memory slots are allocated dynamically in KVM when added so the only real limitation is 'id_to_index' array which is 'short'. We don't have any other KVM_MEM_SLOTS_NUM/KVM_USER_MEM_SLOTS-sized statically defined structures. Low KVM_USER_MEM_SLOTS can be a limiting factor for some configurations. In particular, when QEMU tries to start a Windows guest with Hyper-V SynIC enabled and e.g. 256 vCPUs the limit is hit as SynIC requires two pages per vCPU and the guest is free to pick any GFN for each of them, this fragments memslots as QEMU wants to have a separate memslot for each of these pages (which are supposed to act as 'overlay' pages). 
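To put numbers on the SynIC example (a sketch of the arithmetic, using only the constants visible in the diff below):

/*
 * Old x86 limit:  KVM_USER_MEM_SLOTS = 509
 * SynIC demand:   256 vCPUs * 2 overlay pages = 512 slots, before
 *                 counting any RAM or ROM slots - already over the limit.
 * New limits:     KVM_MEM_SLOTS_NUM  = SHRT_MAX = 32767
 *                 KVM_USER_MEM_SLOTS = 32767 - KVM_PRIVATE_MEM_SLOTS (3 on x86) = 32764
 */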
Signed-off-by: Vitaly Kuznetsov Message-Id: <20210127175731.2020089-3-vkuznets@redhat.com> Signed-off-by: Paolo Bonzini --- arch/arm64/include/asm/kvm_host.h | 1 - arch/mips/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 - arch/s390/include/asm/kvm_host.h | 1 - arch/x86/include/asm/kvm_host.h | 2 -- include/linux/kvm_host.h | 5 ++--- 6 files changed, 2 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8fcfab0c2567..1b8a3d825276 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,7 +30,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#define KVM_USER_MEM_SLOTS 512 #define KVM_HALT_POLL_NS_DEFAULT 500000 #include diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 24f3d0f9996b..3a5612e7304c 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -83,7 +83,6 @@ #define KVM_MAX_VCPUS 16 -#define KVM_USER_MEM_SLOTS 16 /* memory slots that does not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 0 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d67a470e95a3..2b9b6855ec86 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -28,7 +28,6 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS -#define KVM_USER_MEM_SLOTS 512 #include diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 74f9a036bab2..6bcfc5614bbc 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -28,7 +28,6 @@ #define KVM_S390_BSCA_CPU_SLOTS 64 #define KVM_S390_ESCA_CPU_SLOTS 248 #define KVM_MAX_VCPUS 255 -#define KVM_USER_MEM_SLOTS 32 /* * These seem to be used for allocating ->chip in the routing table, which we diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c451412a6230..b6de568e7476 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -40,10 +40,8 @@ #define KVM_MAX_VCPUS 288 #define KVM_SOFT_MAX_VCPUS 240 #define KVM_MAX_VCPU_ID 1023 -#define KVM_USER_MEM_SLOTS 509 /* memory slots that are not exposed to userspace */ #define KVM_PRIVATE_MEM_SLOTS 3 -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) #define KVM_HALT_POLL_NS_DEFAULT 200000 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f417447129b9..e126ebda36d0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -425,9 +425,8 @@ struct kvm_irq_routing_table { #define KVM_PRIVATE_MEM_SLOTS 0 #endif -#ifndef KVM_MEM_SLOTS_NUM -#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) -#endif +#define KVM_MEM_SLOTS_NUM SHRT_MAX +#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS) #ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu) -- cgit v1.2.3-71-gd317 From d9a47edabc4f948102753fa9d41f2dc1dbeb28be Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Wed, 16 Dec 2020 16:12:19 +0530 Subject: KVM: PPC: Book3S HV: Introduce new capability for 2nd DAWR Introduce KVM_CAP_PPC_DAWR1 which can be used by QEMU to query whether KVM supports 2nd DAWR or not. The capability is by default disabled even when the underlying CPU supports 2nd DAWR. QEMU needs to check and enable it manually to use the feature. 
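In VMM terms the check/enable dance described above is roughly the following (a hypothetical sketch; the vm_fd name is assumed and errors are ignored):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void maybe_enable_dawr1(int vm_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_DAWR1 };

	/* 0 here means the CPU or KVM lacks 2nd-DAWR support */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_DAWR1) <= 0)
		return;

	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* explicit per-VM opt-in */
}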
Signed-off-by: Ravi Bangoria Signed-off-by: Paul Mackerras --- Documentation/virt/kvm/api.rst | 10 ++++++++++ arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 12 ++++++++++++ arch/powerpc/kvm/powerpc.c | 10 ++++++++++ include/uapi/linux/kvm.h | 1 + tools/include/uapi/linux/kvm.h | 1 + 6 files changed, 35 insertions(+) (limited to 'include') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index a783a8da818b..45fd862ac128 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6215,6 +6215,16 @@ the bus lock vm exit can be preempted by a higher priority VM exit, the exit notifications to userspace can be KVM_EXIT_BUS_LOCK or other reasons. KVM_RUN_BUS_LOCK flag is used to distinguish between them. +7.22 KVM_CAP_PPC_DAWR1 +---------------------- + +:Architectures: ppc +:Parameters: none +:Returns: 0 on success, -EINVAL when CPU doesn't support 2nd DAWR + +This capability can be used to check / enable 2nd DAWR feature provided +by POWER10 processor. + 8. Other capabilities. ====================== diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0a056c64c317..13c39d24dda5 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -314,6 +314,7 @@ struct kvmppc_ops { int size); int (*enable_svm)(struct kvm *kvm); int (*svm_off)(struct kvm *kvm); + int (*enable_dawr1)(struct kvm *kvm); }; extern struct kvmppc_ops *kvmppc_hv_ops; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index b37bddfead1b..dffbfd1ab47b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5642,6 +5642,17 @@ out: return ret; } +static int kvmhv_enable_dawr1(struct kvm *kvm) +{ + if (!cpu_has_feature(CPU_FTR_DAWR1)) + return -ENODEV; + + /* kvm == NULL means the caller is testing if the capability exists */ + if (kvm) + kvm->arch.dawr1_enabled = true; + return 0; +} + static struct kvmppc_ops kvm_ops_hv = { .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, @@ -5685,6 +5696,7 @@ static struct kvmppc_ops kvm_ops_hv = { .store_to_eaddr = kvmhv_store_to_eaddr, .enable_svm = kvmhv_enable_svm, .svm_off = kvmhv_svm_off, + .enable_dawr1 = kvmhv_enable_dawr1, }; static int kvm_init_subcore_bitmap(void) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index cf52d26f49cd..5914c32d9ef3 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -678,6 +678,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = hv_enabled && kvmppc_hv_ops->enable_svm && !kvmppc_hv_ops->enable_svm(NULL); break; + case KVM_CAP_PPC_DAWR1: + r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 && + !kvmppc_hv_ops->enable_dawr1(NULL)); + break; #endif default: r = 0; @@ -2187,6 +2191,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, break; r = kvm->arch.kvm_ops->enable_svm(kvm); break; + case KVM_CAP_PPC_DAWR1: + r = -EINVAL; + if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) + break; + r = kvm->arch.kvm_ops->enable_dawr1(kvm); + break; #endif default: r = -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 63f8f6e95648..8b281f722e5b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1077,6 +1077,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SYS_HYPERV_CPUID 191 #define KVM_CAP_DIRTY_LOG_RING 192 #define KVM_CAP_X86_BUS_LOCK_EXIT 193 +#define KVM_CAP_PPC_DAWR1 194 #ifdef KVM_CAP_IRQ_ROUTING diff 
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 886802b8ffba..9c284a9c3ac5 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1056,6 +1056,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
 #define KVM_CAP_SYS_HYPERV_CPUID 191
 #define KVM_CAP_DIRTY_LOG_RING 192
+#define KVM_CAP_PPC_DAWR1 194
 
 #ifdef KVM_CAP_IRQ_ROUTING
-- cgit v1.2.3-71-gd317

From d8d0da4eee5c4e86ea08abde6975848376b4ac13 Mon Sep 17 00:00:00 2001
From: Waiman Long
Date: Wed, 10 Feb 2021 13:16:31 -0500
Subject: locking/arch: Move qrwlock.h include after qspinlock.h

include/asm-generic/qrwlock.h was trying to get arch_spin_is_locked via
asm-generic/qspinlock.h. However, this does not work because architectures
might be using queued rwlocks but not queued spinlocks (csky), or because
they might be defining their own queued_* macros before including
asm/qspinlock.h.

To fix this, ensure that asm/spinlock.h always includes qrwlock.h after
defining arch_spin_is_locked (either directly for csky, or via
asm/qspinlock.h for other architectures). The only inclusion elsewhere
is in kernel/locking/qrwlock.c. That one is really unnecessary because
the file is only compiled in SMP configurations (config QUEUED_RWLOCKS
depends on SMP) and in that case linux/spinlock.h already includes
asm/qrwlock.h if needed, via asm/spinlock.h.

Reported-by: Guenter Roeck
Signed-off-by: Waiman Long
Fixes: 26128cb6c7e6 ("locking/rwlocks: Add contention detection for rwlocks")
Tested-by: Guenter Roeck
Reviewed-by: Ben Gardon
[Add arch/sparc and kernel/locking parts per discussion with Waiman. - Paolo]
Signed-off-by: Paolo Bonzini
---
 arch/arm64/include/asm/spinlock.h    | 2 +-
 arch/mips/include/asm/spinlock.h     | 2 +-
 arch/sparc/include/asm/spinlock_64.h | 2 +-
 arch/xtensa/include/asm/spinlock.h   | 2 +-
 include/asm-generic/qrwlock.h        | 3 ++-
 kernel/locking/qrwlock.c             | 1 -
 6 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'include')

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 9083d6992603..0525c0b089ed 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -5,8 +5,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
-#include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 8a88eb265516..6ce2117e49f6 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -10,7 +10,6 @@
 #define _ASM_SPINLOCK_H
 
 #include <asm/processor.h>
-#include <asm/qrwlock.h>
 
 #include <asm-generic/qspinlock_types.h>
 
@@ -27,5 +26,6 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 
 #include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
 
 #endif /* _ASM_SPINLOCK_H */
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 7fc82a233f49..3a9a0b0c7465 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -11,8 +11,8 @@
 #include <asm/processor.h>
 #include <asm/barrier.h>
 
-#include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
 
 #endif /* !(__ASSEMBLY__) */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index 584b0de6f2ca..41c449ece2d8 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -12,8 +12,8 @@
 #define _XTENSA_SPINLOCK_H
 
 #include <asm/barrier.h>
-#include <asm/qrwlock.h>
 #include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
 
 #define smp_mb__after_spinlock()	smp_mb()
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0020d3b820a7..7ae0ece07b4e 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -14,7 +14,8 @@
 #include <asm/processor.h>
 
 #include <asm-generic/qrwlock_types.h>
-#include <asm-generic/qspinlock.h>
+
+/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */
 
 /*
  * Writer states & reader shift and bias.
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index fe9ca92faa2a..4786dd271b45 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -12,7 +12,6 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/spinlock.h>
-#include <asm/qrwlock.h>
 
 /**
  * queued_read_lock_slowpath - acquire read lock of a queue rwlock
-- cgit v1.2.3-71-gd317
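The net effect of the series is a fixed include order: arch_spin_is_locked must
be defined before asm/qrwlock.h is pulled in. An illustrative sketch of the
shape an architecture's asm/spinlock.h takes after this change (example header
guard is hypothetical, not a file from the patch):

    /* Illustrative only: the ordering this patch enforces. */
    #ifndef _ASM_EXAMPLE_SPINLOCK_H
    #define _ASM_EXAMPLE_SPINLOCK_H

    #include <asm/qspinlock.h>  /* provides arch_spin_is_locked() */
    #include <asm/qrwlock.h>    /* relies on arch_spin_is_locked() existing */

    #endif /* _ASM_EXAMPLE_SPINLOCK_H */

Architectures such as csky that use queued rwlocks without queued spinlocks
define arch_spin_is_locked themselves before the qrwlock.h include, which is
why the ordering lives in each asm/spinlock.h rather than in qrwlock.h.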