commit 0c77235bfdb50a099e9dd97921e431600c7a86bd
parent 0a98557023804f047f54875817dedc4aa431d899
Author: Louis Burda <quent.burda@gmail.com>
Date: Fri, 19 Aug 2022 14:08:45 +0200
Move measurement even closer and ensure preemption is disabled
Diffstat:
M	patch.diff | 58 ++++++++++++++++++++++++++++------------------------------
1 file changed, 28 insertions(+), 30 deletions(-)
diff --git a/patch.diff b/patch.diff
@@ -32,7 +32,7 @@ index b804444e16d4..17167ccfca22 100644
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 7b3cfbe8f7e3..e6b30237aa06 100644
+index 7b3cfbe8f7e3..cb60859f7d17 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,8 @@
@@ -44,19 +44,22 @@ index 7b3cfbe8f7e3..e6b30237aa06 100644
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
-@@ -3751,7 +3753,11 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3751,7 +3753,14 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
if (sev_es_guest(svm->vcpu.kvm)) {
__svm_sev_es_vcpu_run(svm->vmcb_pa);
} else {
+ memset(cachepc_msrmts, 0, 64 * 2);
++ int cpu = get_cpu();
++ local_irq_disable();
++ WARN_ON(cpu != 2);
__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+ cachepc_save_msrmts(cachepc_ds);
-+ //asm volatile ("movb $1, (%0)" : : "r"(cachepc_msrmts) : "rax");
-+ //asm volatile ("movb $1, cachepc_msrmts");
++ local_irq_enable();
++ put_cpu();
#ifdef CONFIG_X86_64
native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-@@ -3785,8 +3791,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+@@ -3785,8 +3794,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -71,7 +74,7 @@ index 7b3cfbe8f7e3..e6b30237aa06 100644
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
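
[Editor's note: for reference, the window the svm.c hunk above establishes, shown as a plain C sketch outside the diff context. The cachepc_* symbols and __svm_vcpu_run come from the patch series; the wrapper function and includes are illustrative assumptions, not a drop-in replacement for the hunk. get_cpu() disables preemption so the task cannot migrate off the measured core between prime and probe, and local_irq_disable() keeps interrupt handlers from evicting the primed cache sets between vmrun and readout.

    #include <linux/smp.h>
    #include <linux/irqflags.h>
    #include <linux/bug.h>
    #include <linux/string.h>

    /* Illustrative wrapper, not part of the patch. */
    static void cachepc_measured_vmrun(struct vcpu_svm *svm)
    {
    	int cpu;

    	memset(cachepc_msrmts, 0, 64 * 2);  /* clear the result buffer */

    	cpu = get_cpu();      /* disable preemption, note the current core */
    	local_irq_disable();  /* no interrupts inside the measurement window */
    	WARN_ON(cpu != 2);    /* the harness is expected to pin us to core 2 */

    	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
    	cachepc_save_msrmts(cachepc_ds);  /* read out the per-set measurements */

    	local_irq_enable();
    	put_cpu();
    }
]
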
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
-index 6feb8c08f45a..f2e71fb17274 100644
+index 6feb8c08f45a..2f259db92037 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,14 +27,53 @@
@@ -136,10 +139,12 @@ index 6feb8c08f45a..f2e71fb17274 100644
push %_ASM_BX
/* Save @regs. */
-@@ -77,6 +117,45 @@ SYM_FUNC_START(__svm_vcpu_run)
- /* "POP" @vmcb to RAX. */
- pop %_ASM_AX
-
+@@ -85,8 +125,87 @@ SYM_FUNC_START(__svm_vcpu_run)
+ jne 3f
+ ud2
+ _ASM_EXTABLE(1b, 2b)
++
++3:
+ swap_xmm %rax, %xmm0
+ swap_xmm %rcx, %xmm2
+ swap_xmm %rdx, %xmm3
@@ -179,25 +184,8 @@ index 6feb8c08f45a..f2e71fb17274 100644
+ swap_xmm %r14, %xmm14
+ swap_xmm %r15, %xmm15
+
- /* Enter guest mode */
- sti
- 1: vmload %_ASM_AX
-@@ -85,8 +164,10 @@ SYM_FUNC_START(__svm_vcpu_run)
- jne 3f
- ud2
- _ASM_EXTABLE(1b, 2b)
-+
-+3:
+ vmrun %_ASM_AX
-
--3: vmrun %_ASM_AX
- jmp 5f
- 4: cmpb $0, kvm_rebooting
- jne 5f
-@@ -102,6 +183,44 @@ SYM_FUNC_START(__svm_vcpu_run)
- 7:
- cli
-
++
+ swap_xmm %rax, %xmm0
+ swap_xmm %rcx, %xmm2
+ swap_xmm %rdx, %xmm3
@@ -235,10 +223,20 @@ index 6feb8c08f45a..f2e71fb17274 100644
+ swap_xmm %r14, %xmm14
+ swap_xmm %r15, %xmm15
+
-+
+
+-3: vmrun %_ASM_AX
+ jmp 5f
+ 4: cmpb $0, kvm_rebooting
+ jne 5f
+@@ -100,7 +219,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+ ud2
+ _ASM_EXTABLE(5b, 6b)
+ 7:
+- cli
++ cli
+
#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -166,6 +285,11 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %edi
#endif
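
[Editor's note: the new WARN_ON(cpu != 2) only holds if whatever drives KVM_RUN pinned its thread to core 2 beforehand. A minimal userspace sketch of that pinning, assuming the harness calls it before entering the vCPU loop; the helper name and core number are illustrative and not part of this patch.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <err.h>

    static void pin_to_core(int core)
    {
    	cpu_set_t set;

    	CPU_ZERO(&set);
    	CPU_SET(core, &set);

    	/* pid 0 targets the calling thread, i.e. the one issuing KVM_RUN */
    	if (sched_setaffinity(0, sizeof(set), &set))
    		err(1, "sched_setaffinity");
    }

Called as pin_to_core(2) before the vCPU loop, this keeps the scheduler from moving the thread off the measured core, so the kernel-side WARN_ON stays silent.]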