summaryrefslogtreecommitdiffstats
path: root/patch.diff
blob: 1ccac4996a638105bdc98df164859625cc08dca5 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b804444e16d4..66a4d56e331a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -Iarch/x86/kvm -O2
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
@@ -10,7 +10,9 @@ endif
 KVM := ../../../virt/kvm
 
 kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
-				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o
+				$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
+				svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
+
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
@@ -20,7 +22,8 @@ kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
 
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 			   vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
-kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o \
+			   svm/cachepc/cachepc.o svm/cachepc/util.o svm/cachepc/kvm.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7b3cfbe8f7e3..c7952eab7c6d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2,6 +2,8 @@
 
 #include <linux/kvm_host.h>
 
+#include "cachepc/cachepc.h"
+
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
@@ -3749,9 +3751,26 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 	lockdep_hardirqs_on(CALLER_ADDR0);
 
 	if (sev_es_guest(svm->vcpu.kvm)) {
+		memset(cachepc_msrmts, 0, 64 * 2);
+		int cpu = get_cpu();
+		local_irq_disable();
+		WARN_ON(cpu != 2);
+		cacheline *next;
+		next = cachepc_prime(cachepc_ds);
 		__svm_sev_es_vcpu_run(svm->vmcb_pa);
+		cachepc_probe(next);
+		cachepc_save_msrmts(cachepc_ds);
+		local_irq_enable();
+		put_cpu();
 	} else {
+		memset(cachepc_msrmts, 0, 64 * 2);
+		int cpu = get_cpu();
+		local_irq_disable();
+		WARN_ON(cpu != 2);
 		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+		cachepc_save_msrmts(cachepc_ds);
+		local_irq_enable();
+		put_cpu();
 
 #ifdef CONFIG_X86_64
 		native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
@@ -3785,8 +3804,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vcpu_svm *svm;
 
+	printk(KERN_WARNING "CachePC: svm_cpu_enter_exit()\n");
+	WARN_ON(smp_processor_id() != 2);
+
+	svm = to_svm(vcpu);
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -3888,7 +3911,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	vmcb_mark_all_clean(svm->vmcb);
-
+	printk(KERN_WARNING "Vincent: svm->vmcb exit code %d\n", svm->vmcb->control.exit_code);
 	/* if exit due to PF check for async PF */
 	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
 		svm->vcpu.arch.apf.host_apf_flags =
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 6feb8c08f45a..eb0ea02ef187 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,14 +27,74 @@
 #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
 #endif
 
+.extern cachepc_msrmts
+
 .section .noinstr.text, "ax"
 
+.macro push_xmm gpr xmm
+	vmovq \gpr, \xmm
+.endm
+
+.macro pop_xmm gpr xmm
+	vmovq \xmm, \gpr
+.endm
+
+.macro swap_xmm grp xmm
+	vmovq \grp, %xmm15
+	vmovq \xmm, \grp
+	vmovq %xmm15, \xmm
+.endm
+
+.macro push_all
+	push_xmm %rax, %xmm0
+	push_xmm %rbx, %xmm1
+	push_xmm %rcx, %xmm2
+	push_xmm %rdx, %xmm3
+	push_xmm %rbp, %xmm4
+	push_xmm %rsp, %xmm5
+	push_xmm %rdi, %xmm6
+	push_xmm %rsi, %xmm7
+	push_xmm %r8,  %xmm8
+	push_xmm %r9,  %xmm9
+	push_xmm %r10, %xmm10
+	push_xmm %r11, %xmm11
+	push_xmm %r12, %xmm12
+	push_xmm %r13, %xmm13
+	push_xmm %r14, %xmm14
+.endm
+
+.macro swap_all
+	swap_xmm %rax, %xmm0
+	swap_xmm %rbx, %xmm1
+	swap_xmm %rcx, %xmm2
+	swap_xmm %rdx, %xmm3
+	swap_xmm %rbp, %xmm4
+	swap_xmm %rsp, %xmm5
+	swap_xmm %rdi, %xmm6
+	swap_xmm %rsi, %xmm7
+	swap_xmm %r8,  %xmm8
+	swap_xmm %r9,  %xmm9
+	swap_xmm %r10, %xmm10
+	swap_xmm %r11, %xmm11
+	swap_xmm %r12, %xmm12
+	swap_xmm %r13, %xmm13
+	swap_xmm %r14, %xmm14
+.endm
+
+.macro barrier
+	mfence
+	mov $0x80000005,%eax
+	cpuid
+.endm
+
 /**
  * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
  * @vmcb_pa:	unsigned long
  * @regs:	unsigned long * (to guest registers)
  */
 SYM_FUNC_START(__svm_vcpu_run)
+	push_all
+
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
@@ -45,6 +105,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	push %edi
 	push %esi
 #endif
+
 	push %_ASM_BX
 
 	/* Save @regs. */
@@ -85,8 +146,25 @@ SYM_FUNC_START(__svm_vcpu_run)
 	jne 3f
 	ud2
 	_ASM_EXTABLE(1b, 2b)
+
+3:
+	swap_all
+	mov cachepc_ds, %rsi
+	mov 0x8(%rsi), %r15
+	lea sev_prime_ret(%rip), %rdi
+	jmp cachepc_prime_vcall+1 // skip stack pushes
+sev_prime_ret:
+	swap_all
+
+	vmrun %_ASM_AX
+
+	swap_all
+	mov %r15, %rsi
+	lea sev_probe_ret(%rip), %rdi
+	jmp cachepc_probe_vcall+6 // skip stack pushes
+sev_probe_ret:
+	swap_all
 
-3:	vmrun %_ASM_AX
 	jmp 5f
 4:	cmpb $0, kvm_rebooting
 	jne 5f
@@ -100,7 +178,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 	_ASM_EXTABLE(5b, 6b)
 7:
-	cli
+	cli
 
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
@@ -166,6 +244,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %edi
 #endif
 	pop %_ASM_BP
+
+	# mov cachepc_msrmts(%rip), %rax
+	# mov $0x1, %edx
+	# mov %dx, (%rax)
+
 	ret
 SYM_FUNC_END(__svm_vcpu_run)
 
@@ -174,6 +257,8 @@ SYM_FUNC_END(__svm_vcpu_run)
  * @vmcb_pa:	unsigned long
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
+	push_all
+
 	push %_ASM_BP
 #ifdef CONFIG_X86_64
 	push %r15
@@ -190,7 +275,28 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_AX
 	sti
 
-1:	vmrun %_ASM_AX
+1:
+
+//	swap_all
+//	mov cachepc_ds, %rsi
+//	mov 0x8(%rsi), %r15
+//	lea sev_es_prime_ret(%rip), %rdi
+//	jmp cachepc_prime_vcall+1 // skip stack pushes
+//sev_es_prime_ret:
+//	swap_all
+
+//	// TEST r15 dependance
+//	movq $0x41414141, %r15
+
+	vmrun %_ASM_AX
+
+//	swap_all
+//	mov %r15, %rsi
+//	lea sev_es_probe_ret(%rip), %rdi
+//	jmp cachepc_probe_vcall+6 // skip stack pushes
+//sev_es_probe_ret:
+//	swap_all
+
 	jmp 3f
 2:	cmpb $0, kvm_rebooting
 	jne 3f
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2541a17ff1c4..8796ad5e9b73 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,6 +66,8 @@
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12
 
+#include "../../arch/x86/kvm/svm/cachepc/kvm.h"
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -4848,6 +4849,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
+	cachepc_kvm_init();
+
 	return 0;
 
 out_unreg:
@@ -4872,6 +4875,8 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+	cachepc_kvm_exit();
+
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);