commit 2ee8bd2f14f1fe909108e89a24ec9f6de814f438
parent 476f6c892d90e66fbd17ba616b82b000a990f63e
Author: Louis Burda <quent.burda@gmail.com>
Date: Sat, 13 Aug 2022 20:59:43 +0200
Fixed counts read, added stream HWPF test and HWPF disable attempts
Diffstat:
3 files changed, 71 insertions(+), 21 deletions(-)
diff --git a/kmod/cachepc.c b/kmod/cachepc.c
@@ -15,7 +15,6 @@ static cacheline *build_cache_ds(cache_ctx *ctx, cacheline **cacheline_ptr_arr);
static void build_randomized_list_for_cache_set(cache_ctx *ctx, cacheline **cacheline_ptr_arr);
static cacheline **allocate_cache_ds(cache_ctx *ctx);
static uint16_t get_virt_cache_set(cache_ctx *ctx, void *ptr);
-static void *aligned_alloc(size_t alignment, size_t size);
void
cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask)
@@ -385,7 +384,7 @@ allocate_cache_ds(cache_ctx *ctx)
BUG_ON(ctx->addressing != VIRTUAL);
// For virtual addressing, allocating a consecutive chunk of memory is enough
- cl_arr = aligned_alloc(PAGE_SIZE, ctx->cache_size);
+ cl_arr = cachepc_aligned_alloc(PAGE_SIZE, ctx->cache_size);
BUG_ON(cl_arr == NULL);
for (i = 0; i < ctx->nr_of_cachelines; ++i) {
@@ -404,7 +403,7 @@ get_virt_cache_set(cache_ctx *ctx, void *ptr)
}
void *
-aligned_alloc(size_t alignment, size_t size)
+cachepc_aligned_alloc(size_t alignment, size_t size)
{
void *p;
diff --git a/kmod/cachepc.h b/kmod/cachepc.h
@@ -16,6 +16,8 @@ void cachepc_release_ds(cache_ctx *ctx, cacheline *ds);
cacheline *cachepc_prepare_victim(cache_ctx *ctx, uint32_t set);
void cachepc_release_victim(cache_ctx *ctx, cacheline *ptr);
+void *cachepc_aligned_alloc(size_t alignment, size_t size);
+
void cachepc_save_msrmts(cacheline *head);
void cachepc_print_msrmts(cacheline *head);
diff --git a/patch.diff b/patch.diff
@@ -80,7 +80,7 @@ index 7b3cfbe8f7e3..16dfd9b2938e 100644
* We do not use IBRS in the kernel. If this vCPU has used the
* SPEC_CTRL MSR it may have left it on; save the value and
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 2541a17ff1c4..8c46d509bd13 100644
+index 2541a17ff1c4..5a85ea4ce5af 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,9 @@
@@ -121,7 +121,7 @@ index 2541a17ff1c4..8c46d509bd13 100644
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
-@@ -4765,12 +4782,240 @@ static void check_processor_compat(void *data)
+@@ -4765,12 +4782,289 @@ static void check_processor_compat(void *data)
*c->ret = kvm_arch_check_processor_compat(c->opaque);
}
@@ -147,7 +147,7 @@ index 2541a17ff1c4..8c46d509bd13 100644
+ size_t len, left;
+ size_t size;
+
-+ printk(KERN_WARNING "CacheSC: Reading entries (%lu:%lli)\n",
++ printk(KERN_WARNING "CachePC: Reading entries (%lu:%lli)\n",
+ buflen, off ? *off : 0);
+
+ size = cachepc_msrmts_count * sizeof(uint16_t);
@@ -157,7 +157,7 @@ index 2541a17ff1c4..8c46d509bd13 100644
+ len = size - *off;
+ if (len > buflen) len = buflen;
+
-+ left = copy_to_user(buf, cachepc_msrmts + *off, len);
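++ /* *off is a byte offset: cast to uint8_t * so the offset is not
++  * scaled by sizeof(uint16_t) */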
++ left = copy_to_user(buf, (uint8_t *) cachepc_msrmts + *off, len);
+
+ len -= left;
+ *off += len;
@@ -172,6 +172,39 @@ index 2541a17ff1c4..8c46d509bd13 100644
+}
+
+void
++kvm_cachepc_stream_hwpf_test(void *p)
++{
++ cacheline *cl;
++ uint32_t count;
++ uint32_t *arg;
++
++ arg = p;
++
++ /* l2 data cache, hit or miss */
++ cachepc_init_pmc(0, 0x64, 0xD8);
++
++ cl = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size);
++ BUG_ON(cl == NULL);
++
++ cachepc_prime(cachepc_ds);
++
++ count = cachepc_read_pmc(0);
++
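++ /* issue four dependent loads from the freshly allocated buffer and
++  * compare the PMC value before and after; any deviation from the
++  * expected 4 events fails the test (reported via *arg) */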
++ asm volatile ("lea 0(%0), %%rbx; mov (%%rbx), %%rbx" : : "r"(cl) : "rbx");
++ asm volatile ("lea 8(%0), %%rbx; mov (%%rbx), %%rbx" : : "r"(cl) : "rbx");
++ asm volatile ("lea 16(%0), %%rbx; mov (%%rbx), %%rbx" : : "r"(cl) : "rbx");
++ asm volatile ("lea 24(%0), %%rbx; mov (%%rbx), %%rbx" : : "r"(cl) : "rbx");
++
++ count = cachepc_read_pmc(0) - count;
++
++ printk(KERN_WARNING "CachePC: HWPF test done, result: 4 (expected) vs. %u (actual) misses", count);
++
++ if (arg) *arg = count != 4;
++
++ kfree(cl);
++}
++
++void
+kvm_cachepc_single_access_test(void *p)
+{
+ cacheline *ptr;
@@ -258,13 +291,34 @@ index 2541a17ff1c4..8c46d509bd13 100644
+}
+
+void
++kvm_cachepc_stream_hwpf_disable(void)
++{
++ uint64_t reg_addr, val;
++ uint32_t lo, hi;
++
++ /* attempt to disable hwpf.. */
++
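++ /* read-modify-write MSR 0xc0011022: set bit 13 and write the value
++  * back; the second write (0xc001101C, bit 23) is left commented out
++  * below because it caused a crash */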
++ reg_addr = 0xc0011022;
++ asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
++ val = (uint64_t) lo | ((uint64_t) hi << 32);
++ val |= 1 << 13;
++ asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
++ printk("Writing MSR %08llX to disable HWPF: %16llX\n", reg_addr, val);
++
++ /* CAUSES CRASH! */
++ //reg_addr = 0xc001101C;
++ //asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg_addr));
++ //val = (uint64_t) lo | ((uint64_t) hi << 32);
++ //val |= 1 << 23;
++ //asm volatile ("wrmsr" : : "c"(reg_addr), "a"(val), "d"(0x00));
++ //printk("Writing MSR %08llX to disable HWPF: %16llX\n", reg_addr, val);
++}
++
++void
+kvm_cachepc_init(void *p)
+{
-+ cacheline *cl, *head;
+ int cpu;
+
-+ local_irq_disable();
-+
+ cpu = get_cpu();
+
+ printk(KERN_WARNING "CachePC: Running on core %i\n", cpu);
@@ -272,18 +326,13 @@ index 2541a17ff1c4..8c46d509bd13 100644
+ cachepc_ctx = cachepc_get_ctx(L1);
+ cachepc_ds = cachepc_prepare_ds(cachepc_ctx);
+
-+ head = cl = cachepc_ds;
-+ do {
-+ cl = cl->next;
-+ printk(KERN_WARNING "%i:%i\n", cl->cache_set, cl->cache_line);
-+ } while (cl != head);
++ kvm_cachepc_stream_hwpf_disable();
+
-+ kvm_cachepc_single_access_test(p);
-+ kvm_cachepc_single_eviction_test(p);
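++ /* with the prefetcher disable attempted above, run the self-tests
++  * once on this core */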
++ kvm_cachepc_stream_hwpf_test(NULL);
++ kvm_cachepc_single_access_test(NULL);
++ kvm_cachepc_single_eviction_test(NULL);
+
+ put_cpu();
-+
-+ local_irq_enable();
+}
+
+void
@@ -364,7 +413,7 @@ index 2541a17ff1c4..8c46d509bd13 100644
r = kvm_arch_init(opaque);
if (r)
-@@ -4848,6 +5093,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -4848,6 +5142,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_vfio_ops_init();
WARN_ON(r);
@@ -386,7 +435,7 @@ index 2541a17ff1c4..8c46d509bd13 100644
return 0;
out_unreg:
-@@ -4872,6 +5132,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
+@@ -4872,6 +5181,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{