commit 0f3b9caf389b486541614836bf180b64544615cb
parent 98babf91dcf166bc7644a3d70a90dac272f12a75
Author: Louis Burda <quent.burda@gmail.com>
Date: Sat, 13 Aug 2022 18:06:20 +0200
Fixup test/kvm, add target_set as an argument to the ioctl tests
Diffstat:
9 files changed, 178 insertions(+), 135 deletions(-)
diff --git a/.gitignore b/.gitignore
@@ -1,5 +1,6 @@
build.sh
push.sh
+.vscode
*.o.cmd
*.o
-.vscode
+*.out
diff --git a/kmod/asm.h b/kmod/asm.h
@@ -5,9 +5,6 @@
#define CPUID_AFFECTED_REGS "rax", "rbx", "rcx", "rdx"
__attribute__((always_inline))
-static inline uint64_t cachepc_readpmc(uint64_t event);
-
-__attribute__((always_inline))
static inline void cachepc_cpuid(void);
__attribute__((always_inline))
@@ -22,22 +19,6 @@ static inline void cachepc_mfence(void);
__attribute__((always_inline))
static inline void cachepc_readq(void *p);
-uint64_t
-cachepc_readpmc(uint64_t event)
-{
- uint32_t lo, hi;
-
- event = 0xC0010201 + 2 * event;
-
- asm volatile (
- "rdmsr"
- : "=a" (lo), "=d" (hi)
- : "c"(event)
- );
-
- return ((uint64_t) hi << 32) | (uint64_t) lo;
-}
-
void
cachepc_cpuid(void)
{
diff --git a/kmod/cachepc.c b/kmod/cachepc.c
@@ -18,49 +18,28 @@ static uint16_t get_virt_cache_set(cache_ctx *ctx, void *ptr);
static void *aligned_alloc(size_t alignment, size_t size);
void
-cachepc_init_counters(void)
+cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask)
{
- uint64_t event, event_no, event_mask;
+ uint64_t event;
uint64_t reg_addr;
- /* SEE: https://developer.amd.com/resources/developer-guides-manuals (PPR 17H 31H, P.166)
+ /* REF: https://developer.amd.com/resources/developer-guides-manuals (PPR 17H 31H, P.166)
*
- * performance event selection is done via 0xC001_020X with X = (0..A)[::2]
- * performance event reading is done viea 0XC001_020X with X = (1..B)[::2]
- *
- * 6 slots total
+ * performance event selection via 0xC001_020X with X = (0..A)[::2]
+ * performance event reading via 0xC001_020X with X = (1..B)[::2]
*/
- reg_addr = 0xc0010200;
- event_no = 0x70;
- event_mask = 0xFF;
- event = event_no | (event_mask << 8);
- event |= (1ULL << 17); /* OS (kernel) events only */
- event |= (1ULL << 22); /* enable performance counter */
- event |= (1ULL << 40); /* Host events only */
- printk(KERN_WARNING "CachePC: Initialized event %llu\n", event);
- asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
+ WARN_ON(index >= 6);
+ if (index >= 6) return;
- reg_addr = 0xc0010202;
- event_no = 0x71;
- event_mask = 0xFF;
- event = event_no | (event_mask << 8);
- event |= (1ULL << 17); /* OS (kernel) events only */
- event |= (1ULL << 22); /* enable performance counter */
- event |= (1ULL << 40); /* Host events only */
- printk(KERN_WARNING "CachePC: Initialized event %llu\n", event);
- asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
-
- reg_addr = 0xc0010204;
- event_no = 0x72;
- event_mask = 0xFF;
- event = event_no | (event_mask << 8);
+ reg_addr = 0xc0010200 + index * 2;
+ event = event_no | (event_mask << 8);
event |= (1ULL << 17); /* OS (kernel) events only */
event |= (1ULL << 22); /* enable performance counter */
event |= (1ULL << 40); /* Host events only */
- printk(KERN_WARNING "CachePC: Initialized event %llu\n", event);
+ printk(KERN_WARNING "CachePC: Initialized %i. PMC %02X:%02X\n",
+ index, event_no, event_mask);
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
-
}
cache_ctx *
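Note: the event-select encoding above can be reproduced in user space for sanity checking. The following is a minimal sketch that assembles the same PERF_EVT_SEL value and MSR address as cachepc_init_pmc() (bit layout per the PPR reference quoted in the comment); it is illustrative only, the kernel module is what actually issues the wrmsr, and the example event/mask values are borrowed from the eviction test later in this commit.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t index = 0, event_no = 0x64, event_mask = 0xD8;
	uint64_t reg_addr, event;

	reg_addr = 0xc0010200 + index * 2;  /* PERF_EVT_SEL slot */
	event = event_no | (event_mask << 8);
	event |= (1ULL << 17);              /* OS (kernel) events only */
	event |= (1ULL << 22);              /* enable performance counter */
	event |= (1ULL << 40);              /* Host events only */

	printf("wrmsr %#llx <- %#llx\n",
		(unsigned long long) reg_addr,
		(unsigned long long) event);

	return 0;
}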
diff --git a/kmod/cachepc.h b/kmod/cachepc.h
@@ -5,7 +5,7 @@
#include "util.h"
#include "cachepc_user.h"
-void cachepc_init_counters(void);
+void cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask);
cache_ctx *cachepc_get_ctx(cache_level cl);
void cachepc_release_ctx(cache_ctx *ctx);
@@ -31,6 +31,9 @@ static inline cacheline *cachepc_probe(cacheline *head);
__attribute__((always_inline))
static inline void cachepc_victim(void *p);
+__attribute__((always_inline))
+static inline uint64_t cachepc_read_pmc(uint64_t event);
+
extern uint16_t *cachepc_msrmts;
extern size_t cachepc_msrmts_count;
@@ -46,8 +49,6 @@ cachepc_prime(cacheline *head)
{
cacheline *curr_cl;
- //printk(KERN_WARNING "CachePC: Priming..\n");
-
cachepc_cpuid();
curr_cl = head;
do {
@@ -56,8 +57,6 @@ cachepc_prime(cacheline *head)
} while(curr_cl != head);
cachepc_cpuid();
- //printk(KERN_WARNING "CachePC: Priming done\n");
-
return curr_cl->prev;
}
@@ -102,8 +101,8 @@ cachepc_probe(cacheline *start_cl)
curr_cl = start_cl;
do {
- pre = cachepc_readpmc(0);
- pre += cachepc_readpmc(1);
+ pre = cachepc_read_pmc(0);
+ pre += cachepc_read_pmc(1);
cachepc_mfence();
cachepc_cpuid();
@@ -126,8 +125,8 @@ cachepc_probe(cacheline *start_cl)
cachepc_mfence();
cachepc_cpuid();
- post = cachepc_readpmc(0);
- post += cachepc_readpmc(1);
+ post = cachepc_read_pmc(0);
+ post += cachepc_read_pmc(1);
cachepc_mfence();
cachepc_cpuid();
@@ -148,3 +147,19 @@ cachepc_victim(void *p)
cachepc_mfence();
cachepc_readq(p);
}
+
+uint64_t
+cachepc_read_pmc(uint64_t event)
+{
+ uint32_t lo, hi;
+
+ event = 0xC0010201 + 2 * event;
+
+ asm volatile (
+ "rdmsr"
+ : "=a" (lo), "=d" (hi)
+ : "c"(event)
+ );
+
+ return ((uint64_t) hi << 32) | (uint64_t) lo;
+}
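For reference, the control and counter MSRs used by cachepc_init_pmc() and the relocated cachepc_read_pmc() interleave in MSR space; the "6 slots" limit matches the WARN_ON(index >= 6) added above. A quick sketch of the mapping (addresses follow the PPR layout quoted earlier, shown here only to make the "+ 2 * event" arithmetic explicit):

#include <stdio.h>

int
main(void)
{
	unsigned long i;

	/* PERF_EVT_SEL (written by cachepc_init_pmc) and PERF_CTR
	 * (read by cachepc_read_pmc) alternate per counter slot */
	for (i = 0; i < 6; i++)
		printf("slot %lu: ctl %#lx  ctr %#lx\n",
			i, 0xc0010200UL + 2 * i, 0xc0010201UL + 2 * i);

	return 0;
}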
diff --git a/kmod/cachepc_user.h b/kmod/cachepc_user.h
@@ -3,5 +3,6 @@
#include <linux/ioctl.h>
#define CACHEPC_IOCTL_MAGIC 0xBF
-#define CACHEPC_IOCTL_ACCESS_TEST _IOR(CACHEPC_IOCTL_MAGIC, 0, int)
-#define CACHEPC_IOCTL_EVICTION_TEST _IOR(CACHEPC_IOCTL_MAGIC, 1, int)
+#define CACHEPC_IOCTL_TEST_ACCESS _IOR(CACHEPC_IOCTL_MAGIC, 0, uint32_t)
+#define CACHEPC_IOCTL_TEST_EVICTION _IOWR(CACHEPC_IOCTL_MAGIC, 1, uint32_t)
+#define CACHEPC_IOCTL_INIT_PMC _IOW(CACHEPC_IOCTL_MAGIC, 2, uint32_t)
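The new CACHEPC_IOCTL_INIT_PMC takes a packed uint32_t: counter index in bits 31..24, event number in bits 15..8, unit mask in bits 7..0 (see kvm_cachepc_init_pmc_ioctl() below). A minimal user-space sketch of packing and issuing the call, assuming cachepc_user.h is on the include path and /proc/cachepc is the module's proc entry as used by the tests in this commit:

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "cachepc_user.h"

int
main(void)
{
	uint32_t arg;
	int fd, ret;

	fd = open("/proc/cachepc", O_RDONLY);
	if (fd < 0) err(1, "open");

	/* counter 0: event 0x64, unit mask 0xD8 (values taken from the
	 * single eviction test in this commit) */
	arg = ((uint32_t) 0 << 24) | ((uint32_t) 0x64 << 8) | 0xD8;
	ret = ioctl(fd, CACHEPC_IOCTL_INIT_PMC, &arg);
	if (ret == -1) err(1, "ioctl CACHEPC_IOCTL_INIT_PMC");

	close(fd);

	return 0;
}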
diff --git a/patch.diff b/patch.diff
@@ -89,7 +89,7 @@ index 7b3cfbe8f7e3..71697d08e9e4 100644
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 2541a17ff1c4..830cdb295d9c 100644
+index 2541a17ff1c4..a84a99f4b182 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,9 @@
@@ -130,7 +130,7 @@ index 2541a17ff1c4..830cdb295d9c 100644
__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
-@@ -4765,12 +4782,203 @@ static void check_processor_compat(void *data)
+@@ -4765,12 +4782,245 @@ static void check_processor_compat(void *data)
*c->ret = kvm_arch_check_processor_compat(c->opaque);
}
@@ -186,11 +186,14 @@ index 2541a17ff1c4..830cdb295d9c 100644
+ cacheline *ptr;
+ uint64_t pre, post;
+ volatile register uint64_t i asm("r11");
-+ int *cnt;
++ uint32_t *user;
+
-+ cnt = p;
++ /* l2 prefetches, hit or miss */
++ cachepc_init_pmc(0, 0x60, 0x01);
+
-+ ptr = cachepc_prepare_victim(cachepc_ctx, 48);
++ user = p;
++
++ ptr = cachepc_prepare_victim(cachepc_ctx, user ? *user : 48);
+
+ cachepc_mfence();
+ cachepc_cpuid();
@@ -205,17 +208,17 @@ index 2541a17ff1c4..830cdb295d9c 100644
+ cachepc_mfence();
+ cachepc_cpuid();
+
-+ pre = cachepc_readpmc(0);
++ pre = cachepc_read_pmc(0);
+
-+ cachepc_mfence();
-+ cachepc_cpuid();
++ //cachepc_mfence();
++ //cachepc_cpuid();
+
-+ pre += cachepc_readpmc(1);
++ //pre += cachepc_read_pmc(1);
+
-+ cachepc_mfence();
-+ cachepc_cpuid();
++ //cachepc_mfence();
++ //cachepc_cpuid();
+
-+ pre += cachepc_readpmc(2);
++ //pre += cachepc_read_pmc(2);
+
+ cachepc_mfence();
+ cachepc_cpuid();
@@ -230,24 +233,24 @@ index 2541a17ff1c4..830cdb295d9c 100644
+ cachepc_mfence();
+ cachepc_cpuid();
+
-+ post = cachepc_readpmc(0);
++ post = cachepc_read_pmc(0);
+
-+ cachepc_mfence();
-+ cachepc_cpuid();
++ //cachepc_mfence();
++ //cachepc_cpuid();
+
-+ post += cachepc_readpmc(1);
++ //post += cachepc_read_pmc(1);
+
-+ cachepc_mfence();
-+ cachepc_cpuid();
++ //cachepc_mfence();
++ //cachepc_cpuid();
+
-+ post += cachepc_readpmc(2);
++ //post += cachepc_read_pmc(2);
+
+ cachepc_mfence();
+ cachepc_cpuid();
+
+ printk(KERN_WARNING "CachePC: Single access test done, result: %llu", post - pre);
+
-+ if (cnt) *cnt = post - pre;
++ if (user) *user = post - pre;
+
+ cachepc_release_victim(cachepc_ctx, ptr);
+}
@@ -257,14 +260,19 @@ index 2541a17ff1c4..830cdb295d9c 100644
+{
+ cacheline *head;
+ cacheline *ptr;
++ uint32_t *user;
+
-+ ptr = cachepc_prepare_victim(cachepc_ctx, 48);
++ user = p;
++
++ /* l2 data cache, hit or miss */
++ cachepc_init_pmc(0, 0x64, 0xD8);
++
++ ptr = cachepc_prepare_victim(cachepc_ctx, user ? *user : 48);
+ head = cachepc_prime(cachepc_ds);
-+ //cachepc_victim(ptr);
++ cachepc_victim(ptr);
+ cachepc_probe(head);
+
+ printk(KERN_WARNING "CachePC: Single eviction test done\n");
-+ //cachepc_print_msrmts(head);
+ cachepc_save_msrmts(head);
+
+ cachepc_release_victim(cachepc_ctx, ptr);
@@ -281,8 +289,6 @@ index 2541a17ff1c4..830cdb295d9c 100644
+
+ printk(KERN_WARNING "CachePC: Running on core %i\n", cpu);
+
-+ cachepc_init_counters();
-+
+ cachepc_ctx = cachepc_get_ctx(L1);
+ cachepc_ds = cachepc_prepare_ds(cachepc_ctx);
+
@@ -294,29 +300,65 @@ index 2541a17ff1c4..830cdb295d9c 100644
+ local_irq_enable();
+}
+
++void
++kvm_cachepc_init_pmc_ioctl(void *p)
++{
++ uint32_t event;
++ uint8_t index, event_no, event_mask;
++
++ WARN_ON(p == NULL);
++ if (!p) return;
++
++ event = *(uint32_t *)p;
++
++ index = (event & 0xFF000000) >> 24;
++ event_no = (event & 0x0000FF00) >> 8;
++ event_mask = (event & 0x000000FF) >> 0;
++
++ cachepc_init_pmc(index, event_no, event_mask);
++}
++
+long
+kvm_cachepc_ioctl(struct file *file, unsigned int cmd, unsigned long argp)
+{
-+ int r;
+ void __user *arg_user;
-+ int cnt;
++ uint32_t u32;
++ int r;
+
+ arg_user = (void __user *)argp;
+ switch (cmd) {
-+ case CACHEPC_IOCTL_ACCESS_TEST:
++ case CACHEPC_IOCTL_TEST_ACCESS:
+ printk(KERN_WARNING "CachePC: ioctl access test\n");
++ if (arg_user) {
++ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
++ return -EFAULT;
++ }
+ r = smp_call_function_single(2,
-+ kvm_cachepc_single_access_test, &cnt, true);
++ kvm_cachepc_single_access_test, &u32, true);
+ WARN_ON(r != 0);
+ if (arg_user) {
-+ if (copy_to_user(arg_user, &cnt, sizeof(int)))
++ if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+ return -EFAULT;
+ }
+ break;
-+ case CACHEPC_IOCTL_EVICTION_TEST:
++ case CACHEPC_IOCTL_TEST_EVICTION:
+ printk(KERN_WARNING "CachePC: ioctl eviction test\n");
++ if (arg_user) {
++ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
++ return -EFAULT;
++ }
++ r = smp_call_function_single(2,
++ kvm_cachepc_single_eviction_test, &u32, true);
++ WARN_ON(r != 0);
++ break;
++ case CACHEPC_IOCTL_INIT_PMC:
++ printk(KERN_WARNING "CachePC: ioctl init counter\n");
++ if (arg_user) {
++ if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
++ return -EFAULT;
++ }
+ r = smp_call_function_single(2,
-+ kvm_cachepc_single_eviction_test, NULL, true);
++ kvm_cachepc_init_pmc_ioctl, &u32, true);
+ WARN_ON(r != 0);
+ break;
+ default:
@@ -336,7 +378,7 @@ index 2541a17ff1c4..830cdb295d9c 100644
r = kvm_arch_init(opaque);
if (r)
-@@ -4848,6 +5056,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+@@ -4848,6 +5098,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_vfio_ops_init();
WARN_ON(r);
@@ -358,7 +400,7 @@ index 2541a17ff1c4..830cdb295d9c 100644
return 0;
out_unreg:
-@@ -4872,6 +5095,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
+@@ -4872,6 +5137,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
diff --git a/test/access.c b/test/access.c
@@ -12,17 +12,18 @@
int
main(int argc, const char **argv)
{
+ uint32_t arg;
size_t i, len;
int fd, ret;
- int count;
fd = open("/proc/cachepc", O_RDONLY);
if (fd < 0) err(1, "open");
for (i = 0; i < 50; i++) {
- ret = ioctl(fd, CACHEPC_IOCTL_ACCESS_TEST, &count);
+ arg = 48; /* target set */
+ ret = ioctl(fd, CACHEPC_IOCTL_TEST_ACCESS, &arg);
if (ret == -1) err(1, "ioctl fail");
- printf("%i\n", count);
+ printf("%i\n", arg);
}
close(fd);
diff --git a/test/eviction.c b/test/eviction.c
@@ -13,12 +13,16 @@ int
main(int argc, const char **argv)
{
uint16_t counts[64];
+ uint32_t arg;
size_t i, len;
int fd, ret;
fd = open("/proc/cachepc", O_RDONLY);
- ret = ioctl(fd, CACHEPC_IOCTL_EVICTION_TEST, NULL);
+ arg = 48;
+ if (argc == 2) arg = atoi(argv[1]);
+
+ ret = ioctl(fd, CACHEPC_IOCTL_TEST_EVICTION, &arg);
if (ret == -1) err(1, "ioctl fail");
len = read(fd, counts, sizeof(counts));
diff --git a/test/kvm.c b/test/kvm.c
@@ -1,6 +1,8 @@
/* for CPU_ZERO macros.. */
#define _GNU_SOURCE
+#include "cachepc_user.h"
+
#include <linux/kvm.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
@@ -71,6 +73,8 @@ static pid_t victim_pid;
static struct kvm kvm;
static struct kvm_run *kvm_run;
+static int cachepc_fd;
+
#define TARGET_CACHE_LINESIZE 64
#define TARGET_SET 15
@@ -115,10 +119,9 @@ pin_process(pid_t pid, int cpu, bool assert)
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
- return true;
status = sched_setaffinity(pid, sizeof(cpu_set_t), &cpuset);
if (status < 0) {
- if (assert) err(EXIT_FAILURE, "sched_setaffinity");
+ if (assert) err(1, "sched_setaffinity");
return false;
}
@@ -139,13 +142,13 @@ read_stat_core(pid_t pid)
if (!file) return -1;
if (!fgets(line, sizeof(line), file))
- err(EXIT_FAILURE, "read stat");
+ err(1, "read stat");
p = line;
for (i = 0; i < 38 && (p = strchr(p, ' ')); i++)
p += 1;
- if (!p) errx(EXIT_FAILURE, "stat format");
+ if (!p) errx(1, "stat format");
cpu = atoi(p);
fclose(file);
@@ -166,7 +169,7 @@ clear_cores(uint64_t cpu_mask)
/* move all processes from the target cpu to secondary */
proc_dir = opendir("/proc");
- if (!proc_dir) err(EXIT_FAILURE, "opendir");
+ if (!proc_dir) err(1, "opendir");
while ((proc_ent = readdir(proc_dir))) {
pid = atoi(proc_ent->d_name);
@@ -181,7 +184,7 @@ clear_cores(uint64_t cpu_mask)
snprintf(taskpath, sizeof(taskpath), "/proc/%u/task", pid);
task_dir = opendir(taskpath);
- if (!task_dir) err(EXIT_FAILURE, "opendir");
+ if (!task_dir) err(1, "opendir");
while ((task_ent = readdir(task_dir))) {
tid = atoi(task_ent->d_name);
@@ -210,23 +213,23 @@ kvm_init(size_t ramsize, size_t code_start, size_t code_stop)
kvm.fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
if (kvm.fd < 0)
- err(EXIT_FAILURE, "/dev/kvm");
+ err(1, "/dev/kvm");
/* Make sure we have the stable version of the API */
ret = ioctl(kvm.fd, KVM_GET_API_VERSION, NULL);
if (ret == -1)
- err(EXIT_FAILURE, "KVM_GET_API_VERSION");
+ err(1, "KVM_GET_API_VERSION");
if (ret != 12)
- errx(EXIT_FAILURE, "KVM_GET_API_VERSION %d, expected 12", ret);
+ errx(1, "KVM_GET_API_VERSION %d, expected 12", ret);
kvm.vmfd = ioctl(kvm.fd, KVM_CREATE_VM, 0);
if (kvm.vmfd < 0)
- err(EXIT_FAILURE, "KVM_CREATE_VM");
+ err(1, "KVM_CREATE_VM");
/* Allocate one aligned page of guest memory to hold the code. */
kvm.mem = mmap(NULL, ramsize, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (!kvm.mem) err(EXIT_FAILURE, "allocating guest memory");
+ if (!kvm.mem) err(1, "allocating guest memory");
assert(code_stop - code_start <= ramsize);
memcpy(kvm.mem, code_start, code_stop - code_start);
@@ -239,29 +242,29 @@ kvm_init(size_t ramsize, size_t code_start, size_t code_stop)
printf("Ramsize %d\n", region.memory_size);
printf("Access guest %d\n", TARGET_CACHE_LINESIZE * TARGET_SET);
ret = ioctl(kvm.vmfd, KVM_SET_USER_MEMORY_REGION, &region);
- if (ret < 0) err(EXIT_FAILURE, "KVM_SET_USER_MEMORY_REGION");
+ if (ret < 0) err(1, "KVM_SET_USER_MEMORY_REGION");
kvm.vcpufd = ioctl(kvm.vmfd, KVM_CREATE_VCPU, 0);
- if (kvm.vcpufd < 0) err(EXIT_FAILURE, "KVM_CREATE_VCPU");
+ if (kvm.vcpufd < 0) err(1, "KVM_CREATE_VCPU");
/* Map the shared kvm_run structure and following data. */
ret = ioctl(kvm.fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
- if (ret < 0) err(EXIT_FAILURE, "KVM_GET_VCPU_MMAP_SIZE");
+ if (ret < 0) err(1, "KVM_GET_VCPU_MMAP_SIZE");
if (ret < sizeof(struct kvm_run))
- errx(EXIT_FAILURE, "KVM_GET_VCPU_MMAP_SIZE too small");
+ errx(1, "KVM_GET_VCPU_MMAP_SIZE too small");
kvm_run = mmap(NULL, ret, PROT_READ | PROT_WRITE,
MAP_SHARED, kvm.vcpufd, 0);
- if (!kvm_run) err(EXIT_FAILURE, "mmap vcpu");
+ if (!kvm_run) err(1, "mmap vcpu");
/* Initialize CS to point at 0, via a read-modify-write of sregs. */
memset(&sregs, 0, sizeof(sregs));
ret = ioctl(kvm.vcpufd, KVM_GET_SREGS, &sregs);
- if (ret < 0) err(EXIT_FAILURE, "KVM_GET_SREGS");
+ if (ret < 0) err(1, "KVM_GET_SREGS");
sregs.cs.base = 0;
sregs.cs.selector = 0;
ret = ioctl(kvm.vcpufd, KVM_SET_SREGS, &sregs);
- if (ret < 0) err(EXIT_FAILURE, "KVM_SET_SREGS");
+ if (ret < 0) err(1, "KVM_SET_SREGS");
/* Initialize registers: instruction pointer for our code, addends, and
* initial flags required by x86 architecture. */
@@ -271,16 +274,16 @@ kvm_init(size_t ramsize, size_t code_start, size_t code_stop)
regs.rdx = 0;
regs.rflags = 0x2;
ret = ioctl(kvm.vcpufd, KVM_SET_REGS, &regs);
- if (ret < 0) err(EXIT_FAILURE, "KVM_SET_REGS");
+ if (ret < 0) err(1, "KVM_SET_REGS");
}
int16_t *print_accessed_sets(){
//int16_t counts[64];
int16_t *counts = (int16_t *)malloc(64*sizeof(int16_t));
size_t i, len;
- int fd;
- fd = open("/proc/cachepc", O_RDONLY);
- len = read(fd, counts, 64*sizeof(int16_t)); // sizeof(counts));
+
+ lseek(cachepc_fd, 0, SEEK_SET);
+ len = read(cachepc_fd, counts, 64*sizeof(int16_t)); // sizeof(counts));
assert(len == 64*sizeof(int16_t));//sizeof(counts));
for (i = 0; i < 64; i++) {
@@ -296,13 +299,13 @@ int16_t *print_accessed_sets(){
}
printf("\n Target Set Count: %d %hu \n", TARGET_SET, counts[TARGET_SET]);
printf("\n");
- close(fd);
+
return counts;
}
void
-collect( const char *prefix, size_t code_start, size_t code_stop)
+collect(const char *prefix, size_t code_start, size_t code_stop)
{
int ret;
@@ -319,27 +322,30 @@ collect( const char *prefix, size_t code_start, size_t code_stop)
printf("Now calling KVM_RUN");
ret = ioctl(kvm.vcpufd, KVM_RUN, NULL);
if (kvm_run->exit_reason == KVM_EXIT_MMIO)
- errx(EXIT_FAILURE, "Victim access OOB: %lu\n",
+ errx(1, "Victim access OOB: %lu\n",
kvm_run->mmio.phys_addr);
if (ret < 0 || kvm_run->exit_reason != KVM_EXIT_IO)
- errx(EXIT_FAILURE, "KVM died: %i %i\n",
+ errx(1, "KVM died: %i %i\n",
ret, kvm_run->exit_reason);
close(kvm.fd);
close(kvm.vmfd);
close(kvm.vcpufd);
}
-void dump_msrmt_results_to_log(char *log_file_path, int16_t msrmt_results[SAMPLE_COUNT][64]){
- FILE *fp = fopen(log_file_path,"w+");
- if (!fp){
- errx(EXIT_FAILURE, "Failed to open log file\n");
- }
+void dump_msrmt_results_to_log(char *log_file_path, int16_t msrmt_results[SAMPLE_COUNT][64])
+{
+ FILE *fp;
+
+ fp = fopen(log_file_path,"w+");
+ if (!fp) err(1, "fopen");
+
fprintf(fp, "Number of samples: %d\n", SAMPLE_COUNT);
fprintf(fp, "Target set: %d\n", TARGET_SET);
fprintf(fp, "Measurements per sample: %d\n", 64);
fprintf(fp, "Legend: target set: %d\n", TARGET_SET);
fprintf(fp, "Output cache attack data\n");
+
for(int i=0; i<SAMPLE_COUNT; ++i){
fprintf(fp, "Sample number %d:\n", i);
for(int j=0; j<64; ++j){
@@ -348,19 +354,31 @@ void dump_msrmt_results_to_log(char *log_file_path, int16_t msrmt_results[SAMPLE
}
fprintf(fp,"\n");
}
- close(fp);
+ close(fp);
}
int
main(int argc, const char **argv)
{
+ uint32_t arg;
+ int ret;
setvbuf(stdout, NULL, _IONBF, 0);
clear_cores(1 << TARGET_CORE);
pin_process(0, TARGET_CORE, true);
+ cachepc_fd = open("/proc/cachepc", O_RDONLY);
+ if (cachepc_fd < 0) err(1, "open");
+
+ arg = 0x000064F0;
+ ret = ioctl(cachepc_fd, CACHEPC_IOCTL_INIT_PMC, &arg);
+ if (ret == -1) err(1, "ioctl fail");
+
+ arg = 0x01006408;
+ ret = ioctl(cachepc_fd, CACHEPC_IOCTL_INIT_PMC, &arg);
+ if (ret == -1) err(1, "ioctl fail");
printf("\n");
printf("Number of samples: %d\n", SAMPLE_COUNT);
@@ -399,7 +417,8 @@ main(int argc, const char **argv)
putchar('\n');
}
printf("\n");
- dump_msrmt_results_to_log("msmrt_without_access.out", msmrt_without_access);
- dump_msrmt_results_to_log("msmrt_with_access.out", msmrt_with_access);
+ //dump_msrmt_results_to_log("msmrt_without_access.out", msmrt_without_access);
+ //dump_msrmt_results_to_log("msmrt_with_access.out", msmrt_with_access);
+ close(cachepc_fd);
}
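The two magic arguments passed to CACHEPC_IOCTL_INIT_PMC in main() above decode as follows under the packing used by kvm_cachepc_init_pmc_ioctl(); a small sketch, included only to make the constants readable:

/* 0x000064F0 -> counter 0, event 0x64, unit mask 0xF0
 * 0x01006408 -> counter 1, event 0x64, unit mask 0x08 */
#include <stdint.h>
#include <stdio.h>

static void
decode_pmc_arg(uint32_t event)
{
	uint8_t index, event_no, event_mask;

	index = (event & 0xFF000000) >> 24;
	event_no = (event & 0x0000FF00) >> 8;
	event_mask = (event & 0x000000FF) >> 0;

	printf("%#010x -> counter %u, event %#04x, mask %#04x\n",
		event, (unsigned) index, (unsigned) event_no,
		(unsigned) event_mask);
}

int
main(void)
{
	decode_pmc_arg(0x000064F0);
	decode_pmc_arg(0x01006408);

	return 0;
}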