cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit 434a6210041dd447acae38b95561a2d990efa153
parent 16469e2094edab39279b9f6aaa97c84722baa812
Author: Louis Burda <quent.burda@gmail.com>
Date:   Thu, 27 Oct 2022 23:35:21 +0200

Add online baseline measurement and pmc reset
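The baseline can now be measured while the guest is running ("online"): enabling
KVM_CPC_MEASURE_BASELINE primes cachepc_baseline with CPC_MSRMT_MAX and the kernel
updates it on every guest entry, KVM_CPC_READ_BASELINE returns the result to
userspace, and KVM_CPC_SUB_BASELINE makes the kernel subtract it from subsequent
measurements. Measurements are widened from uint16_t to the new cpc_msrmt_t
(__u64), and cachepc_reset_pmc() clears the L1-miss performance counter before
each VM entry so every measurement starts from a zeroed counter.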

Diffstat:
M cachepc/cachepc.c | 23 +++++++++++++++++++++++
M cachepc/cachepc.h |  6 ++++--
M cachepc/kvm.c     | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++----------------
M cachepc/uapi.h    |  8 +++++++-
M patch.diff        | 24 +++++++++++++++++++-----
M test/kvm.c        | 46 +++++++++++++++++++++++++---------------------
M test/sev-es.c     | 47 +++++++++++++++++++++++++----------------------
M test/sev.c        | 46 +++++++++++++++++++++++++---------------------
8 files changed, 179 insertions(+), 88 deletions(-)

diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -139,6 +139,20 @@ cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
 	asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
 }
 
+void
+cachepc_reset_pmc(uint8_t index)
+{
+	uint64_t reg_addr;
+	uint64_t value;
+
+	WARN_ON(index >= 6);
+	if (index >= 6) return;
+
+	reg_addr = 0xc0010201 + index * 2;
+	value = 0;
+	asm volatile ("wrmsr" : : "c"(reg_addr), "a"(value));
+}
+
 cache_ctx *
 cachepc_get_ctx(int cache_level)
 {
@@ -244,6 +258,7 @@ void
 cachepc_save_msrmts(cacheline *head)
 {
 	cacheline *curr_cl;
+	size_t i;
 
 	curr_cl = head;
 	do {
@@ -254,6 +269,14 @@ cachepc_save_msrmts(cacheline *head)
 
 		curr_cl = curr_cl->prev;
 	} while (curr_cl != head);
+
+	if (cachepc_baseline_active) {
+		for (i = 0; i < cachepc_msrmts_count; i++) {
+			if (!cachepc_baseline_active)
+				WARN_ON(cachepc_msrmts[i] < cachepc_baseline[i]);
+			cachepc_msrmts[i] -= cachepc_baseline[i];
+		}
+	}
 }
 
 void
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -85,6 +85,7 @@ bool cachepc_verify_topology(void);
 
 void cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
 	uint8_t host_guest, uint8_t kernel_user);
+void cachepc_reset_pmc(uint8_t index);
 
 cache_ctx *cachepc_get_ctx(int cache_level);
 void cachepc_release_ctx(cache_ctx *ctx);
@@ -119,11 +120,12 @@ static inline void cachepc_victim(void *p);
 __attribute__((always_inline))
 static inline uint64_t cachepc_read_pmc(uint64_t event);
 
-extern uint16_t *cachepc_msrmts;
+extern cpc_msrmt_t *cachepc_msrmts;
 extern size_t cachepc_msrmts_count;
 
-extern uint16_t *cachepc_baseline;
+extern cpc_msrmt_t *cachepc_baseline;
 extern bool cachepc_baseline_measure;
+extern bool cachepc_baseline_active;
 
 extern cache_ctx *cachepc_ctx;
 extern cacheline *cachepc_ds;
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -10,15 +10,17 @@
 #include <linux/init.h>
 #include <asm/uaccess.h>
 
-uint16_t *cachepc_msrmts = NULL;
+cpc_msrmt_t *cachepc_msrmts = NULL;
 size_t cachepc_msrmts_count = 0;
 EXPORT_SYMBOL(cachepc_msrmts);
 EXPORT_SYMBOL(cachepc_msrmts_count);
 
-uint16_t *cachepc_baseline = NULL;
+cpc_msrmt_t *cachepc_baseline = NULL;
 bool cachepc_baseline_measure = false;
+bool cachepc_baseline_active = false;
 EXPORT_SYMBOL(cachepc_baseline);
 EXPORT_SYMBOL(cachepc_baseline_measure);
+EXPORT_SYMBOL(cachepc_baseline_active);
 
 cache_ctx *cachepc_ctx = NULL;
 cacheline *cachepc_ds = NULL;
@@ -243,14 +245,14 @@ cachepc_kvm_test_access_ioctl(void __user *arg_user)
 
 	if (!arg_user) return -EINVAL;
 
-	if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+	if (copy_from_user(&u32, arg_user, sizeof(u32)))
 		return -EFAULT;
 
 	ret = smp_call_function_single(2,
 		cachepc_kvm_single_access_test, &u32, true);
 	WARN_ON(ret != 0);
 
-	if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+	if (copy_to_user(arg_user, &u32, sizeof(u32)))
 		return -EFAULT;
 
 	return 0;
@@ -264,14 +266,14 @@ cachepc_kvm_test_eviction_ioctl(void __user *arg_user)
 
 	if (!arg_user) return -EINVAL;
 
-	if (copy_from_user(&u32, arg_user, sizeof(uint32_t)))
+	if (copy_from_user(&u32, arg_user, sizeof(u32)))
 		return -EFAULT;
 
 	ret = smp_call_function_single(2,
 		cachepc_kvm_single_eviction_test, &u32, true);
 	WARN_ON(ret != 0);
 
-	if (copy_to_user(arg_user, &u32, sizeof(uint32_t)))
+	if (copy_to_user(arg_user, &u32, sizeof(u32)))
 		return -EFAULT;
 
 	return 0;
@@ -293,7 +295,7 @@ cachepc_kvm_init_pmc_ioctl(void __user *arg_user)
 		return -EFAULT;
 	}
 
-	if (copy_from_user(&event, arg_user, sizeof(uint32_t))) {
+	if (copy_from_user(&event, arg_user, sizeof(event))) {
 		put_cpu();
 		return -EFAULT;
 	}
@@ -306,6 +308,7 @@ cachepc_kvm_init_pmc_ioctl(void __user *arg_user)
 
 	cachepc_init_pmc(index, event_no, event_mask,
 		host_guest, kernel_user);
+	cachepc_reset_pmc(index);
 
 	put_cpu();
 
@@ -320,11 +323,11 @@ cachepc_kvm_read_pmc_ioctl(void __user *arg_user)
 
 	if (!arg_user) return -EINVAL;
 
-	if (copy_from_user(&event, arg_user, sizeof(uint32_t)))
+	if (copy_from_user(&event, arg_user, sizeof(event)))
 		return -EFAULT;
 
 	count = cachepc_read_pmc(event);
-	if (copy_to_user(arg_user, &count, sizeof(uint64_t)))
+	if (copy_to_user(arg_user, &count, sizeof(count)))
 		return -EFAULT;
 
 	return 0;
@@ -336,7 +339,7 @@ cachepc_kvm_read_counts_ioctl(void __user *arg_user)
 	if (!arg_user) return -EINVAL;
 
 	if (copy_to_user(arg_user, cachepc_msrmts,
-			cachepc_msrmts_count * sizeof(uint16_t)))
+			cachepc_msrmts_count * sizeof(cpc_msrmt_t)))
 		return -EFAULT;
 
 	return 0;
@@ -370,7 +373,8 @@ cachepc_kvm_setup_pmc_ioctl(void __user *arg_user)
 int
 cachepc_kvm_measure_baseline_ioctl(void __user *arg_user)
 {
-	bool state;
+	uint32_t state;
+	size_t i;
 
 	if (!arg_user) return -EINVAL;
 
@@ -379,15 +383,42 @@ cachepc_kvm_measure_baseline_ioctl(void __user *arg_user)
 
 	cachepc_baseline_measure = state;
 
-	if (!state) {
-		memset(cachepc_baseline, 0,
-			sizeof(uint16_t) * cachepc_msrmts_count);
+	if (state) {
+		for (i = 0; i < cachepc_msrmts_count; i++)
+			cachepc_baseline[i] = CPC_MSRMT_MAX;
 	}
 
 	return 0;
 }
 
 int
+cachepc_kvm_read_baseline_ioctl(void __user *arg_user)
+{
+	if (!arg_user) return -EINVAL;
+
+	if (copy_to_user(arg_user, cachepc_baseline,
+			cachepc_msrmts_count * sizeof(cpc_msrmt_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int
+cachepc_kvm_sub_baseline_ioctl(void __user *arg_user)
+{
+	uint32_t state;
+
+	if (!arg_user) return -EINVAL;
+
+	if (copy_from_user(&state, arg_user, sizeof(state)))
+		return -EFAULT;
+
+	cachepc_baseline_active = state;
+
+	return 0;
+}
+
+int
 cachepc_kvm_track_page_ioctl(void __user *arg_user)
 {
 	struct cpc_track_config cfg;
@@ -522,6 +553,10 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 		return cachepc_kvm_setup_pmc_ioctl(arg_user);
 	case KVM_CPC_MEASURE_BASELINE:
 		return cachepc_kvm_measure_baseline_ioctl(arg_user);
+	case KVM_CPC_READ_BASELINE:
+		return cachepc_kvm_read_baseline_ioctl(arg_user);
+	case KVM_CPC_SUB_BASELINE:
+		return cachepc_kvm_sub_baseline_ioctl(arg_user);
 	case KVM_CPC_TRACK_PAGE:
 		return cachepc_kvm_track_page_ioctl(arg_user);
 	case KVM_CPC_TRACK_ALL:
@@ -574,11 +609,11 @@ cachepc_kvm_init(void)
 	cachepc_ds = NULL;
 
 	cachepc_msrmts_count = L1_SETS;
-	cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
+	cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL);
 	BUG_ON(cachepc_msrmts == NULL);
 
 	cachepc_baseline_measure = false;
-	cachepc_baseline = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
+	cachepc_baseline = kzalloc(cachepc_msrmts_count * sizeof(cpc_msrmt_t), GFP_KERNEL);
 	BUG_ON(cachepc_baseline == NULL);
 
 	ret = smp_call_function_single(2, cachepc_kvm_setup_test, NULL, true);
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -9,13 +9,17 @@
 #define CPC_L1MISS_PMC 0
 #define CPC_RETINST_PMC 1
 
+#define CPC_MSRMT_MAX (~((cpc_msrmt_t) 0))
+
 #define KVM_CPC_TEST_ACCESS _IOWR(KVMIO, 0x20, __u32)
 #define KVM_CPC_TEST_EVICTION _IOWR(KVMIO, 0x21, __u32)
 #define KVM_CPC_INIT_PMC _IOW(KVMIO, 0x22, __u32)
 #define KVM_CPC_READ_PMC _IOWR(KVMIO, 0x23, __u32)
 #define KVM_CPC_READ_COUNTS _IOR(KVMIO, 0x24, __u64)
 #define KVM_CPC_SETUP_PMC _IO(KVMIO, 0x25)
-#define KVM_CPC_MEASURE_BASELINE _IO(KVMIO, 0x26)
+#define KVM_CPC_MEASURE_BASELINE _IOW(KVMIO, 0x26, __u32)
+#define KVM_CPC_READ_BASELINE _IOR(KVMIO, 0x27, __u64)
+#define KVM_CPC_SUB_BASELINE _IOR(KVMIO, 0x28, __u32)
 
 #define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, struct cpc_track_config)
 #define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x31, __u64)
@@ -51,3 +55,5 @@ struct cpc_track_event {
 	__u8 have_retired_instructions;
 	__u64 retired_instructions;
 };
+
+typedef __u64 cpc_msrmt_t;
diff --git a/patch.diff b/patch.diff
@@ -50,6 +50,14 @@ index 30f244b64523..e0eeffd340e8 100644
 ifdef CONFIG_HYPERV
 kvm-amd-y += svm/svm_onhyperv.o
 
+diff --git a/arch/x86/kvm/cachepc b/arch/x86/kvm/cachepc
+new file mode 120000
+index 000000000000..9119e44af1f0
+--- /dev/null
++++ b/arch/x86/kvm/cachepc
+@@ -0,0 +1 @@
++/home/louis/kvm-prime-count/cachepc
+\ No newline at end of file
 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
 index d871b8dee7b3..3b7720aebbc6 100644
 --- a/arch/x86/kvm/mmu/mmu.c
@@ -181,7 +189,7 @@ index 2e09d1b6249f..9b40e71564bf 100644
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
 
 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index cf0bf456d520..6687fad99e97 100644
+index cf0bf456d520..d6a4002fa550 100644
 --- a/arch/x86/kvm/svm/svm.c
 +++ b/arch/x86/kvm/svm/svm.c
 @@ -2,6 +2,8 @@
@@ -193,7 +201,7 @@ index cf0bf456d520..6687fad99e97 100644
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
-@@ -3788,14 +3790,33 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3788,14 +3790,37 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	unsigned long vmcb_pa = svm->current_vmcb->pa;
 	if (sev_es_guest(vcpu->kvm)) {
 +		memset(cachepc_msrmts, 0,
-+			cachepc_msrmts_count * sizeof(uint16_t));
++			cachepc_msrmts_count * sizeof(cpc_msrmt_t));
++		cachepc_reset_pmc(CPC_L1MISS_PMC);
 +
 +		cpu = get_cpu();
 +		local_irq_disable();
 +
 		__svm_sev_es_vcpu_run(vmcb_pa);
 +
 +		cachepc_save_msrmts(cachepc_ds);
++		if (cachepc_baseline_measure)
++			cachepc_update_baseline();
 +		local_irq_enable();
 +		put_cpu();
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 +		memset(cachepc_msrmts, 0,
-+			cachepc_msrmts_count * sizeof(uint16_t));
++			cachepc_msrmts_count * sizeof(cpc_msrmt_t));
++		cachepc_reset_pmc(CPC_L1MISS_PMC);
 +
 +		cpu = get_cpu();
 +		local_irq_disable();
 +
 		/*
 		 * Use a single vmcb (vmcb01 because it's always valid) for
 		 * context switching guest state via VMLOAD/VMSAVE, that way
-@@ -3807,6 +3828,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+@@ -3807,6 +3832,12 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	vmsave(svm->vmcb01.pa);
 	vmload(__sme_page_pa(sd->save_area));
 +
 +	cachepc_save_msrmts(cachepc_ds);
++	if (cachepc_baseline_measure)
++		cachepc_update_baseline();
 +	local_irq_enable();
 +	put_cpu();
 }
diff --git a/test/kvm.c b/test/kvm.c
@@ -188,13 +188,13 @@ kvm_init(size_t ramsize, void *code_start, void *code_stop)
 	if (ret < 0) err(1, "KVM_SET_REGS");
 }
 
-uint16_t *
+cpc_msrmt_t *
 read_counts()
 {
-	uint16_t *counts;
+	cpc_msrmt_t *counts;
 	int ret;
 
-	counts = malloc(64 * sizeof(uint16_t));
+	counts = malloc(64 * sizeof(cpc_msrmt_t));
 	if (!counts) err(1, "malloc");
 	ret = ioctl(kvm_fd, KVM_CPC_READ_COUNTS, counts);
 	if (ret == -1) err(1, "ioctl READ_COUNTS");
@@ -203,7 +203,7 @@ read_counts()
 }
 
 void
-print_counts(uint16_t *counts)
+print_counts(cpc_msrmt_t *counts)
 {
 	int i;
 
@@ -218,15 +218,15 @@ print_counts(uint16_t *counts)
 		if (counts[i] > 0)
 			printf("\x1b[0m");
 	}
-	printf("\n Target Set Count: %d %hu \n", TARGET_SET, counts[TARGET_SET]);
+	printf("\n Target Set Count: %d %llu \n", TARGET_SET, counts[TARGET_SET]);
 	printf("\n");
 }
 
-uint16_t *
+cpc_msrmt_t *
 collect(const char *prefix, void *code_start, void *code_stop)
 {
 	struct kvm_regs regs;
-	uint16_t *counts;
+	cpc_msrmt_t *counts;
 	int ret;
 
 	/* using cache size for alignment of kvm memory access */
@@ -263,10 +263,10 @@ collect(const char *prefix, void *code_start, void *code_stop)
 int
 main(int argc, const char **argv)
 {
-	uint16_t without_access[SAMPLE_COUNT][64];
-	uint16_t with_access[SAMPLE_COUNT][64];
-	uint16_t *counts, *baseline;
-	uint32_t arg;
+	cpc_msrmt_t without_access[SAMPLE_COUNT][64];
+	cpc_msrmt_t with_access[SAMPLE_COUNT][64];
+	cpc_msrmt_t *counts, *baseline;
+	uint32_t arg, measure;
 	int i, k, ret;
 
 	setvbuf(stdout, NULL, _IONBF, 0);
@@ -281,25 +281,29 @@ main(int argc, const char **argv)
 	ret = ioctl(kvm_fd, KVM_CPC_INIT_PMC, &arg);
 	if (ret == -1) err(1, "ioctl INIT_PMC");
 
-	baseline = calloc(sizeof(uint16_t), 64);
+	baseline = calloc(sizeof(cpc_msrmt_t), 64);
 	if (!baseline) err(1, "calloc");
-	for (k = 0; k < 64; k++)
-		baseline[k] = UINT16_MAX;
+
+	measure = true;
+	ret = ioctl(kvm_fd, KVM_CPC_MEASURE_BASELINE, &measure);
+	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
 
 	for (i = 0; i < SAMPLE_COUNT; i++) {
 		counts = collect("without", __start_guest_without, __stop_guest_without);
-		memcpy(without_access[i], counts, 64 * sizeof(uint16_t));
+		memcpy(without_access[i], counts, 64 * sizeof(cpc_msrmt_t));
 		free(counts);
 
 		counts = collect("with", __start_guest_with, __stop_guest_with);
-		memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
+		memcpy(with_access[i], counts, 64 * sizeof(cpc_msrmt_t));
 		free(counts);
-
-		for (k = 0; k < 64; k++) {
-			baseline[k] = MIN(baseline[k], without_access[i][k]);
-			baseline[k] = MIN(baseline[k], with_access[i][k]);
-		}
 	}
+
+	measure = false;
+	ret = ioctl(kvm_fd, KVM_CPC_MEASURE_BASELINE, &measure);
+	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+	ret = ioctl(kvm_fd, KVM_CPC_READ_BASELINE, baseline);
+	if (ret == -1) err(1, "ioctl READ_BASELINE");
 
 	for (i = 0; i < SAMPLE_COUNT; i++) {
 		for (k = 0; k < 64; k++) {
diff --git a/test/sev-es.c b/test/sev-es.c
@@ -399,13 +399,13 @@ sev_kvm_deinit(struct kvm *kvm)
 	munmap(kvm->mem, kvm->memsize);
 }
 
-uint16_t *
+cpc_msrmt_t *
 read_counts()
 {
-	uint16_t *counts;
+	cpc_msrmt_t *counts;
 	int ret;
 
-	counts = malloc(64 * sizeof(uint16_t));
+	counts = malloc(64 * sizeof(cpc_msrmt_t));
 	if (!counts) err(1, "malloc");
 	ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
 	if (ret == -1) err(1, "ioctl READ_COUNTS");
@@ -414,7 +414,7 @@ read_counts()
 }
 
 void
-print_counts(uint16_t *counts)
+print_counts(cpc_msrmt_t *counts)
 {
 	int i;
 
@@ -429,11 +429,11 @@ print_counts(uint16_t *counts)
 		if (counts[i] > 0)
 			printf("\x1b[0m");
 	}
-	printf("\n Target Set %i Count: %hu\n", TARGET_SET, counts[TARGET_SET]);
+	printf("\n Target Set %i Count: %llu\n", TARGET_SET, counts[TARGET_SET]);
 	printf("\n");
 }
 
-uint16_t *
+cpc_msrmt_t *
 collect(struct kvm *kvm)
 {
 	struct kvm_regs regs;
@@ -459,11 +459,11 @@ collect(struct kvm *kvm)
 int
 main(int argc, const char **argv)
 {
-	uint16_t without_access[SAMPLE_COUNT][64];
-	uint16_t with_access[SAMPLE_COUNT][64];
+	cpc_msrmt_t without_access[SAMPLE_COUNT][64];
+	cpc_msrmt_t with_access[SAMPLE_COUNT][64];
 	struct kvm kvm_without_access, kvm_with_access;
-	uint16_t *counts, *baseline;
-	uint32_t arg;
+	cpc_msrmt_t *counts, *baseline;
+	uint32_t arg, measure;
 	int i, k, ret;
 
 	setvbuf(stdout, NULL, _IONBF, 0);
@@ -486,10 +486,8 @@ main(int argc, const char **argv)
 	ret = ioctl(kvm_dev, KVM_CPC_INIT_PMC, &arg);
 	if (ret < 0) err(1, "ioctl INIT_PMC");
 
-	baseline = calloc(sizeof(uint16_t), 64);
+	baseline = calloc(sizeof(cpc_msrmt_t), 64);
 	if (!baseline) err(1, "calloc");
-	for (k = 0; k < 64; k++)
-		baseline[k] = UINT16_MAX;
 
 	sev_kvm_init(&kvm_with_access, 64 * 64 * 8 * 2, __start_guest_with, __stop_guest_with);
 	sev_kvm_init(&kvm_without_access, 64 * 64 * 8 * 2, __start_guest_without, __stop_guest_without);
@@ -498,22 +496,27 @@ main(int argc, const char **argv)
 	ioctl(kvm_with_access.vcpufd, KVM_RUN, NULL);
 	ioctl(kvm_without_access.vcpufd, KVM_RUN, NULL);
 
+	measure = true;
+	ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &measure);
+	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
 	for (i = 0; i < SAMPLE_COUNT; i++) {
-		//printf("Running guest without\n");
 		counts = collect(&kvm_without_access);
-		memcpy(without_access[i], counts, 64 * sizeof(uint16_t));
+		memcpy(without_access[i], counts, 64 * sizeof(cpc_msrmt_t));
 		free(counts);
 
-		//printf("Running guest with\n");
 		counts = collect(&kvm_with_access);
-		memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
+		memcpy(with_access[i], counts, 64 * sizeof(cpc_msrmt_t));
 		free(counts);
-
-		for (k = 0; k < 64; k++) {
-			baseline[k] = MIN(baseline[k], without_access[i][k]);
-			baseline[k] = MIN(baseline[k], with_access[i][k]);
-		}
 	}
+
+	measure = false;
+	ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &measure);
+	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+	ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
+	if (ret == -1) err(1, "ioctl READ_BASELINE");
+
 	for (i = 0; i < SAMPLE_COUNT; i++) {
 		for (k = 0; k < 64; k++) {
diff --git a/test/sev.c b/test/sev.c
@@ -398,13 +398,13 @@ sev_kvm_deinit(struct kvm *kvm)
 	munmap(kvm->mem, kvm->memsize);
 }
 
-uint16_t *
+cpc_msrmt_t *
 read_counts()
 {
-	uint16_t *counts;
+	cpc_msrmt_t *counts;
 	int ret;
 
-	counts = malloc(64 * sizeof(uint16_t));
+	counts = malloc(64 * sizeof(cpc_msrmt_t));
 	if (!counts) err(1, "malloc");
 	ret = ioctl(kvm_dev, KVM_CPC_READ_COUNTS, counts);
 	if (ret == -1) err(1, "ioctl READ_COUNTS");
@@ -413,7 +413,7 @@ read_counts()
 }
 
 void
-print_counts(uint16_t *counts)
+print_counts(cpc_msrmt_t *counts)
 {
 	int i;
 
@@ -428,16 +428,16 @@ print_counts(uint16_t *counts)
 		if (counts[i] > 0)
 			printf("\x1b[0m");
 	}
-	printf("\n Target Set %i Count: %hu\n", TARGET_SET, counts[TARGET_SET]);
+	printf("\n Target Set %i Count: %llu\n", TARGET_SET, counts[TARGET_SET]);
 	printf("\n");
 }
 
-uint16_t *
+cpc_msrmt_t *
 collect(const char *prefix, void *code_start, void *code_stop)
 {
 	struct kvm_regs regs;
 	struct kvm kvm;
-	uint16_t *counts;
+	cpc_msrmt_t *counts;
 	int ret;
 
 	sev_kvm_init(&kvm, 64 * 64 * 8 * 2, code_start, code_stop);
@@ -468,10 +468,10 @@ collect(const char *prefix, void *code_start, void *code_stop)
 int
 main(int argc, const char **argv)
 {
-	uint16_t without_access[SAMPLE_COUNT][64];
-	uint16_t with_access[SAMPLE_COUNT][64];
-	uint16_t *counts, *baseline;
-	uint32_t arg;
+	cpc_msrmt_t without_access[SAMPLE_COUNT][64];
+	cpc_msrmt_t with_access[SAMPLE_COUNT][64];
+	cpc_msrmt_t *counts, *baseline;
+	uint32_t arg, measure;
 	int i, k, ret;
 
 	setvbuf(stdout, NULL, _IONBF, 0);
@@ -494,25 +494,29 @@ main(int argc, const char **argv)
 	ret = ioctl(kvm_dev, KVM_CPC_INIT_PMC, &arg);
 	if (ret < 0) err(1, "ioctl INIT_PMC");
 
-	baseline = calloc(sizeof(uint16_t), 64);
+	baseline = calloc(sizeof(cpc_msrmt_t), 64);
 	if (!baseline) err(1, "calloc");
-	for (k = 0; k < 64; k++)
-		baseline[k] = UINT16_MAX;
+
+	measure = true;
+	ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &measure);
+	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
 
 	for (i = 0; i < SAMPLE_COUNT; i++) {
 		counts = collect("without", __start_guest_without, __stop_guest_without);
-		memcpy(without_access[i], counts, 64 * sizeof(uint16_t));
+		memcpy(without_access[i], counts, 64 * sizeof(cpc_msrmt_t));
 		free(counts);
 
 		counts = collect("with", __start_guest_with, __stop_guest_with);
-		memcpy(with_access[i], counts, 64 * sizeof(uint16_t));
+		memcpy(with_access[i], counts, 64 * sizeof(cpc_msrmt_t));
 		free(counts);
-
-		for (k = 0; k < 64; k++) {
-			baseline[k] = MIN(baseline[k], without_access[i][k]);
-			baseline[k] = MIN(baseline[k], with_access[i][k]);
-		}
 	}
+
+	measure = false;
+	ret = ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &measure);
+	if (ret == -1) err(1, "ioctl MEASURE_BASELINE");
+
+	ret = ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline);
+	if (ret == -1) err(1, "ioctl READ_BASELINE");
 
 	for (i = 0; i < SAMPLE_COUNT; i++) {
 		for (k = 0; k < 64; k++) {
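
The updated tests all drive the new interface the same way; the stand-alone sketch
below condenses that sequence. It is an illustration distilled from test/sev.c, not
code from this commit, and it assumes the ioctls are issued on the /dev/kvm file
descriptor (as the tests do), that a CachePC-patched KVM is loaded, and that
cachepc/uapi.h and linux/kvm.h are on the include path; the guest sampling loop
itself is elided.

/* Hypothetical sketch of the online baseline workflow. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <err.h>

#include <linux/kvm.h>
#include "cachepc/uapi.h"

int
main(void)
{
	cpc_msrmt_t baseline[64];
	uint32_t measure, active;
	int kvm_dev, i;

	kvm_dev = open("/dev/kvm", O_RDWR);
	if (kvm_dev < 0) err(1, "open /dev/kvm");

	/* 1. start in-kernel baseline tracking: the kernel primes
	 *    cachepc_baseline with CPC_MSRMT_MAX and updates it on
	 *    every guest entry while this flag is set */
	measure = true;
	if (ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &measure) == -1)
		err(1, "ioctl MEASURE_BASELINE");

	/* ... run the guest a number of times here, e.g. the tests'
	 *     collect() loop, so the baseline converges ... */

	/* 2. stop measuring and fetch the computed per-set baseline */
	measure = false;
	if (ioctl(kvm_dev, KVM_CPC_MEASURE_BASELINE, &measure) == -1)
		err(1, "ioctl MEASURE_BASELINE");
	if (ioctl(kvm_dev, KVM_CPC_READ_BASELINE, baseline) == -1)
		err(1, "ioctl READ_BASELINE");

	/* 3. optionally let the kernel subtract the baseline from all
	 *    further measurements returned by KVM_CPC_READ_COUNTS */
	active = true;
	if (ioctl(kvm_dev, KVM_CPC_SUB_BASELINE, &active) == -1)
		err(1, "ioctl SUB_BASELINE");

	for (i = 0; i < 64; i++)
		printf("set %02d: %llu\n", i, baseline[i]);

	return 0;
}

The design shift is that the per-set minimum is now maintained in the kernel across
guest entries, so userspace no longer post-processes samples with MIN(); it only
toggles measurement around its sampling loop, reads the baseline back, and can ask
the kernel to subtract it from future counts.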