cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc

commit 16469e2094edab39279b9f6aaa97c84722baa812
parent 22e4bb34a2022458a594738a846d6bc2f8607862
Author: Louis Burda <quent.burda@gmail.com>
Date:   Thu, 27 Oct 2022 21:34:43 +0200

Add baseline measurement mode

Diffstat:
M cachepc/cachepc.c |  13 +++++++++++++
M cachepc/cachepc.h |   4 ++++
M cachepc/kvm.c     |  31 +++++++++++++++++++++++++++++++
M cachepc/uapi.h    |   1 +
4 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -10,6 +10,8 @@
 #define REMOVE_PAGE_OFFSET(ptr) ((void *) (((uintptr_t) ptr) & PAGE_MASK))
 
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
 static void cl_insert(cacheline *last_cl, cacheline *new_cl);
 static void *remove_cache_set(cache_ctx *ctx, void *ptr);
 static void *remove_cache_group_set(void *ptr);
@@ -270,6 +272,17 @@ cachepc_print_msrmts(cacheline *head)
 	} while (curr_cl != head);
 }
 
+void
+cachepc_update_baseline(void)
+{
+	size_t i;
+
+	for (i = 0; i < cachepc_msrmts_count; i++) {
+		cachepc_baseline[i] = MIN(cachepc_baseline[i],
+			cachepc_msrmts[i]);
+	}
+}
+
 void __attribute__((optimize(1))) // prevent instruction reordering
 cachepc_prime_vcall(uintptr_t ret, cacheline *cl)
 {
diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h
@@ -99,6 +99,7 @@ void *cachepc_aligned_alloc(size_t alignment, size_t size);
 
 void cachepc_save_msrmts(cacheline *head);
 void cachepc_print_msrmts(cacheline *head);
+void cachepc_update_baseline(void);
 
 void cachepc_prime_vcall(uintptr_t ret, cacheline *cl);
 void cachepc_probe_vcall(uintptr_t ret, cacheline *cl);
@@ -121,6 +122,9 @@ static inline uint64_t cachepc_read_pmc(uint64_t event);
 extern uint16_t *cachepc_msrmts;
 extern size_t cachepc_msrmts_count;
 
+extern uint16_t *cachepc_baseline;
+extern bool cachepc_baseline_measure;
+
 extern cache_ctx *cachepc_ctx;
 extern cacheline *cachepc_ds;
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -15,6 +15,11 @@ size_t cachepc_msrmts_count = 0;
 EXPORT_SYMBOL(cachepc_msrmts);
 EXPORT_SYMBOL(cachepc_msrmts_count);
 
+uint16_t *cachepc_baseline = NULL;
+bool cachepc_baseline_measure = false;
+EXPORT_SYMBOL(cachepc_baseline);
+EXPORT_SYMBOL(cachepc_baseline_measure);
+
 cache_ctx *cachepc_ctx = NULL;
 cacheline *cachepc_ds = NULL;
 EXPORT_SYMBOL(cachepc_ctx);
@@ -363,6 +368,26 @@ cachepc_kvm_setup_pmc_ioctl(void __user *arg_user)
 }
 
 int
+cachepc_kvm_measure_baseline_ioctl(void __user *arg_user)
+{
+	bool state;
+
+	if (!arg_user) return -EINVAL;
+
+	if (copy_from_user(&state, arg_user, sizeof(state)))
+		return -EFAULT;
+
+	cachepc_baseline_measure = state;
+
+	if (!state) {
+		memset(cachepc_baseline, 0,
+			sizeof(uint16_t) * cachepc_msrmts_count);
+	}
+
+	return 0;
+}
+
+int
 cachepc_kvm_track_page_ioctl(void __user *arg_user)
 {
 	struct cpc_track_config cfg;
@@ -495,6 +520,8 @@ cachepc_kvm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 		return cachepc_kvm_read_counts_ioctl(arg_user);
 	case KVM_CPC_SETUP_PMC:
 		return cachepc_kvm_setup_pmc_ioctl(arg_user);
+	case KVM_CPC_MEASURE_BASELINE:
+		return cachepc_kvm_measure_baseline_ioctl(arg_user);
 	case KVM_CPC_TRACK_PAGE:
 		return cachepc_kvm_track_page_ioctl(arg_user);
 	case KVM_CPC_TRACK_ALL:
@@ -550,6 +577,10 @@ cachepc_kvm_init(void)
 	cachepc_msrmts = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
 	BUG_ON(cachepc_msrmts == NULL);
 
+	cachepc_baseline_measure = false;
+	cachepc_baseline = kzalloc(cachepc_msrmts_count * sizeof(uint16_t), GFP_KERNEL);
+	BUG_ON(cachepc_baseline == NULL);
+
 	ret = smp_call_function_single(2, cachepc_kvm_setup_test, NULL, true);
 	WARN_ON(ret != 0);
 }
diff --git a/cachepc/uapi.h b/cachepc/uapi.h
@@ -15,6 +15,7 @@
 #define KVM_CPC_READ_PMC _IOWR(KVMIO, 0x23, __u32)
 #define KVM_CPC_READ_COUNTS _IOR(KVMIO, 0x24, __u64)
 #define KVM_CPC_SETUP_PMC _IO(KVMIO, 0x25)
+#define KVM_CPC_MEASURE_BASELINE _IO(KVMIO, 0x26)
 
 #define KVM_CPC_TRACK_PAGE _IOWR(KVMIO, 0x30, struct cpc_track_config)
 #define KVM_CPC_TRACK_ALL _IOWR(KVMIO, 0x31, __u64)
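
For reference, a minimal userspace sketch of how the new KVM_CPC_MEASURE_BASELINE ioctl could be driven. This is not part of the commit: it assumes the CachePC ioctls are issued on the /dev/kvm file descriptor (as the KVMIO ioctl numbers in cachepc/uapi.h suggest), that cachepc/uapi.h is on the include path, and that 64 L1 sets are measured; the comments only restate what the handler above does.

/* baseline_sketch.c - hedged usage example, not part of the repository */
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/kvm.h>
#include "cachepc/uapi.h" /* KVM_CPC_* definitions from this repo */

#define L1_SETS 64 /* assumed number of measured cache sets */

int
main(void)
{
	uint16_t counts[L1_SETS];
	bool enable;
	int fd, ret;

	fd = open("/dev/kvm", O_RDWR); /* assumed device node */
	if (fd < 0) { perror("open"); return 1; }

	/* enable baseline measurement: while set, cachepc_update_baseline()
	 * folds each new measurement into cachepc_baseline via MIN() */
	enable = true;
	ret = ioctl(fd, KVM_CPC_MEASURE_BASELINE, &enable);
	if (ret) { perror("KVM_CPC_MEASURE_BASELINE"); return 1; }

	/* ... run the guest so per-set measurements accumulate ... */

	/* disable again; per cachepc_kvm_measure_baseline_ioctl() this
	 * also memsets the baseline buffer back to zero */
	enable = false;
	ret = ioctl(fd, KVM_CPC_MEASURE_BASELINE, &enable);
	if (ret) { perror("KVM_CPC_MEASURE_BASELINE"); return 1; }

	/* read the most recent per-set counts, as with the existing ioctls */
	ret = ioctl(fd, KVM_CPC_READ_COUNTS, counts);
	if (ret) { perror("KVM_CPC_READ_COUNTS"); return 1; }

	close(fd);
	return 0;
}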