cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc
Log | Files | Refs | Submodules | README | sfeed.txt

commit 14b189055d58c170103aff0bc273d0fa7128e522
parent d4c8266836e9a4e6fa073667e4edfbbbb61e8666
Author: Louis Burda <quent.burda@gmail.com>
Date:   Wed,  5 Oct 2022 17:05:18 +0200

Migrate sevstep to cachepc pmc api

Diffstat:
M cachepc/cachepc.c | 6+++---
M cachepc/cachepc.h | 8+++++++-
M cachepc/kvm.c | 19+++++++++----------
M sevstep/sevstep.c | 72+++++-------------------------------------------------------------------
M sevstep/sevstep.h | 31-------------------------------
M sevstep/uspt.c | 27+++++----------------------
6 files changed, 29 insertions(+), 134 deletions(-)

diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c @@ -22,7 +22,7 @@ static bool is_in_arr(uint32_t elem, uint32_t *arr, uint32_t arr_len); void cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask, - bool host, bool guest, bool kernel, bool user) + int host_guest, int kernel_user) { uint64_t event; uint64_t reg_addr; @@ -39,8 +39,8 @@ cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask, reg_addr = 0xc0010200 + index * 2; event = event_no | (event_mask << 8); event |= (1ULL << 22); /* enable performance counter */ - event |= ((kernel * 2ULL + user * 1ULL) << 16); - event |= ((host * 2ULL + guest * 1ULL) << 40); + event |= (kernel_user * 1ULL) << 16; + event |= (host_guest * 1ULL) << 40; printk(KERN_WARNING "CachePC: Initialized %i. PMC %02X:%02X\n", index, event_no, event_mask); asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00)); diff --git a/cachepc/cachepc.h b/cachepc/cachepc.h @@ -4,8 +4,14 @@ #include "cache_types.h" #include "uapi.h" +#define PMC_KERNEL 2 +#define PMC_USER 1 + +#define PMC_HOST 2 +#define PMC_GUEST 1 + void cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask, - bool host, bool guest, bool kernel, bool user); + int host_guest, int kernel_user); cache_ctx *cachepc_get_ctx(cache_level cl); void cachepc_release_ctx(cache_ctx *ctx); diff --git a/cachepc/kvm.c b/cachepc/kvm.c @@ -100,8 +100,8 @@ cachepc_kvm_prime_probe_test(void *p) arg = p; - /* l2 data cache, hit or miss */ - cachepc_init_pmc(0, 0x64, 0xD8, true, false, true, false); + /* l2 data cache hit & miss */ + cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); lines = cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size); BUG_ON(lines == NULL); @@ -144,8 +144,8 @@ cachepc_kvm_stream_hwpf_test(void *p) /* TODO: improve detection */ - /* l2 data cache, hit or miss */ - cachepc_init_pmc(0, 0x64, 0xD8, true, false, true, false); + /* l2 data cache hit & miss */ + cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); lines 
= cachepc_aligned_alloc(PAGE_SIZE, cachepc_ctx->cache_size); BUG_ON(lines == NULL); @@ -174,8 +174,8 @@ cachepc_kvm_single_access_test(void *p) uint64_t pre, post; uint32_t *arg; - /* l2 data cache, hit or miss */ - cachepc_init_pmc(0, 0x64, 0xD8, true, false, true, false); + /* l2 data cache hit & miss */ + cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); arg = p; @@ -208,8 +208,8 @@ cachepc_kvm_single_eviction_test(void *p) arg = p; - /* l2 data cache, hit or miss */ - cachepc_init_pmc(0, 0x64, 0xD8, true, false, true, false); + /* l2 data cache hit & miss */ + cachepc_init_pmc(0, 0x64, 0xD8, PMC_HOST, PMC_KERNEL); WARN_ON(arg && *arg >= L1_SETS); if (arg && *arg >= L1_SETS) return; @@ -291,8 +291,7 @@ cachepc_kvm_init_pmc_ioctl(void *p) event_mask = (event & 0x000000FF) >> 0; cachepc_init_pmc(index, event_no, event_mask, - host_guest >> 1, host_guest & 1, - kernel_user >> 1, kernel_user & 1); + host_guest, kernel_user); } long diff --git a/sevstep/sevstep.c b/sevstep/sevstep.c @@ -1,4 +1,5 @@ #include "sevstep.h" +#include "svm/cachepc/cachepc.h" #include "mmu/mmu_internal.h" #include "mmu.h" @@ -43,77 +44,14 @@ struct kvm* main_vm; EXPORT_SYMBOL(main_vm); -static perf_ctl_config_t perf_configs[6]; - -uint64_t -perf_ctl_to_u64(perf_ctl_config_t * config) -{ - uint64_t result; - - result = 0; - result |= config->EventSelect & 0xffULL; - result |= (config->UintMask & 0xffULL) << 8; - result |= (config->OsUserMode & 0x3ULL) << 16; - result |= (config->Edge & 0x1ULL) << 18; - result |= (config->Int & 0x1ULL) << 20; - result |= (config->En & 0x1ULL) << 22; - result |= (config->Inv & 0x1ULL) << 23; - result |= (config->CntMask & 0xffULL) << 24; - result |= ((config->EventSelect & 0xf00ULL) >> 8) << 32; - result |= (config->HostGuestOnly & 0x3ULL) << 40; - - return result; - -} - -void -write_ctl(perf_ctl_config_t * config, int cpu, uint64_t ctl_msr) -{ - wrmsrl_on_cpu(cpu, ctl_msr, perf_ctl_to_u64(config)); -} - -void -read_ctr(uint64_t ctr_msr, int cpu, 
uint64_t* result) -{ - uint64_t tmp; - - rdmsrl_on_cpu(cpu, ctr_msr, &tmp); - *result = tmp & ( (0x1ULL << 48) - 1); -} - void sevstep_setup_pmcs(void) { - int perf_cpu; - int i; - - perf_cpu = smp_processor_id(); - - for (i = 0; i < 6; i++) { - perf_configs[i].HostGuestOnly = 0x1; /* count only guest */ - perf_configs[i].CntMask = 0x0; - perf_configs[i].Inv = 0x0; - perf_configs[i].En = 0x0; - perf_configs[i].Int = 0x0; - perf_configs[i].Edge = 0x0; - perf_configs[i].OsUserMode = 0x3; /* count userland and kernel events */ - } - - perf_configs[0].EventSelect = 0x0c0; - perf_configs[0].UintMask = 0x0; - perf_configs[0].En = 0x1; - write_ctl(&perf_configs[0], perf_cpu, CTL_MSR_0); + /* retired instructions */ + cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER); - /* - * programm l2d hit from data cache miss perf for - * cpu_probe_pointer_chasing_inplace without counting thread. - * N.B. that this time we count host events - */ - perf_configs[1].EventSelect = 0x064; - perf_configs[1].UintMask = 0x70; - perf_configs[1].En = 0x1; - perf_configs[1].HostGuestOnly = 0x2; /* count only host events */ - write_ctl(&perf_configs[1], perf_cpu, CTL_MSR_1); + /* l2 data cache hit & miss */ + cachepc_init_pmc(1, 0x64, 0x70, PMC_HOST, PMC_KERNEL); } EXPORT_SYMBOL(sevstep_setup_pmcs); diff --git a/sevstep/sevstep.h b/sevstep/sevstep.h @@ -10,33 +10,6 @@ #include <linux/pid.h> #include <linux/psp-sev.h> -#define CTL_MSR_0 0xc0010200ULL -#define CTL_MSR_1 0xc0010202ULL -#define CTL_MSR_2 0xc0010204ULL -#define CTL_MSR_3 0xc0010206ULL -#define CTL_MSR_4 0xc0010208ULL -#define CTL_MSR_5 0xc001020aULL - -#define CTR_MSR_0 0xc0010201ULL -#define CTR_MSR_1 0xc0010203ULL -#define CTR_MSR_2 0xc0010205ULL -#define CTR_MSR_3 0xc0010207ULL -#define CTR_MSR_4 0xc0010209ULL -#define CTR_MSR_5 0xc001020bULL - -typedef struct { - uint64_t HostGuestOnly; - uint64_t CntMask; - uint64_t Inv; - uint64_t En; - uint64_t Int; - uint64_t Edge; - uint64_t OsUserMode; - uint64_t UintMask; - 
uint64_t EventSelect; //12 bits in total split in [11:8] and [7:0] - -} perf_ctl_config_t; - extern struct kvm* main_vm; bool sevstep_spte_protect(u64 *sptep, @@ -56,10 +29,6 @@ bool sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn); long sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode); long sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode); -uint64_t perf_ctl_to_u64(perf_ctl_config_t *config); -void write_ctl(perf_ctl_config_t *config, int cpu, uint64_t ctl_msr); -void read_ctr(uint64_t ctr_msr, int cpu, uint64_t *result); - void sevstep_setup_pmcs(void); int sevstep_get_rip_kvm_vcpu(struct kvm_vcpu *vcpu, uint64_t *rip); diff --git a/sevstep/uspt.c b/sevstep/uspt.c @@ -1,6 +1,8 @@ #include "uspt.h" #include "sevstep.h" +#include "svm/cachepc/cachepc.h" + #include <linux/kvm.h> #include <linux/timekeeping.h> #include <linux/uaccess.h> @@ -211,24 +213,6 @@ uspt_handle_ack_event_ioctl(ack_event_t event) return _uspt_handle_ack_event(event.id); } -// setup perf_state and program retired instruction performance counter -void -_perf_state_setup_retired_instructions(void) -{ - perf_ctl_config_t retired_instructions_perf_config; - retired_instructions_perf_config.HostGuestOnly = 0x1; // 0x1 means: count only guest - retired_instructions_perf_config.CntMask = 0x0; - retired_instructions_perf_config.Inv = 0x0; - retired_instructions_perf_config.Int = 0x0; - retired_instructions_perf_config.Edge = 0x0; - retired_instructions_perf_config.OsUserMode = 0x3; // 0x3 means: count kern and user events - retired_instructions_perf_config.EventSelect = 0x0c0; - retired_instructions_perf_config.UintMask = 0x0; - retired_instructions_perf_config.En = 0x1; - write_ctl(&retired_instructions_perf_config, batch_track_state.perf_cpu, CTL_MSR_0); -} - - // get retired instructions between current_event_idx-1 and current_event_idx // value is cached for multiple calls to the same current_event_idx uint64_t @@ -239,9 +223,8 @@ 
_perf_state_update_and_get_delta(uint64_t current_event_idx) /* check if value is "cached" */ if (perf_state.delta_valid_idx == current_event_idx) { if (current_event_idx == 0) { - read_ctr(CTR_MSR_0, batch_track_state.perf_cpu, &current_value); perf_state.idx_for_last_perf_reading = current_event_idx; - perf_state.last_perf_reading = current_value; + perf_state.last_perf_reading = cachepc_read_pmc(0); } return perf_state.delta; } @@ -253,7 +236,7 @@ _perf_state_update_and_get_delta(uint64_t current_event_idx) perf_state.idx_for_last_perf_reading, current_event_idx); } - read_ctr(CTR_MSR_0, batch_track_state.perf_cpu, &current_value); + current_value = cachepc_read_pmc(0); perf_state.delta = (current_value - perf_state.last_perf_reading); perf_state.delta_valid_idx = current_event_idx; @@ -310,7 +293,7 @@ uspt_batch_tracking_start(int tracking_type,uint64_t expected_events, perf_state.last_perf_reading = 0; perf_state.delta_valid_idx = 0; perf_state.delta = 0; - _perf_state_setup_retired_instructions(); + cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER); spin_lock(&batch_track_state_lock);