cachepc

Prime+Probe cache-based side-channel attack on AMD SEV-SNP protected virtual machines
git clone https://git.sinitax.com/sinitax/cachepc
Log | Files | Refs | Submodules | README | sfeed.txt

commit b7ba9ea06a0a6332a6b18bcdf2c5339fabfb2a5b
parent 0def0e2d6eeca32304a31072469b94f40c2ce2b9
Author: Louis Burda <quent.burda@gmail.com>
Date:   Fri, 14 Oct 2022 01:35:32 +0200

Replace smp_processor_id with get_cpu

Diffstat:
M cachepc/cachepc.c |  2 --
M cachepc/kvm.c     | 22 +++++++++++++++++++---
M cachepc/sevstep.c | 16 ++++++++++------
M cachepc/uspt.c    | 23 +++++++++++++++++------
M test/sevstep.c    |  9 +++++----
5 files changed, 51 insertions(+), 21 deletions(-)

diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -171,8 +171,6 @@ cachepc_save_msrmts(cacheline *head)
 		curr_cl = curr_cl->prev;
 	} while (curr_cl != head);
-
-	cachepc_print_msrmts(head);
 }
 
 void
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -554,14 +554,20 @@ cachepc_kvm_init_pmc_ioctl(void __user *arg_user)
 	uint8_t index, event_no, event_mask;
 	uint8_t host_guest, kernel_user;
 	uint32_t event;
+	int cpu;
 
 	if (!arg_user) return -EINVAL;
 
-	if (smp_processor_id() != CPC_ISOLCPU)
+	cpu = get_cpu();
+	if (cpu != CPC_ISOLCPU) {
+		put_cpu();
 		return -EFAULT;
+	}
 
-	if (copy_from_user(&event, arg_user, sizeof(uint32_t)))
+	if (copy_from_user(&event, arg_user, sizeof(uint32_t))) {
+		put_cpu();
 		return -EFAULT;
+	}
 
 	index = (event & 0xFF000000) >> 24;
 	host_guest = (event & 0x00300000) >> 20;
@@ -572,6 +578,8 @@ cachepc_kvm_init_pmc_ioctl(void __user *arg_user)
 	cachepc_init_pmc(index, event_no, event_mask,
 		host_guest, kernel_user);
 
+	put_cpu();
+
 	return 0;
 }
 
@@ -608,8 +616,14 @@ cachepc_kvm_read_counts_ioctl(void __user *arg_user)
 int
 cachepc_kvm_setup_pmc_ioctl(void __user *arg_user)
 {
-	if (smp_processor_id() != CPC_ISOLCPU)
+	int cpu;
+
+	cpu = get_cpu();
+
+	if (cpu != CPC_ISOLCPU) {
+		put_cpu();
 		return -EFAULT;
+	}
 
 	/* L1 Misses in Host Kernel */
 	cachepc_init_pmc(CPC_L1MISS_PMC, 0x64, 0xD8,
@@ -619,6 +633,8 @@ cachepc_kvm_setup_pmc_ioctl(void __user *arg_user)
 	cachepc_init_pmc(CPC_RETINST_PMC, 0xC0, 0x00,
 		PMC_GUEST, PMC_KERNEL | PMC_USER);
 
+	put_cpu();
+
 	return 0;
 }
 
diff --git a/cachepc/sevstep.c b/cachepc/sevstep.c
@@ -192,20 +192,23 @@ sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
 	long count = 0;
 	int idx;
 
+	pr_warn("Sevstep: Start tracking %i\n", mode);
+
 	// Vincent: Memslots interface changed into a rb tree, see
-	// here: https:// lwn.net/Articles/856392/
-	// and here: https:// lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
+	// here: https://lwn.net/Articles/856392/
+	// and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
 	// Thus we use instead of
 	// iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
 	//	+ vcpu->kvm->memslots[0]->memslots[0].npages;
 	node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
 	first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
 	iterat_max = first_memslot->base_gfn + first_memslot->npages;
-	for (iterator = 0; iterator < iterat_max; iterator++)
-	{
+	pr_warn("Sevstep: Page count: %llu\n", iterat_max);
+	for (iterator = 0; iterator < iterat_max; iterator++) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
 		if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
+			pr_warn("Sevstep: Tracking page: %llu\n", iterator);
 			write_lock(&vcpu->kvm->mmu_lock);
 			kvm_slot_page_track_add_page(vcpu->kvm, slot, iterator, mode);
 			write_unlock(&vcpu->kvm->mmu_lock);
@@ -228,6 +231,8 @@ sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
 	long count = 0;
 	int idx;
 
+	pr_warn("Sevstep: Stop tracking %i\n", mode);
+
 	// Vincent: Memslots interface changed into a rb tree, see
 	// here: https:// lwn.net/Articles/856392/
 	// and here: https:// lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
@@ -237,8 +242,7 @@ sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
 	node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
 	first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
 	iterat_max = first_memslot->base_gfn + first_memslot->npages;
-	for (iterator=0; iterator < iterat_max; iterator++)
-	{
+	for (iterator = 0; iterator < iterat_max; iterator++) {
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
 		// Vincent: I think see here
diff --git a/cachepc/uspt.c b/cachepc/uspt.c
@@ -306,19 +306,23 @@ sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu* vcpu,
 {
 	uint64_t ret_instr_delta;
 	int i, next_idx;
+	int cpu;
+
+	cpu = get_cpu();
 
 	spin_lock(&batch_track_state_lock);
 
 	if (!batch_track_state.retrack) {
 		spin_unlock(&batch_track_state_lock);
+		put_cpu();
 		return;
 	}
 
-	if (smp_processor_id() != batch_track_state.perf_cpu) {
+	if (cpu != batch_track_state.perf_cpu) {
 		pr_warn("sevstep_uspt_batch_tracking_handle_retrack: perf was "
 			"programmed on logical cpu %d but handler was called "
 			"on %d. Did you forget to pin the vcpu thread?\n",
-			batch_track_state.perf_cpu, smp_processor_id());
+			batch_track_state.perf_cpu, cpu);
 	}
 
 	ret_instr_delta = perf_state_update_and_get_delta(batch_track_state.event_next_idx);
@@ -339,6 +343,7 @@ sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu* vcpu,
 		}
 
 		spin_unlock(&batch_track_state_lock);
+		put_cpu();
 		return;
 	}
 
@@ -354,7 +359,7 @@ sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu* vcpu,
 	batch_track_state.gfn_retrack_backlog_next_idx = 1;
 
 	spin_unlock(&batch_track_state_lock);
-
+	put_cpu();
 }
 
 int
@@ -363,6 +368,9 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
 {
 	uint64_t ret_instr_delta;
 	page_fault_event_t* event;
+	int cpu;
+
+	cpu = get_cpu();
 
 	spin_lock(&batch_track_state_lock);
 
@@ -371,22 +379,23 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
 			"got save but batch tracking is not active!\n");
 		batch_track_state.error_occured = true;
 		spin_unlock(&batch_track_state_lock);
+		put_cpu();
 		return 1;
 	}
-
 	if (batch_track_state.event_next_idx >= batch_track_state.events_size) {
 		pr_warn("sevstep_uspt_batch_tracking_save: events buffer is full!\n");
 		batch_track_state.error_occured = true;
 		spin_unlock(&batch_track_state_lock);
+		put_cpu();
 		return 1;
 	}
-
-	if (smp_processor_id() != batch_track_state.perf_cpu) {
+	if (cpu != batch_track_state.perf_cpu) {
 		pr_warn("sevstep_uspt_batch_tracking_save: perf was "
			"programmed on logical cpu %d but handler was called "
			"on %d. Did you forget to pin the vcpu thread?\n",
-			batch_track_state.perf_cpu, smp_processor_id());
+			batch_track_state.perf_cpu, cpu);
 	}
 
 	ret_instr_delta = perf_state_update_and_get_delta(batch_track_state.event_next_idx);
@@ -416,10 +425,12 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
 			"gfn retrack backlog overflow!\n");
 		batch_track_state.error_occured = true;
 		spin_unlock(&batch_track_state_lock);
+		put_cpu();
 		return 1;
 	}
 
 	spin_unlock(&batch_track_state_lock);
+	put_cpu();
 
 	return 0;
 }
diff --git a/test/sevstep.c b/test/sevstep.c
@@ -503,10 +503,11 @@ main(int argc, const char **argv)
 		free(counts);
 	}
 
-	for (i = 0; i < SAMPLE_COUNT; i++) {
-		printf("Evictions with access:\n");
-		print_counts(with_access[i]);
-	}
+	// for (i = 0; i < SAMPLE_COUNT; i++) {
+	// 	printf("Evictions with access:\n");
+	// 	print_counts(with_access[i]);
+	// }
+	printf("done.\n");
 
 	sev_kvm_deinit(&kvm_with_access);