cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

commit dc737cafcbc0d030d797b66a45ccf9fe92d09700
parent c017dafb1efb1e28b9f5b3500bf5f87e451709d5
Author: Louis Burda <quent.burda@gmail.com>
Date:   Mon,  6 Feb 2023 11:23:10 -0600

Use retired instructions in userspace for TRACK_STEPS
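
TRACK_STEPS now consults a second retired-instruction counter that only counts while the vCPU runs guest code in user mode, so the stepper can tell whether the page it is single-stepping actually belongs to userspace. The sketch below shows one plausible way such a counter is programmed on an AMD Zen core: the MSR addresses and PERF_CTL bit layout follow AMD's public PPR, but the helper name and its mapping to CPC_RETINST_USER_PMC are assumptions, since the module's counter setup is not part of this diff.

/* Hypothetical sketch, not code from this commit: program core PMC
 * `pmc` to count retired instructions (PMCx0C0) only for guest code
 * running at CPL > 0. cpc_read_pmc(CPC_RETINST_USER_PMC) in the diff
 * below would then read back such a counter. For event 0x0C0 the
 * extended EventSelect bits [11:8] are zero, so the low byte suffices. */
#include <linux/types.h>
#include <asm/msr.h>

#define PERF_CTL0	0xc0010200 /* Core::X86::Msr::PERF_CTL0, stride 2 */
#define PERF_CTR0	0xc0010201 /* Core::X86::Msr::PERF_CTR0, stride 2 */

#define EVT_RETINST	0x0c0ULL	/* PMCx0C0: retired instructions */
#define CTL_USR		(1ULL << 16)	/* count user mode (CPL > 0) only */
#define CTL_EN		(1ULL << 22)	/* enable the counter */
#define CTL_GUESTONLY	(1ULL << 40)	/* count only inside the SVM guest */

static void setup_retinst_user_pmc(unsigned int pmc)
{
	u64 ctl = (EVT_RETINST & 0xff) | CTL_USR | CTL_EN | CTL_GUESTONLY;

	wrmsrl(PERF_CTL0 + 2 * pmc, ctl);	/* configure the event */
	wrmsrl(PERF_CTR0 + 2 * pmc, 0);		/* reset the count */
}

With Usr set and OS clear, kernel-mode guest execution is invisible to this counter, which is the property the stepping filter in the diff relies on.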

Diffstat:
M arch/x86/kvm/mmu/mmu.c | 33 ++++++++++++++++++++++++++-------
M arch/x86/kvm/svm/svm.c | 26 +++++++++++++++++++++-----
2 files changed, 47 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
@@ -3978,9 +3978,10 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 			cpc_untrack_single(vcpu, fault->gfn, modes[i]);
-			if (cpc_track_steps.use_target && !cpc_track_steps.stepping
+			if (cpc_track_steps.use_target && !cpc_track_steps.in_target
 					&& inst_fetch
 					&& fault->gfn == cpc_track_steps.target_gfn) {
 				CPC_INFO("Entering target gfn for stepping\n");
+				cpc_track_steps.in_target = true;
 				cpc_track_steps.stepping = true;
 				cpc_untrack_single(vcpu, fault->gfn,
 					KVM_PAGE_TRACK_EXEC);
@@ -3993,9 +3994,10 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 				cpc_untrack_single(vcpu, fault->gfn,
 					KVM_PAGE_TRACK_EXEC);
 			}
-		} else if (cpc_track_steps.use_target && cpc_track_steps.stepping
+		} else if (cpc_track_steps.use_target && cpc_track_steps.in_target
 				&& inst_fetch
 				&& fault->gfn != cpc_track_steps.target_gfn) {
 			CPC_INFO("Leaving target gfn for stepping\n");
+			cpc_track_steps.in_target = false;
 			cpc_track_steps.stepping = false;
 			if (cpc_track_steps.with_data) {
 				cpc_untrack_all(vcpu, KVM_PAGE_TRACK_ACCESS);
@@ -4023,6 +4025,9 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 		break;
 	case CPC_TRACK_PAGES:
 		BUG_ON(modes[i] != KVM_PAGE_TRACK_EXEC);
+		/* future readers: I know this part is messy, but handling
+		 * instructions on page boundaries has many cases
+		 * when optimizing for the common case (not on boundary) */
 		if (!inst_fetch || !fault->present)
 			return false;
@@ -4046,8 +4051,10 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 			cpc_track_single(vcpu, cpc_track_pages.cur_gfn,
 				KVM_PAGE_TRACK_EXEC);
 			cpc_track_pages.prev_gfn = cpc_track_pages.cur_gfn;
+			cpc_track_pages.prev_err = cpc_track_pages.cur_err;
 			cpc_track_pages.prev_avail = true;
 			cpc_track_pages.cur_gfn = cpc_track_pages.next_gfn;
+			cpc_track_pages.cur_err = cpc_track_pages.next_err;
 			cpc_track_pages.cur_avail = true;
 			cpc_track_pages.next_avail = false;
 			cpc_track_pages.in_step = false;
@@ -4056,41 +4063,53 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 		cpc_untrack_single(vcpu, fault->gfn, modes[i]);
 		if (!cpc_track_pages.in_step) {
+			/* assume instruction is not on page boundary,
+			 * retrack previous, keep current untracked.. */
 			if (cpc_track_pages.cur_avail) {
 				cpc_track_single(vcpu, cpc_track_pages.cur_gfn,
 					modes[i]);
 				cpc_send_track_page_event(cpc_track_pages.cur_gfn,
-					fault->gfn, cpc_track_pages.retinst);
+					fault->gfn, fault->error_code,
+					cpc_track_pages.retinst,
+					cpc_track_pages.retinst_user);
 				cpc_track_pages.prev_gfn = cpc_track_pages.cur_gfn;
+				cpc_track_pages.prev_err = cpc_track_pages.cur_err;
 				cpc_track_pages.prev_avail = true;
 			}
 			cpc_track_pages.cur_gfn = fault->gfn;
+			cpc_track_pages.cur_err = fault->error_code;
 			cpc_track_pages.cur_avail = true;
 			cpc_track_pages.next_avail = false;
 			cpc_track_pages.retinst = 0;
+			cpc_track_pages.retinst_user = 0;
 			cpc_track_pages.in_step = true;
 		} else {
 			WARN_ON(cpc_track_pages.next_avail);
 			if (is_prev_gfn) {
 				/* instruction on boundary A -> B, but we
 				 * untracked A previously so now its being
-				 * retracked load the insrtuction.
+				 * retracked to load the instruction.
 				 * reorder gfns chronologically */
 				cpc_track_pages.next_gfn = cpc_track_pages.cur_gfn;
+				cpc_track_pages.next_err = cpc_track_pages.cur_err;
 				cpc_track_pages.next_avail = true;
 				cpc_track_pages.cur_gfn = cpc_track_pages.prev_gfn;
+				cpc_track_pages.cur_err = cpc_track_pages.prev_err;
 				cpc_track_pages.cur_avail = true;
 				cpc_track_pages.prev_avail = false;
 			} else {
-				/* instruction on boundary A -> B and both
-				 * pages need to be loaded simultaneously */
+				/* instruction on boundary B -> C in order */
 				cpc_track_pages.next_gfn = fault->gfn;
+				cpc_track_pages.next_err = fault->error_code;
 				cpc_track_pages.next_avail = true;
 				cpc_send_track_page_event(cpc_track_pages.cur_gfn,
					cpc_track_pages.next_gfn,
-					cpc_track_pages.retinst);
+					cpc_track_pages.cur_err,
+					cpc_track_pages.retinst,
+					cpc_track_pages.retinst_user);
 				cpc_track_pages.retinst = 0;
+				cpc_track_pages.retinst_user = 0;
 			}
 			CPC_INFO("Instruction on boundary %08llx -> %08llx\n",
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
@@ -2163,12 +2163,22 @@ static int intr_interception(struct kvm_vcpu *vcpu)
 				inst_gfn_seen = true;
 			}
 			if (!inst_gfn_seen && cpc_track_steps.use_filter) {
+				/* remove without retracking */
 				list_del(&fault->list);
 				kfree(fault);
 			}
 		}
-		if (cpc_track_steps.stepping)
-			cpc_send_track_step_event(&cpc_faults);
+
+		if (cpc_track_steps.target_user && !cpc_retinst_user) {
+			/* stop single-stepping until we leave this page */
+			CPC_INFO("Target page not userspace, skipping..\n");
+			cpc_singlestep = false;
+			cpc_prime_probe = false;
+			cpc_track_steps.stepping = false;
+			break;
+		}
+
+		cpc_send_track_step_event(&cpc_faults);
 		list_for_each_entry_safe(fault, next, &cpc_faults, list) {
 			if (cpc_track_steps.with_data && cpc_track_steps.stepping)
 				cpc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_ACCESS);
@@ -3383,7 +3393,7 @@ int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
 	if (!svm_check_exit_valid(exit_code))
 		return svm_handle_invalid_exit(vcpu, exit_code);
 
-	if (cpc_loglevel >= CPC_LOGLVL_INFO && exit_code != SVM_EXIT_INTR) {
+	if (cpc_loglevel >= CPC_LOGLVL_DBG && exit_code != SVM_EXIT_INTR) {
 		for (i = 0; i < sizeof(codelut) / sizeof(codelut[0]); i++) {
 			if (codelut[i].code == exit_code)
 				CPC_INFO("KVM EXIT %s (%u,%llu)\n",
@@ -3963,6 +3973,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	cpc_apic_timer = cpc_apic_timer_min;
 	cpc_retinst = cpc_read_pmc(CPC_RETINST_PMC);
+	cpc_retinst_user = cpc_read_pmc(CPC_RETINST_USER_PMC);
 
 	if (sev_es_guest(vcpu->kvm)) {
 		__svm_sev_es_vcpu_run(vmcb_pa);
@@ -3983,12 +3994,17 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	}
 
 	cpc_retinst = cpc_read_pmc(CPC_RETINST_PMC) - cpc_retinst;
+	cpc_retinst_user = cpc_read_pmc(CPC_RETINST_USER_PMC) - cpc_retinst_user;
 
 	if (cpc_prime_probe)
 		cpc_save_msrmts(cpc_ds);
 
-	if (cpc_track_mode == CPC_TRACK_PAGES && cpc_retinst >= 1)
-		cpc_track_pages.retinst += cpc_retinst - 1;
+	if (cpc_track_mode == CPC_TRACK_PAGES) {
+		if (cpc_retinst >= 1)
+			cpc_track_pages.retinst += cpc_retinst - 1;
+		if (cpc_retinst_user >= 1)
+			cpc_track_pages.retinst_user += cpc_retinst_user - 1;
+	}
 	put_cpu();
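
For orientation, the mmu.c hunks above maintain a sliding three-slot window (prev/cur/next) over the guest frames an instruction fetch may touch, and this commit threads the page-fault error code and a user-mode retired-instruction count through every rotation of that window. The struct below is a sketch of that state inferred from the field accesses in the diff; the real definition lives elsewhere in the cachepc patches and may differ in types and layout.

/* Inferred sketch of the CPC_TRACK_PAGES state touched above; field
 * names are taken from the diff, everything else is assumption. */
#include <linux/types.h>

struct cpc_track_pages_sketch {
	u64 prev_gfn, cur_gfn, next_gfn; /* window of executed gfns */
	u64 prev_err, cur_err, next_err; /* fault error codes (new here) */
	bool prev_avail, cur_avail, next_avail;
	bool in_step;	   /* currently resolving a page-boundary fetch */
	u64 retinst;	   /* guest instructions retired since last event */
	u64 retinst_user;  /* CPL3 subset of retinst (new in this commit) */
};

On the svm.c side the same idea gates single-stepping: cpc_retinst_user is sampled around VMRUN exactly like cpc_retinst, and a step whose user-mode delta is zero means the current page cannot be the userspace target, so cpc_singlestep, cpc_prime_probe and cpc_track_steps.stepping are cleared until a later instruction fetch re-enters a target page.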