| author | Louis Burda <quent.burda@gmail.com> | 2023-01-27 16:42:32 +0100 |
|---|---|---|
| committer | Louis Burda <quent.burda@gmail.com> | 2023-01-27 16:42:32 +0100 |
| commit | dfccc232af2bfb64cf86473e27be568b57915d3b (patch) | |
| tree | 0b41d209259c904222fe50db8d4f9ef9f3a1cf31 | |
| parent | 4a61d3c63464647e03259e098a158a56e406d946 (diff) | |
Consistent use of cpc shorthand instead of cachepc
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | arch/x86/kvm/mmu.h | 2 |
| -rw-r--r-- | arch/x86/kvm/mmu/mmu.c | 74 |
| -rw-r--r-- | arch/x86/kvm/mmu/mmu_internal.h | 2 |
| -rw-r--r-- | arch/x86/kvm/mmu/page_track.c | 2 |
| -rw-r--r-- | arch/x86/kvm/mmu/tdp_mmu.c | 12 |
| -rw-r--r-- | arch/x86/kvm/mmu/tdp_mmu.h | 2 |
| -rw-r--r-- | arch/x86/kvm/svm/svm.c | 136 |
| -rw-r--r-- | arch/x86/kvm/svm/vmenter.S | 24 |
| -rw-r--r-- | arch/x86/kvm/x86.c | 18 |
| -rw-r--r-- | virt/kvm/kvm_main.c | 6 |
10 files changed, 139 insertions, 139 deletions
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 869cae5094ec..55d0062702c1 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -52,7 +52,7 @@ extern bool __read_mostly enable_mmio_caching;
 #define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
 #define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)
 
-static inline u64 cachepc_protect_pte(u64 pte, enum kvm_page_track_mode mode)
+static inline u64 cpc_protect_pte(u64 pte, enum kvm_page_track_mode mode)
 {
 	if (mode == KVM_PAGE_TRACK_WRITE) {
 		pte &= ~PT_WRITABLE_MASK;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 838eac001d41..ac64cf9e672b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1158,7 +1158,7 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 }
 
 bool
-cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
+cpc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
 {
 	u64 spte;
 
@@ -1169,14 +1169,14 @@ cachepc_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
 	if (pt_protect)
 		spte &= ~shadow_mmu_writable_mask;
-	spte = cachepc_protect_pte(spte, mode);
+	spte = cpc_protect_pte(spte, mode);
 
 	mmu_spte_update(sptep, spte);
 
 	return true;
 }
 
-bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head,
+bool cpc_rmap_protect(struct kvm_rmap_head *rmap_head,
 	bool pt_protect, enum kvm_page_track_mode mode)
 {
 	struct rmap_iterator iter;
@@ -1185,7 +1185,7 @@ bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head,
 	flush = false;
 	for_each_rmap_spte(rmap_head, &iter, sptep) {
-		flush |= cachepc_spte_protect(sptep, pt_protect, mode);
+		flush |= cpc_spte_protect(sptep, pt_protect, mode);
 	}
 
 	return flush;
@@ -1206,7 +1206,7 @@ bool cachepc_rmap_protect(struct kvm_rmap_head *rmap_head,
 static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
 	bool pt_protect)
 {
-	return cachepc_rmap_protect(rmap_head, pt_protect, KVM_PAGE_TRACK_WRITE);
+	return cpc_rmap_protect(rmap_head, pt_protect, KVM_PAGE_TRACK_WRITE);
 }
 
 static bool spte_clear_dirty(u64 *sptep)
@@ -1369,7 +1369,7 @@ int kvm_cpu_dirty_log_size(void)
 }
 
 bool
-cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
+cpc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
 	uint64_t gfn, int min_level, enum kvm_page_track_mode mode)
 {
 	struct kvm_rmap_head *rmap_head;
@@ -1381,10 +1381,10 @@ cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
 	if (kvm_memslots_have_rmaps(kvm)) {
 		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 			rmap_head = gfn_to_rmap(gfn, i, slot);
-			flush |= cachepc_rmap_protect(rmap_head, true, mode);
+			flush |= cpc_rmap_protect(rmap_head, true, mode);
 		}
 	} else if (is_tdp_mmu_enabled(kvm)) {
-		flush |= cachepc_tdp_protect_gfn(kvm, slot, gfn, min_level, mode);
+		flush |= cpc_tdp_protect_gfn(kvm, slot, gfn, min_level, mode);
 	} else {
 		CPC_ERR("Tracking unsupported!\n");
 	}
@@ -1396,7 +1396,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	struct kvm_memory_slot *slot, u64 gfn, int min_level)
 {
-	return cachepc_kvm_mmu_slot_gfn_protect(kvm, slot,
+	return cpc_kvm_mmu_slot_gfn_protect(kvm, slot,
 		gfn, min_level, KVM_PAGE_TRACK_WRITE);
 }
@@ -3948,15 +3948,15 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 	inst_fetch = fault->error_code & PFERR_FETCH_MASK;
 
 	count = 0;
-	list_for_each_entry(tmp, &cachepc_faults, list)
+	list_for_each_entry(tmp, &cpc_faults, list)
 		count += 1;
 
-	switch (cachepc_track_mode) {
+	switch (cpc_track_mode) {
 	case CPC_TRACK_FAULT_NO_RUN:
 		BUG_ON(modes[i] != KVM_PAGE_TRACK_ACCESS);
-		cachepc_send_track_step_event_single(
-			fault->gfn, fault->error_code, cachepc_retinst);
+		cpc_send_track_step_event_single(
+			fault->gfn, fault->error_code, cpc_retinst);
 		return true;
 	case CPC_TRACK_STEPS:
@@ -3967,15 +3967,15 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 		CPC_INFO("Got fault cnt:%lu gfn:%08llx err:%u\n",
 			count, fault->gfn, fault->error_code);
 
-		cachepc_untrack_single(vcpu, fault->gfn, modes[i]);
+		cpc_untrack_single(vcpu, fault->gfn, modes[i]);
 
 		alloc = kmalloc(sizeof(struct cpc_fault), GFP_KERNEL);
 		BUG_ON(!alloc);
 		alloc->gfn = fault->gfn;
 		alloc->err = fault->error_code;
-		list_add_tail(&alloc->list, &cachepc_faults);
+		list_add_tail(&alloc->list, &cpc_faults);
 
-		cachepc_singlestep_reset = true;
+		cpc_singlestep_reset = true;
 		break;
 	case CPC_TRACK_STEPS_AND_FAULTS:
@@ -3984,15 +3984,15 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 		CPC_INFO("Got fault cnt:%lu gfn:%08llx err:%u\n",
 			count, fault->gfn, fault->error_code);
 
-		cachepc_untrack_single(vcpu, fault->gfn, modes[i]);
+		cpc_untrack_single(vcpu, fault->gfn, modes[i]);
 
 		alloc = kmalloc(sizeof(struct cpc_fault), GFP_KERNEL);
 		BUG_ON(!alloc);
 		alloc->gfn = fault->gfn;
 		alloc->err = fault->error_code;
-		list_add_tail(&alloc->list, &cachepc_faults);
+		list_add_tail(&alloc->list, &cpc_faults);
 
-		cachepc_singlestep_reset = true;
+		cpc_singlestep_reset = true;
 		break;
 	case CPC_TRACK_PAGES:
@@ -4004,15 +4004,15 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 			fault->gfn, fault->error_code);
 
 		if (!cpc_track_pages.cur_avail) {
-			cachepc_untrack_single(vcpu, fault->gfn, modes[i]);
+			cpc_untrack_single(vcpu, fault->gfn, modes[i]);
 			cpc_track_pages.cur_gfn = fault->gfn;
 			cpc_track_pages.cur_avail = true;
 			cpc_track_pages.retinst = 0;
 		} else {
-			cachepc_untrack_single(vcpu, fault->gfn, modes[i]);
-			cachepc_track_single(vcpu,
+			cpc_untrack_single(vcpu, fault->gfn, modes[i]);
+			cpc_track_single(vcpu,
 				cpc_track_pages.cur_gfn, modes[i]);
-			cachepc_send_track_page_event(cpc_track_pages.cur_gfn,
+			cpc_send_track_page_event(cpc_track_pages.cur_gfn,
 				fault->gfn, cpc_track_pages.retinst);
 			cpc_track_pages.retinst = 0;
 			cpc_track_pages.cur_gfn = fault->gfn;
@@ -4028,17 +4028,17 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 			fault->gfn, fault->error_code);
 
 		/* no conflict if next pagefault happens on a different inst */
-		if (cpc_track_pages.step && !cachepc_singlestep
+		if (cpc_track_pages.step && !cpc_singlestep
 				&& cpc_track_pages.retinst > 2)
 			cpc_track_pages.step = false;
 
-		cachepc_untrack_single(vcpu, fault->gfn, modes[i]);
+		cpc_untrack_single(vcpu, fault->gfn, modes[i]);
 		if (!cpc_track_pages.step) {
 			if (cpc_track_pages.cur_avail) {
-				cachepc_track_single(vcpu,
+				cpc_track_single(vcpu,
 					cpc_track_pages.cur_gfn, modes[i]);
-				//cachepc_send_track_page_event(cpc_track_pages.cur_gfn,
+				//cpc_send_track_page_event(cpc_track_pages.cur_gfn,
 				//	fault->gfn, cpc_track_pages.retinst);
 			}
@@ -4052,10 +4052,10 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 			BUG_ON(!alloc);
 			alloc->gfn = fault->gfn;
 			alloc->err = fault->error_code;
-			list_add_tail(&alloc->list, &cachepc_faults);
+			list_add_tail(&alloc->list, &cpc_faults);
 
 			/* single step and retrack to resolve */
-			cachepc_singlestep_reset = true;
+			cpc_singlestep_reset = true;
 		}
 		break;
@@ -4071,23 +4071,23 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 		if (!cpc_track_steps_signalled.target_avail) {
 			cpc_track_steps_signalled.target_gfn = fault->gfn;
 			cpc_track_steps_signalled.target_avail = true;
-			cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+			cpc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
 		} else {
-			cachepc_untrack_single(vcpu, fault->gfn,
+			cpc_untrack_single(vcpu, fault->gfn,
 				KVM_PAGE_TRACK_EXEC);
 		}
-		cachepc_singlestep_reset = true;
-		cachepc_prime_probe = true;
+		cpc_singlestep_reset = true;
+		cpc_prime_probe = true;
 		}
 		break;
 	}
 
-	if (cachepc_singlestep_reset)
-		cachepc_apic_timer -= 10 * CPC_APIC_TIMER_SOFTDIV;
-	if (cachepc_apic_timer < CPC_APIC_TIMER_MIN)
-		cachepc_apic_timer = CPC_APIC_TIMER_MIN;
+	if (cpc_singlestep_reset)
+		cpc_apic_timer -= 10 * CPC_APIC_TIMER_SOFTDIV;
+	if (cpc_apic_timer < CPC_APIC_TIMER_MIN)
+		cpc_apic_timer = CPC_APIC_TIMER_MIN;
 
 	return false;
 }
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index e007d9f10284..e7608f517ecc 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -133,7 +133,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 
-bool cachepc_kvm_mmu_slot_gfn_protect(struct kvm *kvm,
+bool cpc_kvm_mmu_slot_gfn_protect(struct kvm *kvm,
 	struct kvm_memory_slot *slot, u64 gfn,
 	int min_level, enum kvm_page_track_mode mode);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 977278e841ff..e240cb5c88b6 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -133,7 +133,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
 	 */
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
-	if (cachepc_kvm_mmu_slot_gfn_protect(kvm,
+	if (cpc_kvm_mmu_slot_gfn_protect(kvm,
 			slot, gfn, PG_LEVEL_4K, mode)) {
 		kvm_flush_remote_tlbs(kvm);
 	}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 5e2ea56398d6..ae5b0194a861 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1088,7 +1088,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
 	for (i = 0; i < 2; i++) {
 		if (kvm_slot_page_track_is_active(vcpu->kvm,
 				fault->slot, fault->gfn, modes[i])) {
-			new_spte = cachepc_protect_pte(new_spte, modes[i]);
+			new_spte = cpc_protect_pte(new_spte, modes[i]);
 			break;
 		}
 	}
@@ -1824,7 +1824,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 		zap_collapsible_spte_range(kvm, root, slot);
 }
 
-static bool cachepc_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
+static bool cpc_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 	gfn_t gfn, int min_level, int mode)
 {
 	struct tdp_iter iter;
@@ -1841,7 +1841,7 @@ static bool cachepc_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
-		new_spte = cachepc_protect_pte(new_spte, mode);
+		new_spte = cpc_protect_pte(new_spte, mode);
 
 		if (new_spte == iter.old_spte)
 			break;
@@ -1855,7 +1855,7 @@ static bool cachepc_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 	return spte_set;
 }
 
-bool cachepc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+bool cpc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	gfn_t gfn, int min_level, enum kvm_page_track_mode mode)
 {
 	struct kvm_mmu_page *root;
@@ -1863,7 +1863,7 @@ bool cachepc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	lockdep_assert_held_write(&kvm->mmu_lock);
 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
-		spte_set |= cachepc_protect_gfn(kvm, root, gfn, min_level, mode);
+		spte_set |= cpc_protect_gfn(kvm, root, gfn, min_level, mode);
 
 	return spte_set;
 }
@@ -1877,7 +1877,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 	struct kvm_memory_slot *slot, gfn_t gfn, int min_level)
 {
-	return cachepc_tdp_protect_gfn(kvm, slot, gfn, min_level,
+	return cpc_tdp_protect_gfn(kvm, slot, gfn, min_level,
 		KVM_PAGE_TRACK_WRITE);
 }
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 0592d05e76ee..6c451ab27a65 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -41,7 +41,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	const struct kvm_memory_slot *slot);
 
-bool cachepc_tdp_protect_gfn(struct kvm *kvm,
+bool cpc_tdp_protect_gfn(struct kvm *kvm,
 	struct kvm_memory_slot *slot, gfn_t gfn,
 	int min_level, enum kvm_page_track_mode mode);
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f0750002a49d..5cfd604546c1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2119,13 +2119,13 @@ static int intr_interception(struct kvm_vcpu *vcpu)
 	++vcpu->stat.irq_exits;
 
-	if (!cachepc_singlestep)
+	if (!cpc_singlestep)
 		return 1;
 
 	svm = to_svm(vcpu);
 	control = &svm->vmcb->control;
 
-	if (cachepc_debug && 0) {
+	if (cpc_debug && 0) {
 		hexdump_diff((const uint8_t *)&prev_vmsa,
 			(const uint8_t *)svm->sev_es.vmsa, sizeof(prev_vmsa));
 		memcpy(&prev_vmsa, svm->sev_es.vmsa, sizeof(prev_vmsa));
@@ -2146,96 +2146,96 @@ static int intr_interception(struct kvm_vcpu *vcpu)
 	wbinvd();
 
 	if (sev_es_guest(vcpu->kvm)) {
-		cachepc_rip = svm->sev_es.vmsa->rip;
+		cpc_rip = svm->sev_es.vmsa->rip;
 	} else {
-		cachepc_rip = kvm_rip_read(vcpu);
+		cpc_rip = kvm_rip_read(vcpu);
 	}
 
-	if (!cachepc_rip_prev_set) {
-		cachepc_rip_prev = cachepc_rip;
-		cachepc_rip_prev_set = true;
+	if (!cpc_rip_prev_set) {
+		cpc_rip_prev = cpc_rip;
+		cpc_rip_prev_set = true;
 	}
 
-	if (cachepc_rip == cachepc_rip_prev) {
+	if (cpc_rip == cpc_rip_prev) {
 		CPC_DBG("No RIP change (%llu,%u)\n",
-			cachepc_rip, cachepc_apic_timer);
-		cachepc_apic_timer += 1;
+			cpc_rip, cpc_apic_timer);
+		cpc_apic_timer += 1;
 		return 1;
 	}
-	cachepc_rip_prev = cachepc_rip;
-	CPC_INFO("Detected RIP change! (%u)\n", cachepc_apic_timer);
+	cpc_rip_prev = cpc_rip;
+	CPC_INFO("Detected RIP change! (%u)\n", cpc_apic_timer);
 
-	// if (!cachepc_retinst_prev)
-	//	cachepc_retinst_prev = cachepc_retinst;
-	// if (cachepc_retinst_prev == cachepc_retinst) {
-	//	cachepc_apic_timer += 1;
+	// if (!cpc_retinst_prev)
+	//	cpc_retinst_prev = cpc_retinst;
+	// if (cpc_retinst_prev == cpc_retinst) {
+	//	cpc_apic_timer += 1;
 	//	return 1;
 	// }
-	// cachepc_retinst_prev = cachepc_retinst;
-	// CPC_INFO("Detected RETINST change! (%llu,%u)\n",
-	//	cachepc_retinst, cachepc_apic_timer);
+	// cpc_retinst_prev = cpc_retinst;
+	// CPC_INFO("Detected RETINST change! (%llu,%u)\n",
+	//	cpc_retinst, cpc_apic_timer);
 
 	count = 0;
-	list_for_each_entry(fault, &cachepc_faults, list)
+	list_for_each_entry(fault, &cpc_faults, list)
 		count += 1;
 	CPC_INFO("Caught single step with %lu faults!\n", count);
 
-	switch (cachepc_track_mode) {
+	switch (cpc_track_mode) {
 	case CPC_TRACK_PAGES_RESOLVE:
 		cpc_track_pages.step = false;
-		cachepc_singlestep = false;
+		cpc_singlestep = false;
 		fallthrough;
 	case CPC_TRACK_PAGES:
-		list_for_each_entry_safe(fault, next, &cachepc_faults, list) {
-			cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_EXEC);
+		list_for_each_entry_safe(fault, next, &cpc_faults, list) {
+			cpc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_EXEC);
 			list_del(&fault->list);
 			kfree(fault);
 		}
 		break;
 	case CPC_TRACK_STEPS:
-		list_for_each_entry_safe(fault, next, &cachepc_faults, list) {
-			cachepc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_EXEC);
+		list_for_each_entry_safe(fault, next, &cpc_faults, list) {
+			cpc_track_single(vcpu, fault->gfn, KVM_PAGE_TRACK_EXEC);
 			list_del(&fault->list);
 			kfree(fault);
 		}
-		cachepc_singlestep_reset = true;
+		cpc_singlestep_reset = true;
 		break;
 	case CPC_TRACK_STEPS_AND_FAULTS:
 		inst_gfn_seen = false;
-		list_for_each_entry_safe(fault, next, &cachepc_faults, list) {
+		list_for_each_entry_safe(fault, next, &cpc_faults, list) {
 			if (!inst_gfn_seen && (fault->err & PFERR_FETCH_MASK))
 				inst_gfn_seen = true;
 			if (!inst_gfn_seen) {
 				list_del(&fault->list);
 				kfree(fault);
 			} else {
-				cachepc_track_single(vcpu, fault->gfn,
+				cpc_track_single(vcpu, fault->gfn,
 					KVM_PAGE_TRACK_ACCESS);
 			}
 		}
-		cachepc_send_track_step_event(&cachepc_faults);
-		cachepc_singlestep_reset = true;
+		cpc_send_track_step_event(&cpc_faults);
+		cpc_singlestep_reset = true;
 		break;
 	case CPC_TRACK_STEPS_SIGNALLED:
 		if (cpc_track_steps_signalled.enabled
 				&& cpc_track_steps_signalled.target_avail) {
-			cachepc_send_track_step_event_single(
+			cpc_send_track_step_event_single(
 				cpc_track_steps_signalled.target_gfn,
-				0, cachepc_retinst);
-			cachepc_track_single(vcpu,
+				0, cpc_retinst);
+			cpc_track_single(vcpu,
 				cpc_track_steps_signalled.target_gfn,
 				KVM_PAGE_TRACK_EXEC);
-			cachepc_prime_probe = false;
-			cachepc_singlestep = false;
+			cpc_prime_probe = false;
+			cpc_singlestep = false;
 		}
 		break;
 	}
 
-	if (cachepc_singlestep_reset)
-		cachepc_apic_timer -= 30 * CPC_APIC_TIMER_SOFTDIV;
-	if (cachepc_apic_timer < CPC_APIC_TIMER_MIN)
-		cachepc_apic_timer = CPC_APIC_TIMER_MIN;
+	if (cpc_singlestep_reset)
+		cpc_apic_timer -= 30 * CPC_APIC_TIMER_SOFTDIV;
+	if (cpc_apic_timer < CPC_APIC_TIMER_MIN)
+		cpc_apic_timer = CPC_APIC_TIMER_MIN;
 
-	list_for_each_entry_safe(fault, next, &cachepc_faults, list) {
+	list_for_each_entry_safe(fault, next, &cpc_faults, list) {
 		list_del(&fault->list);
 		kfree(fault);
 	}
@@ -3437,7 +3437,7 @@ int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
 	if (!svm_check_exit_valid(exit_code))
 		return svm_handle_invalid_exit(vcpu, exit_code);
 
-	if (cachepc_debug) {
+	if (cpc_debug) {
 		for (i = 0; i < sizeof(codelut) / sizeof(codelut[0]); i++) {
 			if (codelut[i].code == exit_code)
 				CPC_INFO("KVM EXIT (%s)\n", codelut[i].name);
@@ -3482,9 +3482,9 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	if (cachepc_pause_vm) {
+	if (cpc_pause_vm) {
 		CPC_DBG("Pausing vm..\n");
-		cachepc_send_pause_event();
+		cpc_send_pause_event();
 	}
 
 	trace_kvm_exit(vcpu, KVM_ISA_SVM);
@@ -3972,27 +3972,27 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	cpu = get_cpu();
 	WARN_ON(cpu != 2);
 
-	memset(cachepc_msrmts, 0, L1_SETS);
+	memset(cpc_msrmts, 0, L1_SETS);
 
-	if (cachepc_singlestep_reset) {
-		if (cachepc_apic_timer < CPC_APIC_TIMER_MIN)
-			cachepc_apic_timer = CPC_APIC_TIMER_MIN;
-		cachepc_rip_prev_set = false;
-		cachepc_singlestep = true;
-		cachepc_singlestep_reset = false;
+	if (cpc_singlestep_reset) {
+		if (cpc_apic_timer < CPC_APIC_TIMER_MIN)
+			cpc_apic_timer = CPC_APIC_TIMER_MIN;
+		cpc_rip_prev_set = false;
+		cpc_singlestep = true;
+		cpc_singlestep_reset = false;
 	}
 
-	if (cachepc_long_step) {
-		WARN_ON(cachepc_singlestep);
-		cachepc_apic_timer = 500000 * CPC_APIC_TIMER_SOFTDIV;
-		cachepc_apic_oneshot = true;
-	} else if (cachepc_singlestep) {
-		cachepc_apic_oneshot = true;
+	if (cpc_long_step) {
+		WARN_ON(cpc_singlestep);
+		cpc_apic_timer = 500000 * CPC_APIC_TIMER_SOFTDIV;
+		cpc_apic_oneshot = true;
+	} else if (cpc_singlestep) {
+		cpc_apic_oneshot = true;
 	} else {
-		cachepc_apic_oneshot = false;
+		cpc_apic_oneshot = false;
 	}
 
-	cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC);
+	cpc_retinst = cpc_read_pmc(CPC_RETINST_PMC);
 
 	if (sev_es_guest(vcpu->kvm)) {
 		__svm_sev_es_vcpu_run(vmcb_pa);
@@ -4012,18 +4012,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 		vmload(__sme_page_pa(sd->save_area));
 	}
 
-	cachepc_retinst = cachepc_read_pmc(CPC_RETINST_PMC) - cachepc_retinst;
+	cpc_retinst = cpc_read_pmc(CPC_RETINST_PMC) - cpc_retinst;
 
-	if (cachepc_prime_probe)
-		cachepc_save_msrmts(cachepc_ds);
+	if (cpc_prime_probe)
+		cpc_save_msrmts(cpc_ds);
 
-	cachepc_apic_oneshot = false;
+	cpc_apic_oneshot = false;
 
-	if (cachepc_track_mode == CPC_TRACK_PAGES
-			|| cachepc_track_mode == CPC_TRACK_PAGES_RESOLVE)
-		cpc_track_pages.retinst += cachepc_retinst;
+	if (cpc_track_mode == CPC_TRACK_PAGES
+			|| cpc_track_mode == CPC_TRACK_PAGES_RESOLVE)
+		cpc_track_pages.retinst += cpc_retinst;
 
-	if (!cachepc_singlestep)
+	if (!cpc_singlestep)
 		CPC_DBG("post vcpu_run\n");
 
 	put_cpu();
@@ -4035,9 +4035,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (cachepc_pause_vm) {
+	if (cpc_pause_vm) {
 		CPC_DBG("Pausing vm..\n");
-		cachepc_send_pause_event();
+		cpc_send_pause_event();
 	}
 
 	trace_kvm_entry(vcpu);
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 198b17c9a842..df79ac1b9804 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -31,24 +31,24 @@
 #include "../cachepc/macro.S"
 
-.extern cachepc_msrmts
-.extern cachepc_regs_tmp
-.extern cachepc_regs_vm
+.extern cpc_msrmts
+.extern cpc_regs_tmp
+.extern cpc_regs_vm
 
 .macro load_tmp off reg
-	mov cachepc_regs_tmp+\off, \reg
+	mov cpc_regs_tmp+\off, \reg
 .endm
 
 .macro save_tmp off reg
-	mov \reg, cachepc_regs_tmp+\off
+	mov \reg, cpc_regs_tmp+\off
 .endm
 
 .macro load_vm off reg
-	mov cachepc_regs_vm+\off, \reg
+	mov cpc_regs_vm+\off, \reg
 .endm
 
 .macro save_vm off reg
-	mov \reg, cachepc_regs_vm+\off
+	mov \reg, cpc_regs_vm+\off
 .endm
 
 .macro apply_regs func
@@ -74,26 +74,26 @@
 	apply_regs save_vm
 	apply_regs load_tmp
 
-	movb cachepc_apic_oneshot, %al
+	movb cpc_apic_oneshot, %al
 	cmp $0, %al
 	je skip_apic_\name
 
 	mov $0, %rdx
-	mov cachepc_apic_timer, %eax
+	mov cpc_apic_timer, %eax
 	mov $CPC_APIC_TIMER_SOFTDIV, %rbx
 	divq %rbx
 	mov %rax, %rdi
-	call cachepc_apic_oneshot_run
+	call cpc_apic_oneshot_run
 
 skip_apic_\name:
-	movb cachepc_prime_probe, %al
+	movb cpc_prime_probe, %al
 	mov %rax, %r15
 	cmp $0, %al
 	je skip_prime_\name
 
 	wbinvd
 
-	mov cachepc_ds, %r9
+	mov cpc_ds, %r9
 	prime \name %r9 %r10 %r8
 	prime 1_\name %r9 %r10 %r8
 	prime 2_\name %r9 %r10 %r8
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1addee3a05c5..bc6864d930ea 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9332,21 +9332,21 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	}
 	case KVM_HC_CPC_VMMCALL_SIGNAL:
 		CPC_DBG("SIGNAL VMMCALL %lu:%lu\n", a0, a1);
-		if (cachepc_track_mode == CPC_TRACK_STEPS_SIGNALLED) {
+		if (cpc_track_mode == CPC_TRACK_STEPS_SIGNALLED) {
 			if (a0 == CPC_GUEST_START_TRACK) {
 				cpc_track_steps_signalled.enabled = true;
 				cpc_track_steps_signalled.target_avail = false;
-				cachepc_singlestep = false;
-				cachepc_prime_probe = false;
-				cachepc_track_all(vcpu, KVM_PAGE_TRACK_EXEC);
+				cpc_singlestep = false;
+				cpc_prime_probe = false;
+				cpc_track_all(vcpu, KVM_PAGE_TRACK_EXEC);
 			} else if (a0 == CPC_GUEST_STOP_TRACK) {
 				cpc_track_steps_signalled.enabled = false;
-				cachepc_singlestep = false;
-				cachepc_prime_probe = false;
-				cachepc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
+				cpc_singlestep = false;
+				cpc_prime_probe = false;
+				cpc_untrack_all(vcpu, KVM_PAGE_TRACK_EXEC);
 			}
 		}
-		cachepc_send_guest_event(a0, a1);
+		cpc_send_guest_event(a0, a1);
 		ret = 0;
 		break;
 	case KVM_HC_CPC_VMMCALL_EXIT:
@@ -9539,7 +9539,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
 	}
 
 	/* Don't inject interrupts if the user asked to avoid doing so */
-	if (cachepc_singlestep || (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ))
+	if (cpc_singlestep || (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ))
 		return 0;
 
 	/*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0012c8b49f46..d242bff4d015 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4874,7 +4874,7 @@ static long kvm_dev_ioctl(struct file *filp,
 		r = -EOPNOTSUPP;
 		break;
 	default:
-		return cachepc_kvm_ioctl(filp, ioctl, arg);
+		return cpc_kvm_ioctl(filp, ioctl, arg);
 	}
 out:
 	return r;
@@ -5802,7 +5802,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	r = kvm_vfio_ops_init();
 	WARN_ON(r);
 
-	cachepc_kvm_init();
+	cpc_kvm_init();
 
 	return 0;
@@ -5833,7 +5833,7 @@ void kvm_exit(void)
 {
 	int cpu;
 
-	cachepc_kvm_exit();
+	cpc_kvm_exit();
 
 	debugfs_remove_recursive(kvm_debugfs_dir);
 	misc_deregister(&kvm_dev);
