author	Louis Burda <quent.burda@gmail.com>	2022-12-11 02:50:50 +0100
committer	Louis Burda <quent.burda@gmail.com>	2022-12-11 02:50:50 +0100
commit	be291b876f75894ecb80308be35d2983f069038f (patch)
tree	612cd595ea258b358e5917a692179f9a833cfda4
parent	6a825bc86322af6c7bac4304dec5722e18539aa2 (diff)
download	cachepc-linux-be291b876f75894ecb80308be35d2983f069038f.tar.gz
download	cachepc-linux-be291b876f75894ecb80308be35d2983f069038f.zip
Revert unnecessary diffs and small tdp_mmu fix
-rw-r--r--	arch/x86/kernel/sev.c	12
-rw-r--r--	arch/x86/kvm/cpuid.c	4
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	18
-rw-r--r--	arch/x86/kvm/mmu/page_track.c	4
-rw-r--r--	arch/x86/kvm/mmu/tdp_mmu.c	53
-rw-r--r--	arch/x86/kvm/x86.c	2
-rw-r--r--	[-rwxr-xr-x]	drivers/crypto/ccp/sev-dev.c	5
-rw-r--r--	drivers/iommu/amd/amd_iommu_types.h	2
-rw-r--r--	virt/kvm/kvm_main.c	2
9 files changed, 16 insertions, 86 deletions
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 5c2d9b07c6aa..f603a724b08e 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1034,8 +1034,6 @@ static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
if (!vmsa)
return -ENOMEM;
- CPC_WARN("New VMSA allocated!\n");
-
/* CR4 should maintain the MCE value */
cr4 = native_read_cr4() & X86_CR4_MCE;
@@ -2591,11 +2589,11 @@ static int rmpupdate(u64 pfn, struct rmpupdate *val)
* direct map.
*/
if (val->assigned) {
- // if (invalid_direct_map(pfn, npages)) {
- // pr_err("Failed to unmap pfn 0x%llx pages %d from direct_map\n",
- // pfn, npages);
- // return -EFAULT;
- // }
+ if (invalid_direct_map(pfn, npages)) {
+ pr_err("Failed to unmap pfn 0x%llx pages %d from direct_map\n",
+ pfn, npages);
+ return -EFAULT;
+ }
}
retry:
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b63672b47321..6598ca7022b3 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1449,8 +1449,8 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
return 1;
- eax = kvm_rax_read(vcpu);
- ecx = kvm_rcx_read(vcpu);
+ eax = kvm_rax_read(vcpu);
+ ecx = kvm_rcx_read(vcpu);
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
kvm_rax_write(vcpu, eax);
kvm_rbx_write(vcpu, ebx);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8206e929adf3..4b219d84792d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1154,24 +1154,6 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
#include "../cachepc/mmu.c"
-/*
- * Write-protect on the specified @sptep, @pt_protect indicates whether
- * spte write-protection is caused by protecting shadow page table.
- *
- * Note: write protection is difference between dirty logging and spte
- * protection:
- * - for dirty logging, the spte can be set to writable at anytime if
- * its dirty bitmap is properly set.
- * - for spte protection, the spte can be writable only after unsync-ing
- * shadow page.
- *
- * Return true if tlb need be flushed.
- */
-// static bool spte_write_protect(u64 *sptep, bool pt_protect)
-// {
-// return cachepc_spte_protect(sptep, pt_protect, KVM_PAGE_TRACK_WRITE);
-// }
-
static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
bool pt_protect)
{
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 315b2d06118c..977278e841ff 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -125,8 +125,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
!kvm_page_track_write_tracking_enabled(kvm)))
return;
- CPC_DBG("Tracking page: %llu %i\n", gfn, mode);
-
update_gfn_track(slot, gfn, mode, 1);
/*
@@ -166,8 +164,6 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
!kvm_page_track_write_tracking_enabled(kvm)))
return;
- CPC_DBG("Untracking page: %llu %i\n", gfn, mode);
-
update_gfn_track(slot, gfn, mode, -1);
/*
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 73edaf70f4e8..f6d6f934525c 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1827,8 +1827,10 @@ static bool cachepc_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
continue;
new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
- new_spte &= ~PT_WRITABLE_MASK;
- if (mode == KVM_PAGE_TRACK_ACCESS) {
+ if (mode == KVM_PAGE_TRACK_WRITE) {
+ new_spte &= ~PT_WRITABLE_MASK;
+ } else if (mode == KVM_PAGE_TRACK_ACCESS) {
+ new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~PT_PRESENT_MASK;
new_spte &= ~PT_USER_MASK;
new_spte |= (0x1ULL << PT64_NX_SHIFT);
@@ -1854,8 +1856,6 @@ bool cachepc_tdp_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
struct kvm_mmu_page *root;
bool spte_set = false;
- // pr_warn("Sevstep: tdp_protect_gfn\n");
-
lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root, slot->as_id)
spte_set |= cachepc_protect_gfn(kvm, root, gfn, min_level, mode);
@@ -1869,57 +1869,12 @@ EXPORT_SYMBOL(cachepc_tdp_protect_gfn);
* MMU-writable bit to ensure future writes continue to be intercepted.
* Returns true if an SPTE was set and a TLB flush is needed.
*/
-// static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-// gfn_t gfn, int min_level)
-// {
-// struct tdp_iter iter;
-// u64 new_spte;
-// bool spte_set = false;
-//
-// BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
-//
-// rcu_read_lock();
-//
-// for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
-// if (!is_shadow_present_pte(iter.old_spte) ||
-// !is_last_spte(iter.old_spte, iter.level))
-// continue;
-//
-// new_spte = iter.old_spte & ~shadow_mmu_writable_mask;
-// new_spte &= ~PT_WRITABLE_MASK;
-//
-// if (new_spte == iter.old_spte)
-// break;
-//
-// tdp_mmu_set_spte(kvm, &iter, new_spte);
-// spte_set = true;
-// }
-//
-// rcu_read_unlock();
-//
-// return spte_set;
-// }
-
-/*
- * Removes write access on the last level SPTE mapping this GFN and unsets the
- * MMU-writable bit to ensure future writes continue to be intercepted.
- * Returns true if an SPTE was set and a TLB flush is needed.
- */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
int min_level)
{
return cachepc_tdp_protect_gfn(kvm, slot, gfn, min_level,
KVM_PAGE_TRACK_WRITE);
-
- // struct kvm_mmu_page *root;
- // bool spte_set = false;
-
- // lockdep_assert_held_write(&kvm->mmu_lock);
- // for_each_tdp_mmu_root(kvm, root, slot->as_id)
- // spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
-
- // return spte_set;
}
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dfe54e6ca5cc..9929c8cb3999 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9331,7 +9331,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
return 0;
}
case KVM_HC_CPC_VMMCALL:
- CPC_WARN("Cachepc: Hypecrcall Run\n");
+ CPC_WARN("Intercepted VMMCALL %lu:%lu\n", a0, a1);
cachepc_send_guest_event(a0, a1);
ret = 0;
break;
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 7899e1efe852..e089fbf9017f 100755..100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -87,7 +87,7 @@ static void *sev_init_ex_buffer;
static size_t sev_es_tmr_size = SEV_ES_TMR_SIZE;
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-int sev_do_cmd(int cmd, void *data, int *psp_ret);
+static int sev_do_cmd(int cmd, void *data, int *psp_ret);
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
@@ -865,7 +865,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
return ret;
}
-int sev_do_cmd(int cmd, void *data, int *psp_ret)
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
int rc;
@@ -875,7 +875,6 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret)
return rc;
}
-EXPORT_SYMBOL(sev_do_cmd);
static int __sev_init_locked(int *error)
{
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ec317e7c348a..5b1019dab328 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -275,7 +275,7 @@
*
* 512GB Pages are not supported due to a hardware bug
*/
-#define AMD_IOMMU_PGSIZES (PAGE_SIZE)
+#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 869faf927e5d..0012c8b49f46 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1367,7 +1367,7 @@ static void kvm_insert_gfn_node(struct kvm_memslots *slots,
int idx = slots->node_idx;
parent = NULL;
- for (node = &gfn_tree->rb_node; *node;) {
+ for (node = &gfn_tree->rb_node; *node; ) {
struct kvm_memory_slot *tmp;
tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);