author		Ashish Kalra <ashish.kalra@amd.com>	2022-09-28 20:47:12 +0000
committer	Ashish Kalra <ashish.kalra@amd.com>	2022-09-28 20:47:12 +0000
commit		cd1599797a25194b293754293d67b217d739a039 (patch)
tree		3a6bcf0eeb8d01c4abb5c707211c8ea17eb72f92 /virt
parent		0aaa1e599bee256b3b15643bbb95e80ce7aa9be5 (diff)
Revert "KVM: SEV: add cache flush to solve SEV cache incoherency issues"
This reverts commit 683412ccf61294d727ead4a73d97397396e69a6b.

The commit needs to be reverted to fix soft-lockup and RCU stall issues seen on both the SNP host and guest. The wbinvd_on_all_cpus() invoked from the MMU invalidation notifiers as part of that patch adds considerable overhead and latency on the SNP host kernel, especially on hosts with a large physical CPU count, during NUMA autobalancing and host RMP page-fault handling.

Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
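For reference, the reverted commit wired a full cache flush into the guest-memory-reclaim path. On the x86/SVM side it boiled down to roughly the following (a sketch reconstructed from commit 683412ccf612 for illustration; it is not part of this diff):

	/* arch/x86/kvm/x86.c: the arch hook forwards to the vendor module. */
	void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
	{
		static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
	}

	/*
	 * arch/x86/kvm/svm/sev.c: hardware does not keep caches coherent
	 * for SEV-encrypted memory, so flush every CPU's caches before
	 * reclaimed guest pages can be reused. This machine-wide WBINVD
	 * is the latency source described above.
	 */
	void sev_guest_memory_reclaimed(struct kvm *kvm)
	{
		if (!sev_guest(kvm))
			return;

		wbinvd_on_all_cpus();
	}

Because kvm_mmu_notifier_invalidate_range_start() registered this hook as its on_unlock callback (see the hunk removing .on_unlock = kvm_arch_guest_memory_reclaimed below), every MMU-notifier invalidation on an SEV/SNP host paid for a machine-wide cache flush, and NUMA autobalancing and RMP page-fault handling generate such invalidations at a high rate.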
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	27
1 file changed, 3 insertions(+), 24 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f2a63cb2658b..ce78dd138ab7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -164,10 +164,6 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
-__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
-{
-}
-
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -361,12 +357,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
 
-static void kvm_flush_shadow_all(struct kvm *kvm)
-{
-	kvm_arch_flush_shadow_all(kvm);
-	kvm_arch_guest_memory_reclaimed(kvm);
-}
-
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
 					       gfp_t gfp_flags)
@@ -495,15 +485,12 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
 			     unsigned long end);
 
-typedef void (*on_unlock_fn_t)(struct kvm *kvm);
-
 struct kvm_hva_range {
 	unsigned long start;
 	unsigned long end;
 	pte_t pte;
 	hva_handler_t handler;
 	on_lock_fn_t on_lock;
-	on_unlock_fn_t on_unlock;
 	bool flush_on_ret;
 	bool may_block;
 };
@@ -591,11 +578,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 	if (range->flush_on_ret && ret)
 		kvm_flush_remote_tlbs(kvm);
 
-	if (locked) {
+	if (locked)
 		KVM_MMU_UNLOCK(kvm);
-		if (!IS_KVM_NULL_FN(range->on_unlock))
-			range->on_unlock(kvm);
-	}
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -616,7 +600,6 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 		.pte		= pte,
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
-		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= true,
 		.may_block	= false,
 	};
@@ -636,7 +619,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
 		.pte		= __pte(0),
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
-		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= false,
 	};
@@ -705,7 +687,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		.pte		= __pte(0),
 		.handler	= kvm_unmap_gfn_range,
 		.on_lock	= kvm_inc_notifier_count,
-		.on_unlock	= kvm_arch_guest_memory_reclaimed,
 		.flush_on_ret	= true,
 		.may_block	= mmu_notifier_range_blockable(range),
 	};
@@ -760,7 +741,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 		.pte		= __pte(0),
 		.handler	= (void *)kvm_null_fn,
 		.on_lock	= kvm_dec_notifier_count,
-		.on_unlock	= (void *)kvm_null_fn,
 		.flush_on_ret	= false,
 		.may_block	= mmu_notifier_range_blockable(range),
 	};
@@ -833,7 +813,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_flush_shadow_all(kvm);
+	kvm_arch_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1246,7 +1226,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
 	kvm->mn_active_invalidate_count = 0;
 #else
-	kvm_flush_shadow_all(kvm);
+	kvm_arch_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -1673,7 +1653,6 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
 	 *	- kvm_is_visible_gfn (mmu_check_root)
 	 */
 	kvm_arch_flush_shadow_memslot(kvm, old);
-	kvm_arch_guest_memory_reclaimed(kvm);
 
 	/* Was released by kvm_swap_active_memslots, reacquire. */
 	mutex_lock(&kvm->slots_arch_lock);