tdp_mmu.h (3384B)
// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

/*
 * Take a reference on @root unless it is already being freed, i.e. unless
 * its refcount has already dropped to zero.
 */
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
			   gfn_t end, bool can_yield, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

/* CachePC-specific: apply page-track protection of @mode to a single gfn. */
bool cpc_tdp_protect_gfn(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn,
			 int min_level, enum kvm_page_track_mode mode);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

/*
 * TDP MMU page tables are freed via RCU, so lockless walks of the paging
 * structures must run inside an RCU read-side critical section.
 */
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root.hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */
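A minimal caller-side sketch of the lockless-walk API above, assuming (as in
mainline KVM) that kvm_tdp_mmu_get_walk() returns the leaf level reached
(negative if no SPTEs map the address) and fills sptes[] indexed by level.
dump_tdp_walk is a hypothetical helper, not part of this header, and
PT64_ROOT_MAX_LEVEL is assumed to come from KVM's mmu.h.

/* Hypothetical helper: dump the SPTEs covering a guest physical address. */
static void dump_tdp_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf, level;

	/* SPTE pages are freed via RCU; walkers must hold the read lock. */
	kvm_tdp_mmu_walk_lockless_begin();
	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();

	/* A negative return means no SPTEs were found for this address. */
	if (leaf < 0)
		return;

	for (level = root_level; level >= leaf; level--)
		pr_info("level %d: spte 0x%llx\n", level, sptes[level]);
}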