kvm_mmu.h (9222B)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
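
/*
 * Worked example (editor's illustration, not part of the upstream
 * comment; the physical address is hypothetical): with VA_BITS = 48 and
 * __hyp_idmap_text_start at PA 0x40210000, bit 47 of that address is
 * clear, so the idmap sits in the bottom half of the 48-bit range and
 * EL2 takes the top half:
 *
 *	HYP_VA_MIN = 1 << 47 = 0x0000_8000_0000_0000
 *	HYP_VA_MAX = 0x0000_ffff_ffff_ffff
 *
 * The idmapped trampoline and the runtime EL2 VA range therefore never
 * collide within the same set of page tables.
 */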

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);
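
/*
 * Illustrative sketch (editor's addition, not part of the upstream
 * header): sharing a kernel object with EL2 in nVHE mode and obtaining
 * the alias that EL2 code will use. "obj" and its type are hypothetical;
 * kvm_share_hyp() and kern_hyp_va() are declared/defined above:
 *
 *	ret = kvm_share_hyp(obj, obj + 1);
 *	if (ret)
 *		return ret;
 *	hyp_obj = kern_hyp_va(obj);
 *	// hyp_obj is only dereferenceable by code running at EL2
 */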

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		icache_inval_all_pou();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
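
/*
 * Illustrative use of the SRCU-wrapped accessors above (editor's
 * sketch, appended after the header; the IPA and the structure being
 * copied are hypothetical). kvm_read_guest_lock() takes and drops
 * kvm->srcu around the actual copy, so the caller does not need to be
 * inside an srcu read-side critical section:
 *
 *	struct demo_entry entry;
 *	gpa_t ipa = 0x80001000;
 *	int ret;
 *
 *	ret = kvm_read_guest_lock(kvm, ipa, &entry, sizeof(entry));
 *	if (ret)
 *		return ret;
 */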