mmu.c (19100B)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#ifdef CONFIG_64BIT
static unsigned long gstage_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels = 3;
#define gstage_index_bits	9
#else
static unsigned long gstage_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels = 2;
#define gstage_index_bits	10
#endif

#define gstage_pgd_xbits	2
#define gstage_pgd_size		(1UL << (HGATP_PAGE_SHIFT + gstage_pgd_xbits))
#define gstage_gpa_bits		(HGATP_PAGE_SHIFT + \
				 (gstage_pgd_levels * gstage_index_bits) + \
				 gstage_pgd_xbits)
#define gstage_gpa_size		((gpa_t)(1ULL << gstage_gpa_bits))

#define gstage_pte_leaf(__ptep)	\
	(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))

static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
{
	unsigned long mask;
	unsigned long shift = HGATP_PAGE_SHIFT + (gstage_index_bits * level);

	if (level == (gstage_pgd_levels - 1))
		mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1;
	else
		mask = PTRS_PER_PTE - 1;

	return (addr >> shift) & mask;
}

static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
{
	return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
}

static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
{
	u32 i;
	unsigned long psz = 1UL << 12;

	for (i = 0; i < gstage_pgd_levels; i++) {
		if (page_size == (psz << (i * gstage_index_bits))) {
			*out_level = i;
			return 0;
		}
	}

	return -EINVAL;
}

static int gstage_level_to_page_order(u32 level, unsigned long *out_pgorder)
{
	if (gstage_pgd_levels < level)
		return -EINVAL;

	*out_pgorder = 12 + (level * gstage_index_bits);
	return 0;
}

static int gstage_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
	int rc;
	unsigned long page_order = PAGE_SHIFT;

	rc = gstage_level_to_page_order(level, &page_order);
	if (rc)
		return rc;

	*out_pgsize = BIT(page_order);
	return 0;
}

static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
				  pte_t **ptepp, u32 *ptep_level)
{
	pte_t *ptep;
	u32 current_level = gstage_pgd_levels - 1;

	*ptep_level = current_level;
	ptep = (pte_t *)kvm->arch.pgd;
	ptep = &ptep[gstage_pte_index(addr, current_level)];
	while (ptep && pte_val(*ptep)) {
		if (gstage_pte_leaf(ptep)) {
			*ptep_level = current_level;
			*ptepp = ptep;
			return true;
		}

		if (current_level) {
			current_level--;
			*ptep_level = current_level;
			ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
			ptep = &ptep[gstage_pte_index(addr, current_level)];
		} else {
			ptep = NULL;
		}
	}

	return false;
}

static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
	unsigned long order = PAGE_SHIFT;

	if (gstage_level_to_page_order(level, &order))
		return;
	addr &= ~(BIT(order) - 1);

	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
}
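/*
 * Worked example for the geometry above, assuming the default 64-bit
 * Sv39x4 configuration: gstage_gpa_bits = 12 + (3 * 9) + 2 = 41, so the
 * guest physical address space spans 2^41 bytes and the root page table
 * is 1 << (12 + 2) = 16KiB (four pages), which is why the top-level
 * index in gstage_pte_index() gets two extra bits. A GPA is then split
 * as (gpa >> 30) & 0x7ff at level 2, (gpa >> 21) & 0x1ff at level 1 and
 * (gpa >> 12) & 0x1ff at level 0.
 */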
static int gstage_set_pte(struct kvm *kvm, u32 level,
			  struct kvm_mmu_memory_cache *pcache,
			  gpa_t addr, const pte_t *new_pte)
{
	u32 current_level = gstage_pgd_levels - 1;
	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
	pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];

	if (current_level < level)
		return -EINVAL;

	while (current_level != level) {
		if (gstage_pte_leaf(ptep))
			return -EEXIST;

		if (!pte_val(*ptep)) {
			if (!pcache)
				return -ENOMEM;
			next_ptep = kvm_mmu_memory_cache_alloc(pcache);
			if (!next_ptep)
				return -ENOMEM;
			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
					__pgprot(_PAGE_TABLE));
		} else {
			if (gstage_pte_leaf(ptep))
				return -EEXIST;
			next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
		}

		current_level--;
		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
	}

	*ptep = *new_pte;
	if (gstage_pte_leaf(ptep))
		gstage_remote_tlb_flush(kvm, current_level, addr);

	return 0;
}
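/*
 * Note on gstage_set_pte() above: the walk starts at the root level
 * (gstage_pgd_levels - 1) and descends until it reaches the requested
 * level, so installing a 4KiB leaf can require up to
 * gstage_pgd_levels - 1 new intermediate page-table pages. Callers
 * therefore top up @pcache with gstage_pgd_levels entries beforehand
 * (see gstage_ioremap() and kvm_riscv_gstage_map() below), which covers
 * the worst case.
 */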
static int gstage_map_page(struct kvm *kvm,
			   struct kvm_mmu_memory_cache *pcache,
			   gpa_t gpa, phys_addr_t hpa,
			   unsigned long page_size,
			   bool page_rdonly, bool page_exec)
{
	int ret;
	u32 level = 0;
	pte_t new_pte;
	pgprot_t prot;

	ret = gstage_page_size_to_level(page_size, &level);
	if (ret)
		return ret;

	/*
	 * A RISC-V implementation can choose to either:
	 * 1) Update 'A' and 'D' PTE bits in hardware
	 * 2) Generate page fault when 'A' and/or 'D' bits are not set
	 *    in the PTE so that software can update these bits.
	 *
	 * We support both options mentioned above. To achieve this, we
	 * always set 'A' and 'D' PTE bits at time of creating G-stage
	 * mapping. To support KVM dirty page logging with both options
	 * mentioned above, we will write-protect G-stage PTEs to track
	 * dirty pages.
	 */

	if (page_exec) {
		if (page_rdonly)
			prot = PAGE_READ_EXEC;
		else
			prot = PAGE_WRITE_EXEC;
	} else {
		if (page_rdonly)
			prot = PAGE_READ;
		else
			prot = PAGE_WRITE;
	}
	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
	new_pte = pte_mkdirty(new_pte);

	return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
}

enum gstage_op {
	GSTAGE_OP_NOP = 0,	/* Nothing */
	GSTAGE_OP_CLEAR,	/* Clear/Unmap */
	GSTAGE_OP_WP,		/* Write-protect */
};

static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
			  pte_t *ptep, u32 ptep_level, enum gstage_op op)
{
	int i, ret;
	pte_t *next_ptep;
	u32 next_ptep_level;
	unsigned long next_page_size, page_size;

	ret = gstage_level_to_page_size(ptep_level, &page_size);
	if (ret)
		return;

	BUG_ON(addr & (page_size - 1));

	if (!pte_val(*ptep))
		return;

	if (ptep_level && !gstage_pte_leaf(ptep)) {
		next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
		next_ptep_level = ptep_level - 1;
		ret = gstage_level_to_page_size(next_ptep_level,
						&next_page_size);
		if (ret)
			return;

		if (op == GSTAGE_OP_CLEAR)
			set_pte(ptep, __pte(0));
		for (i = 0; i < PTRS_PER_PTE; i++)
			gstage_op_pte(kvm, addr + i * next_page_size,
				      &next_ptep[i], next_ptep_level, op);
		if (op == GSTAGE_OP_CLEAR)
			put_page(virt_to_page(next_ptep));
	} else {
		if (op == GSTAGE_OP_CLEAR)
			set_pte(ptep, __pte(0));
		else if (op == GSTAGE_OP_WP)
			set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
		gstage_remote_tlb_flush(kvm, ptep_level, addr);
	}
}
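/*
 * gstage_op_pte() above is the single worker behind both unmap and
 * write-protect: for a non-leaf entry, GSTAGE_OP_CLEAR first zaps the
 * entry, recurses into all PTRS_PER_PTE children and finally drops the
 * reference on the now-unlinked table page; GSTAGE_OP_WP only clears
 * _PAGE_WRITE on leaf PTEs so the next guest store faults and can be
 * recorded in the dirty log. Leaf updates are followed by a
 * gstage_remote_tlb_flush() of the corresponding range.
 */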
static void gstage_unmap_range(struct kvm *kvm, gpa_t start,
			       gpa_t size, bool may_block)
{
	int ret;
	pte_t *ptep;
	u32 ptep_level;
	bool found_leaf;
	unsigned long page_size;
	gpa_t addr = start, end = start + size;

	while (addr < end) {
		found_leaf = gstage_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = gstage_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

		if (!found_leaf)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			gstage_op_pte(kvm, addr, ptep,
				      ptep_level, GSTAGE_OP_CLEAR);

next:
		addr += page_size;

		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (may_block && addr < end)
			cond_resched_lock(&kvm->mmu_lock);
	}
}

static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
	int ret;
	pte_t *ptep;
	u32 ptep_level;
	bool found_leaf;
	gpa_t addr = start;
	unsigned long page_size;

	while (addr < end) {
		found_leaf = gstage_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = gstage_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

		if (!found_leaf)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			gstage_op_pte(kvm, addr, ptep,
				      ptep_level, GSTAGE_OP_WP);

next:
		addr += page_size;
	}
}

static void gstage_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	gstage_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable)
{
	pte_t pte;
	int ret = 0;
	unsigned long pfn;
	phys_addr_t addr, end;
	struct kvm_mmu_memory_cache pcache;

	memset(&pcache, 0, sizeof(pcache));
	pcache.gfp_zero = __GFP_ZERO;

	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(hpa);

	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
		pte = pfn_pte(pfn, PAGE_KERNEL);

		if (!writable)
			pte = pte_wrprotect(pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	kvm_mmu_free_memory_cache(&pcache);
	return ret;
}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	gstage_wp_range(kvm, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_gstage_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	gstage_unmap_range(kvm, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}
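/*
 * Dirty page logging ties the pieces above together: when a memslot is
 * created with or switched to KVM_MEM_LOG_DIRTY_PAGES,
 * kvm_arch_commit_memory_region() below write-protects the whole slot
 * via gstage_wp_memory_region(); afterwards the generic dirty-log code
 * calls kvm_arch_mmu_enable_log_dirty_pt_masked() with one word of the
 * dirty bitmap at a time to re-protect pages that were reported dirty.
 * Note that the helper protects the whole span from the lowest to the
 * highest set bit in @mask, including any clear bits in between.
 */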
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/*
	 * At this point the memslot has been committed and there is an
	 * allocated dirty_bitmap[]; dirty pages will be tracked while
	 * the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
		gstage_wp_memory_region(kvm, new->id);
}
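/*
 * kvm_arch_prepare_memory_region() below only validates the new slot
 * and eagerly maps VM_PFNMAP (device/IO) VMAs through gstage_ioremap();
 * regular RAM backing a memslot is not touched here and gets mapped
 * lazily, one fault at a time, by kvm_riscv_gstage_map().
 */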
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	hva_t hva, reg_end, size;
	gpa_t base_gpa;
	bool writable;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the
	 * GPA space addressable by the KVM guest.
	 */
	if ((new->base_gfn + new->npages) >=
	    (gstage_gpa_size >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	size = new->npages << PAGE_SHIFT;
	reg_end = hva + size;
	base_gpa = new->base_gfn << PAGE_SHIFT;
	writable = !(new->flags & KVM_MEM_READONLY);

	mmap_read_lock(current->mm);

	/*
	 * A memory region could potentially cover multiple VMAs, and
	 * any holes between them, so iterate over all of them to find
	 * out if we can map any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/* Take the intersection of this VMA with the memory region */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = base_gpa + (vm_start - hva);
			phys_addr_t pa;

			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
			pa += vm_start - vma->vm_start;

			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				goto out;
			}

			ret = gstage_ioremap(kvm, gpa, pa,
					     vm_end - vm_start, writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	if (change == KVM_MR_FLAGS_ONLY)
		goto out;

	spin_lock(&kvm->mmu_lock);
	if (ret)
		gstage_unmap_range(kvm, base_gpa, size, false);
	spin_unlock(&kvm->mmu_lock);

out:
	mmap_read_unlock(current->mm);
	return ret;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.pgd)
		return false;

	gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
			   (range->end - range->start) << PAGE_SHIFT,
			   range->may_block);
	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	int ret;
	kvm_pfn_t pfn = pte_pfn(range->pte);

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(range->end - range->start != 1);

	ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT,
			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
	if (ret) {
		kvm_debug("Failed to map G-stage page (error %d)\n", ret);
		return true;
	}

	return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return false;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return pte_young(*ptep);
}
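/*
 * kvm_riscv_gstage_map() below is the G-stage fault handler invoked
 * from the VCPU exit path. It derives the mapping size from the backing
 * VMA (4KiB normally, PMD_SIZE or PGDIR_SIZE for hugetlbfs mappings),
 * falls back to 4KiB pages when dirty logging is active or the VMA is
 * VM_PFNMAP, and translates gfn to hfn with gfn_to_pfn_prot(). The
 * mmu_notifier_seq snapshot taken before that translation is rechecked
 * under kvm->mmu_lock via mmu_notifier_retry(); if an invalidation
 * raced, the mapping is simply not installed and the guest re-faults.
 */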
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write)
{
	int ret;
	kvm_pfn_t hfn;
	bool writeable;
	short vma_pageshift;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
	unsigned long vma_pagesize, mmu_seq;

	mmap_read_lock(current->mm);

	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		mmap_read_unlock(current->mm);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma))
		vma_pageshift = huge_page_shift(hstate_vma(vma));
	else
		vma_pageshift = PAGE_SHIFT;
	vma_pagesize = 1ULL << vma_pageshift;
	if (logging || (vma->vm_flags & VM_PFNMAP))
		vma_pagesize = PAGE_SIZE;

	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

	mmap_read_unlock(current->mm);

	if (vma_pagesize != PGDIR_SIZE &&
	    vma_pagesize != PMD_SIZE &&
	    vma_pagesize != PAGE_SIZE) {
		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
	if (ret) {
		kvm_err("Failed to topup G-stage cache\n");
		return ret;
	}

	mmu_seq = kvm->mmu_notifier_seq;

	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
				vma_pageshift, current);
		return 0;
	}
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	/*
	 * If logging is active then we allow writable pages only
	 * for write faults.
	 */
	if (logging && !is_write)
		writeable = false;

	spin_lock(&kvm->mmu_lock);

	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (writeable) {
		kvm_set_pfn_dirty(hfn);
		mark_page_dirty(kvm, gfn);
		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, false, true);
	} else {
		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, true, true);
	}

	if (ret)
		kvm_err("Failed to map in G-stage\n");

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(hfn);
	kvm_release_pfn_clean(hfn);
	return ret;
}

int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
			       get_order(gstage_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
	kvm->arch.pgd_phys = page_to_phys(pgd_page);

	return 0;
}

void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
{
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
	}
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(gstage_pgd_size));
}

void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
{
	unsigned long hgatp = gstage_mode;
	struct kvm_arch *k = &vcpu->kvm->arch;

	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
		 HGATP_VMID_MASK;
	hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

	csr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_gstage_vmid_bits())
		kvm_riscv_local_hfence_gvma_all();
}
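/*
 * kvm_riscv_gstage_mode_detect() below relies on hgatp.MODE being a
 * WARL field: it writes the Sv57x4 and then the Sv48x4 encoding and
 * reads the CSR back; if the value did not stick, that mode is
 * unsupported and the compile-time default (Sv39x4) is kept. The final
 * write of zero plus the local HFENCE.GVMA leave the CSR and local
 * translation caches in a clean state before any guest runs.
 */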
void kvm_riscv_gstage_mode_detect(void)
{
#ifdef CONFIG_64BIT
	/* Try Sv57x4 G-stage mode */
	csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
		gstage_mode = (HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
		gstage_pgd_levels = 5;
		goto skip_sv48x4_test;
	}

	/* Try Sv48x4 G-stage mode */
	csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
		gstage_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
		gstage_pgd_levels = 4;
	}
skip_sv48x4_test:

	csr_write(CSR_HGATP, 0);
	kvm_riscv_local_hfence_gvma_all();
#endif
}

unsigned long kvm_riscv_gstage_mode(void)
{
	return gstage_mode >> HGATP_MODE_SHIFT;
}

int kvm_riscv_gstage_gpa_bits(void)
{
	return gstage_gpa_bits;
}