cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pkvm.c (2616B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/kvm_pkvm.h>

#include "hyp_constants.h"

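/* Kernel-proper view of the nVHE hypervisor's private copy of the host memblock regions. */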
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);

phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;

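/* Order memblock regions by ascending base address for sort(). */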
static int cmp_hyp_memblock(const void *p1, const void *p2)
{
	const struct memblock_region *r1 = p1;
	const struct memblock_region *r2 = p2;

	return r1->base < r2->base ? -1 : (r1->base > r2->base);
}

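/* Sort the hypervisor's copy of the memblock regions by base address. */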
static void __init sort_memblock_regions(void)
{
	sort(hyp_memory,
	     *hyp_memblock_nr_ptr,
	     sizeof(struct memblock_region),
	     cmp_hyp_memblock,
	     NULL);
}

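/*
 * Copy the host memblock regions into the hypervisor's array and sort them,
 * failing if there are more regions than HYP_MEMBLOCK_REGIONS slots.
 */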
static int __init register_memblock_regions(void)
{
	struct memblock_region *reg;

	for_each_mem_region(reg) {
		if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
			return -ENOMEM;

		hyp_memory[*hyp_memblock_nr_ptr] = *reg;
		(*hyp_memblock_nr_ptr)++;
	}
	sort_memblock_regions();

	return 0;
}

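/*
 * Reserve the physical memory needed by the protected (nVHE) hypervisor at
 * boot: its stage-1 page tables, the host stage-2 page tables and the hyp
 * vmemmap. Only runs when booting in protected KVM mode.
 */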
void __init kvm_hyp_reserve(void)
{
	u64 nr_pages, prev, hyp_mem_pages = 0;
	int ret;

	if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
		return;

	if (kvm_get_mode() != KVM_MODE_PROTECTED)
		return;

	ret = register_memblock_regions();
	if (ret) {
		*hyp_memblock_nr_ptr = 0;
		kvm_err("Failed to register hyp memblocks: %d\n", ret);
		return;
	}

	hyp_mem_pages += hyp_s1_pgtable_pages();
	hyp_mem_pages += host_s2_pgtable_pages();

	/*
	 * The hyp_vmemmap needs to be backed by pages, but these pages
	 * themselves need to be present in the vmemmap, so compute the number
	 * of pages needed by looking for a fixed point.
	 */
	nr_pages = 0;
	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
					PAGE_SIZE);
		nr_pages += __hyp_pgtable_max_pages(nr_pages);
	} while (nr_pages != prev);
	hyp_mem_pages += nr_pages;

	/*
	 * Try to allocate a PMD-aligned region to reduce TLB pressure once
	 * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
	 */
	hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
	hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
					   PMD_SIZE);
	if (!hyp_mem_base)
		hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
	else
		hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);

	if (!hyp_mem_base) {
		kvm_err("Failed to reserve hyp memory\n");
		return;
	}

	kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
		 hyp_mem_base);
}