cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

highmem.c (2589B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * HIGHMEM API:
 *
 * The kmap() API provides sleep semantics, hence its mappings are referred
 * to as "permanent maps". It allows mapping LAST_PKMAP pages, using
 * @last_pkmap_nr as the cursor for book-keeping.
 *
 * kmap_atomic() can't sleep (it calls pagefault_disable()), thus it provides
 * short-lived "temporary mappings", which historically were implemented as
 * fixmaps (compile-time addresses etc). Their book-keeping is done per CPU.
 *
 *	Both these facts combined (preemption disabled and per-cpu allocation)
 *	mean the total number of concurrent fixmaps is limited to the max
 *	such allocations in a single control path. Thus KM_TYPE_NR (another
 *	historic relic) is a smallish number which caps the max percpu fixmaps.
 *
 * ARC HIGHMEM details:
 *
 * - The kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
 *   is now shared between vmalloc and kmap (non-overlapping though).
 *
 * - Fixmap and pkmap each use a dedicated page table, hooked up to the swapper
 *   PGD. Each thus has only 1 PGDIR_SIZE worth of kvaddr mappings, i.e. 2M of
 *   kvaddr space for the typical config (8K page and 11:8:13 traversal split:
 *   256 PTEs * 8K per page = 2M).
 *
 * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
 *   CPU, so the number of CPUs sharing a single PTE page is limited.
 *
 * - pkmap, being preemptible, could in theory do with more than 256 concurrent
 *   mappings. However, the generic pkmap code, map_new_virtual(), doesn't
 *   traverse the PGD and only works with the single page table
 *   @pkmap_page_table, hence the limit.
 */
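
/*
 * Example usage from generic kernel code (illustrative sketch only;
 * @page is a hypothetical highmem struct page, @dst a kernel buffer,
 * and error handling is elided):
 *
 *	void *vaddr;
 *
 *	vaddr = kmap(page);		// "permanent map", may sleep
 *	memcpy(dst, vaddr, PAGE_SIZE);
 *	kunmap(page);
 *
 *	vaddr = kmap_atomic(page);	// no sleeping until kunmap_atomic()
 *	memcpy(dst, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 */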

extern pte_t *pkmap_page_table;

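/*
 * Grab a zeroed PTE page from memblock and hook it into the kernel
 * (swapper) page tables at the PMD entry covering @kvaddr.
 */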
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pmd_t *pmd_k = pmd_off_k(kvaddr);
	pte_t *pte_k;

	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
	return pte_k;
}

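/*
 * Early boot setup: sanity-check the kvaddr layout at compile time,
 * then allocate the single PTE page each for pkmap and fixmap.
 */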
void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);

	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
	alloc_kmap_pgtable(FIXMAP_BASE);
}