cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kasan_init.c (13967B)


// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
 * which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping so populated at the
 *   lower levels pud/p4d
 *
 * In addition, when shallow populating a kasan region (for example vmalloc),
 * this region may also not be aligned on PGDIR size, so we must go down to the
 * pud level too.
 */
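
/*
 * For orientation, a rough sketch of the shadow mapping assumed throughout
 * this file: generic KASAN maps every 8 bytes of kernel address space onto
 * one shadow byte, roughly
 *
 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * with KASAN_SHADOW_SCALE_SHIFT == 3, which is the relation the
 * BUILD_BUG_ON() in kasan_early_init() below checks against KASAN_SHADOW_END.
 */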

extern pgd_t early_pg_dir[PTRS_PER_PGD];

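/*
 * Populate the leaf PTE level of the shadow mapping: for every 4K page in
 * [vaddr, end) whose PTE is still empty, allocate a backing shadow page from
 * memblock, then point the PMD entry at the (possibly pre-existing) PTE table.
 */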
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

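/*
 * Populate the PMD level: whenever a PMD-sized (2MB) chunk of the shadow
 * region is aligned and still unmapped, try to back it with one contiguous
 * memblock allocation; otherwise, or if that allocation fails, fall back to
 * populating individual PTEs.
 */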
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PMD table to be populated before setting the
	 * PUD that points to it: if we installed the PUD first, memblock
	 * could allocate a page at a physical address where the KASAN shadow
	 * is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here: it returns a linear
		 * mapping address, and the linear mapping is not set up yet.
		 * When populating early_pg_dir we need the physical address,
		 * and when populating swapper_pg_dir we need the kernel
		 * virtual address, so go through the pt_ops facility.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else if (pgd_none(*pgd)) {
		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
			memcpy(base_pud, (void *)kasan_early_shadow_pud,
			       sizeof(pud_t) * PTRS_PER_PUD);
		}
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PUD table to be populated before setting the
	 * PGD that points to it: if we installed the PGD first, memblock
	 * could allocate a page at a physical address where the KASAN shadow
	 * is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

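/*
 * Same walk one level up, only reached when pgtable_l5_enabled (sv57): try
 * P4D-sized mappings first, otherwise recurse into kasan_populate_pud().
 */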
static void __init kasan_populate_p4d(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	p4d_t *p4dp, *base_p4d;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here: it returns a linear
		 * mapping address, and the linear mapping is not set up yet.
		 * When populating early_pg_dir we need the physical address,
		 * and when populating swapper_pg_dir we need the kernel
		 * virtual address, so go through the pt_ops facility.
		 */
		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		if (base_p4d == lm_alias(kasan_early_shadow_p4d))
			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
	}

	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
			if (early) {
				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
				if (phys_addr) {
					set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
	} while (p4dp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole P4D table to be populated before setting the
	 * PGD that points to it: if we installed the PGD first, memblock
	 * could allocate a page at a physical address where the KASAN shadow
	 * is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
}

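/*
 * The paging mode (sv39/sv48/sv57) is only known at runtime, so these helpers
 * pick the page-table level directly below the PGD: p4d when
 * pgtable_l5_enabled, pud when pgtable_l4_enabled, pmd otherwise.
 */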
#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
				(uintptr_t)kasan_early_shadow_p4d :		\
							(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd))
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l5_enabled ?						\
		kasan_populate_p4d(pgdp, vaddr, next, early) :			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))

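/*
 * Top-level walker: for fully covered, PGDIR-aligned ranges either point the
 * PGD entry at the early shadow tables (early pass) or try to replace the
 * early mapping with a PGDIR_SIZE memblock allocation; everything else is
 * handed down to the next level.
 */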
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pgd_next: if this is
				 * still the case, we can try to allocate a
				 * hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

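/*
 * Runs before the final page tables are in place: chain the preallocated
 * early shadow tables together so that the whole KASAN shadow region maps to
 * the single zeroed kasan_early_shadow_page, which keeps shadow reads safe
 * until kasan_init() allocates the real shadow memory.
 */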
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pmd))),
					PAGE_TABLE));
	}

	if (pgtable_l5_enabled) {
		for (i = 0; i < PTRS_PER_P4D; ++i)
			set_p4d(kasan_early_shadow_p4d + i,
				pfn_p4d(PFN_DOWN
					(__pa(((uintptr_t)kasan_early_shadow_pud))),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

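/*
 * Re-run the early population against swapper_pg_dir so the shadow region
 * stays backed by the early shadow tables once the kernel stops using
 * early_pg_dir.
 */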
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

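/*
 * Allocate real shadow memory for [start, end) in swapper_pg_dir and mark it
 * fully accessible (KASAN_SHADOW_INIT is 0 for generic KASAN).
 */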
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

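/*
 * "Shallow" population, used for the vmalloc shadow when KASAN_VMALLOC is
 * enabled: only the intermediate page-table levels are allocated here, and
 * entries still pointing into the early shadow are cleared or copied, so the
 * leaf shadow pages can be installed lazily as vmalloc memory is mapped.
 */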
static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pmd_t *pmdp, *base_pmd;
	bool is_kasan_pte;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));

		if (is_kasan_pte)
			pmd_clear(pmdp);
	} while (pmdp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (!is_kasan_pmd)
			continue;

		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
			continue;

		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	p4d_t *p4dp, *base_p4d;
	pud_t *base_pud;
	bool is_kasan_pud;

	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
	p4dp = base_p4d + p4d_index(vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

		if (!is_kasan_pud)
			continue;

		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));

		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
			continue;

		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
}

#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)			\
		(pgtable_l5_enabled ?						\
		kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
		(pgtable_l4_enabled ?						\
		kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
		kasan_shallow_populate_pmd(pgdp, vaddr, next)))

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

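/*
 * Main KASAN setup: shallow-populate the vmalloc shadow (when
 * CONFIG_KASAN_VMALLOC), allocate real shadow for the linear mapping and for
 * the kernel/BPF/modules region, then make the early shadow page read-only
 * and unpoison it before enabling reports via kasan_depth = 0.
 */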
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}