cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

init.c (11202B)


// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

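/*
 * Allocate page-table memory from memblock before the slab allocator is
 * up. Failure here is fatal: without shadow page tables KASAN cannot
 * continue, so we panic rather than return NULL.
 */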
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					   MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

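/*
 * Map every page in [addr, end) to the shared kasan_early_shadow_page.
 * The PTE is write-protected, so all zero-shadow mappings alias a single
 * read-only page of zeroes.
 */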
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

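/*
 * Populate the PMD entries covering [addr, end). Fully covered,
 * PMD-aligned chunks are pointed directly at the shared early shadow PTE
 * table; for partial chunks a real PTE table is allocated (from slab if
 * available, else from memblock) and filled by zero_pte_populate().
 */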
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

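/*
 * Same as zero_pmd_populate(), one level up: fully covered PUD entries
 * reuse the shared early shadow PMD/PTE tables, partial chunks recurse
 * into zero_pmd_populate().
 */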
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

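/*
 * Same pattern at the P4D level: fully covered P4D entries are wired to
 * the shared early shadow PUD table, partial chunks recurse into
 * zero_pud_populate().
 */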
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point. The [pud,pmd]_populate*()
			 * calls below are needed only for 3- and 2-level page
			 * tables, where there are no real puds/pmds and
			 * pgd_populate()/pud_populate() are no-ops.
			 */
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

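/*
 * The kasan_free_*() helpers below tear down one level of the shadow
 * page tables: if every entry in the given table is none, the table
 * page is freed and the upper-level entry pointing at it is cleared.
 */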
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

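/*
 * The kasan_remove_*_table() helpers walk one level of the shadow page
 * tables over [addr, end) and undo zero-shadow mappings: entries that
 * point at the shared early shadow tables are cleared outright when the
 * range covers them entirely; otherwise the walk descends a level and
 * frees any lower-level table that ends up empty.
 */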
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

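/*
 * Remove the zero-shadow mappings covering [start, start + size), walking
 * down from the top-level entries. Both start and size must be aligned to
 * KASAN_MEMORY_PER_SHADOW_PAGE (the span of memory covered by one shadow
 * page); otherwise the call warns and does nothing.
 */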
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

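/*
 * Map the shadow of [start, start + size) to the shared zero-shadow page,
 * marking the range as accessible but not tracked by KASAN. On failure,
 * any partially installed mappings are removed again.
 */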
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start % KASAN_MEMORY_PER_SHADOW_PAGE) ||
	    WARN_ON(size % KASAN_MEMORY_PER_SHADOW_PAGE))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}