cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

shadow.c (16490B)


// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

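/*
 * Out-of-line range checks backing the kasan_check_read()/kasan_check_write()
 * annotations; they report and return false when the access is invalid.
 */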
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
        return kasan_check_range((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
        return kasan_check_range((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

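/*
 * Instrumented replacements for the standard string routines: validate the
 * source and/or destination ranges first, then defer to the uninstrumented
 * __memset()/__memmove()/__memcpy() implementations.
 */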
#undef memset
void *memset(void *addr, int c, size_t len)
{
        if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
                return NULL;

        return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
            !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
            !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memcpy(dest, src, len);
}

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
        void *shadow_start, *shadow_end;

        if (!kasan_arch_is_ready())
                return;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        addr = kasan_reset_tag(addr);

        /* Skip KFENCE memory if called explicitly outside of sl*b. */
        if (is_kfence_address(addr))
                return;

        if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
                return;
        if (WARN_ON(size & KASAN_GRANULE_MASK))
                return;

        shadow_start = kasan_mem_to_shadow(addr);
        shadow_end = kasan_mem_to_shadow(addr + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);

#ifdef CONFIG_KASAN_GENERIC
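/*
 * In the generic mode, a shadow byte holding a value between 1 and
 * KASAN_GRANULE_SIZE - 1 means that only that many leading bytes of the
 * granule are accessible; this helper records that value for the last,
 * partially used granule of an object.
 */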
void kasan_poison_last_granule(const void *addr, size_t size)
{
        if (!kasan_arch_is_ready())
                return;

        if (size & KASAN_GRANULE_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
                *shadow = size & KASAN_GRANULE_MASK;
        }
}
#endif

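/*
 * Mark every granule covering [addr, addr + size) as accessible (using the
 * pointer's tag for the software tag-based mode), then, for the generic mode,
 * record the exact accessible length of a partially used last granule.
 */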
void kasan_unpoison(const void *addr, size_t size, bool init)
{
        u8 tag = get_tag(addr);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        addr = kasan_reset_tag(addr);

        /*
         * Skip KFENCE memory if called explicitly outside of sl*b. Also note
         * that calls to ksize(), where size is not a multiple of machine-word
         * size, would otherwise poison the invalid portion of the word.
         */
        if (is_kfence_address(addr))
                return;

        if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
                return;

        /* Unpoison all granules that cover the object. */
        kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

        /* Partially poison the last granule for the generic mode. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                kasan_poison_last_granule(addr, size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
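/* Walk the kernel page tables to check whether a shadow address is already mapped. */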
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(): the former is
         * arch-specific and the latter depends on HUGETLB_PAGE. So we abuse
         * pud_bad() instead: if the pud is bad, it is bad because it is huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

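/*
 * Memory hotplug notifier: map shadow for memory that is going online and
 * release vmalloc-backed shadow again when onlining is cancelled or the
 * memory goes offline.
 */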
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
                WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow is already mapped, it must have been mapped
                 * during boot. This can happen if we are onlining previously
                 * offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow.
                 * A non-NULL result from find_vm_area() tells us that it was
                 * indeed the latter case.
                 *
                 * Currently it's not possible to free shadow mapped
                 * during boot by kasan_init(). It's because the code
                 * to do that hasn't been written yet. So we'll just
                 * leak the memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
                                                       unsigned long size)
{
}

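/*
 * Back one shadow PTE with a freshly allocated page initialized to
 * KASAN_VMALLOC_INVALID. The PTE is installed under init_mm.page_table_lock
 * so that a racing populate does not map the same slot twice; the loser
 * frees its page.
 */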
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                      void *unused)
{
        unsigned long page;
        pte_t pte;

        if (likely(!pte_none(*ptep)))
                return 0;

        page = __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

        spin_lock(&init_mm.page_table_lock);
        if (likely(pte_none(*ptep))) {
                set_pte_at(&init_mm, addr, ptep, pte);
                page = 0;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (page)
                free_page(page);
        return 0;
}

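/*
 * Make sure the shadow covering the vmalloc region [addr, addr + size) is
 * backed by real pages before the mapping is handed out.
 */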
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
        unsigned long shadow_start, shadow_end;
        int ret;

        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;

        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
        shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);

        ret = apply_to_page_range(&init_mm, shadow_start,
                                  shadow_end - shadow_start,
                                  kasan_populate_vmalloc_pte, NULL);
        if (ret)
                return ret;

        flush_cache_vmap(shadow_start, shadow_end);

        /*
         * We need to be careful about inter-cpu effects here. Consider:
         *
         *   CPU#0                              CPU#1
         * WRITE_ONCE(p, vmalloc(100));         while (x = READ_ONCE(p)) ;
         *                                      p[99] = 1;
         *
         * With compiler instrumentation, that ends up looking like this:
         *
         *   CPU#0                              CPU#1
         * // vmalloc() allocates memory
         * // let a = area->addr
         * // we reach kasan_populate_vmalloc
         * // and call kasan_unpoison:
         * STORE shadow(a), unpoison_val
         * ...
         * STORE shadow(a+99), unpoison_val     x = LOAD p
         * // rest of vmalloc process           <data dependency>
         * STORE p, a                           LOAD shadow(x+99)
         *
         * If there is no barrier between the end of unpoisoning the shadow
         * and the store of the result to p, the stores could be committed
         * in a different order by CPU#0, and CPU#1 could erroneously observe
         * poison in the shadow.
         *
         * We need some sort of barrier between the stores.
         *
         * In the vmalloc() case, this is provided by a smp_wmb() in
         * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
         * get_vm_area() and friends, the caller gets shadow allocated but
         * doesn't have any pages mapped into the virtual address space that
         * has been reserved. Mapping those pages in will involve taking and
         * releasing a page-table lock, which will provide the barrier.
         */

        return 0;
}

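/*
 * Inverse of kasan_populate_vmalloc_pte(): clear the shadow PTE under
 * init_mm.page_table_lock and free the backing page it pointed to.
 */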
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
{
        unsigned long page;

        page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

        spin_lock(&init_mm.page_table_lock);

        if (likely(!pte_none(*ptep))) {
                pte_clear(&init_mm, addr, ptep);
                free_page(page);
        }
        spin_unlock(&init_mm.page_table_lock);

        return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end)
{
        void *shadow_start, *shadow_end;
        unsigned long region_start, region_end;
        unsigned long size;

        region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
        region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

        free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

        if (start != region_start &&
            free_region_start < region_start)
                region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

        free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

        if (end != region_end &&
            free_region_end > region_end)
                region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);

        if (shadow_end > shadow_start) {
                size = shadow_end - shadow_start;
                apply_to_existing_page_range(&init_mm,
                                             (unsigned long)shadow_start,
                                             size, kasan_depopulate_vmalloc_pte,
                                             NULL);
                flush_tlb_kernel_range((unsigned long)shadow_start,
                                       (unsigned long)shadow_end);
        }
}

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
                               kasan_vmalloc_flags_t flags)
{
        /*
         * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
         * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
         * Software KASAN modes can't optimize zeroing memory by combining it
         * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
         */

        if (!is_vmalloc_or_module_addr(start))
                return (void *)start;

        /*
         * Don't tag executable memory with the tag-based mode.
         * The kernel doesn't tolerate having the PC register tagged.
         */
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
            !(flags & KASAN_VMALLOC_PROT_NORMAL))
                return (void *)start;

        start = set_tag(start, kasan_random_tag());
        kasan_unpoison(start, size, false);
        return (void *)start;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        size = round_up(size, KASAN_GRANULE_SIZE);
        kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

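/*
 * Without CONFIG_KASAN_VMALLOC, module shadow is allocated separately here
 * and the module's vm_struct is tagged VM_KASAN so that
 * kasan_free_module_shadow() can release the shadow when the module mapping
 * is freed.
 */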
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
                                KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                struct vm_struct *vm = find_vm_area(addr);
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                vm->flags |= VM_KASAN;
                kmemleak_ignore(ret);

                if (vm->flags & VM_DEFER_KMEMLEAK)
                        kmemleak_vmalloc(vm, size, gfp_mask);

                return 0;
        }

        return -ENOMEM;
}

void kasan_free_module_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

#endif