cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

page_poison.c (2543B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmdebug.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);

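/* Parse the "page_poison=" boot parameter into the early enable flag. */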
static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);

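/* Fill a single page with the PAGE_POISON pattern via a temporary mapping. */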
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

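/* Poison a contiguous range of n pages as they are freed back to buddy. */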
void __kernel_poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

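/*
 * True when a and b differ in exactly one bit: their XOR is then a
 * non-zero power of two, which is what the error & (error - 1) test
 * detects.
 */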
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

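/*
 * Verify that a previously poisoned region still holds the PAGE_POISON
 * pattern. On mismatch, report (rate-limited) the span from the first
 * to the last corrupted byte, distinguishing a single bit flip from
 * wider corruption, and dump the page and the current stack.
 */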
static void check_poison_mem(struct page *page, unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
	dump_page(page, "pagealloc: corrupted page details");
}

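/* Check that a single page still contains the poison pattern written when it was freed. */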
static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	kasan_disable_current();
	/*
	 * When page poisoning is enabled, each and every page freed
	 * to buddy is poisoned, so no extra check is done here to see
	 * whether a page was actually poisoned.
	 */
	check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

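/* Check a contiguous range of n pages for poison corruption. */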
void __kernel_unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all work is done via poison pages. */
}
#endif