cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bugs.c (16142B)
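bugs.c implements the "logic bug" tests of LKDTM (the Linux Kernel Dump Test Module). Each lkdtm_* function below is registered in the crashtypes[] table at the bottom of the file and, with the lkdtm module loaded, can be triggered at runtime by writing its name to the debugfs interface, e.g. echo CORRUPT_STACK > /sys/kernel/debug/provoke-crash/DIRECT.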


// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
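/*
 * Worked example (values vary by architecture and config): with a
 * 16 KiB THREAD_SIZE and CONFIG_FRAME_WARN=2048, as is common on
 * x86_64, REC_STACK_SIZE is 1024 bytes and REC_NUM_DEFAULT is
 * (16384 / 1024) * 2 = 32, so the full recursion tries to consume
 * roughly twice the stack -- enough to run off the end even after
 * accounting for frames already on the stack.
 */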

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (buf[..] passed as arg)
 * - function may have external effects (memzero_explicit())
 * - no tail recursion possible
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];
	volatile int ret;

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	if (!remaining)
		ret = 0;
	else
		ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
	memzero_explicit((void *)buf, sizeof(buf));
	return ret;
}

/* If the depth is negative, use the default, otherwise keep parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

static void lkdtm_PANIC(void)
{
	panic("dumptest");
}

static void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

static void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

static void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

static void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

static void lkdtm_LOOP(void)
{
	for (;;)
		;
}

static void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
static noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}
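/*
 * Note: writing 64 bytes of 0xff over an 8-byte array clobbers the
 * canary that -fstack-protector places between the buffer and the
 * saved frame state. With CONFIG_STACKPROTECTOR, the corruption is
 * detected when lkdtm_CORRUPT_STACK() returns and the canary check
 * fails, panicking via __stack_chk_fail().
 */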

/* Same as above but will only get a canary with -fstack-protector-strong */
static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

static void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
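/*
 * Note: the first call from a given pid records the address of a stack
 * local; later calls report the delta. This shows whether the kernel
 * stack starts at the same depth on every entry: with
 * CONFIG_RANDOMIZE_KSTACK_OFFSET enabled and active, the reported
 * offset should vary between calls rather than staying at 0.
 */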

static pid_t stack_canary_pid;
static unsigned long stack_canary;
static unsigned long stack_canary_offset;

static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
{
	int i = 0;
	pid_t pid = task_pid_nr(current);
	unsigned long *canary = (unsigned long *)stack;
	unsigned long current_offset = 0, init_offset = 0;

	/* Do our best to find the canary in a 16 word window ... */
	for (i = 1; i < 16; i++) {
		canary = (unsigned long *)stack + i;
#ifdef CONFIG_STACKPROTECTOR
		if (*canary == current->stack_canary)
			current_offset = i;
		if (*canary == init_task.stack_canary)
			init_offset = i;
#endif
	}

	if (current_offset == 0) {
		/*
		 * If the canary doesn't match what's in the task_struct,
		 * we're either using a global canary or the stack frame
		 * layout changed.
		 */
		if (init_offset != 0) {
			pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
			       init_offset, pid);
		} else {
			pr_warn("FAIL: did not correctly locate stack canary :(\n");
			pr_expected_config(CONFIG_STACKPROTECTOR);
		}

		return;
	} else if (init_offset != 0) {
		pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
	}

	canary = (unsigned long *)stack + current_offset;
	if (stack_canary_pid == 0) {
		stack_canary = *canary;
		stack_canary_pid = pid;
		stack_canary_offset = current_offset;
		pr_info("Recorded stack canary for pid %d at offset %ld\n",
			stack_canary_pid, stack_canary_offset);
	} else if (pid == stack_canary_pid) {
		pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
	} else {
		if (current_offset != stack_canary_offset) {
			pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
				stack_canary_offset, current_offset);
			return;
		}

		if (*canary == stack_canary) {
			pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
				stack_canary_pid, pid, current_offset);
		} else {
			pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
				stack_canary_pid, pid, current_offset);
			/* Reset the test. */
			stack_canary_pid = 0;
		}
	}
}
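/*
 * Note: distinct per-task canaries are expected because the canary is
 * re-randomized at task creation (dup_task_struct() assigns
 * get_random_canary() under CONFIG_STACKPROTECTOR). Two different pids
 * sharing a canary value would indicate a single boot-time global
 * canary, which is far weaker: one leaked value would defeat stack
 * protection system-wide.
 */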

static void lkdtm_REPORT_STACK_CANARY(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *)) = { };

	__lkdtm_REPORT_STACK_CANARY((void *)&data);
}

static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
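/*
 * Note: "data" is 4-byte aligned, so "data + 1" sits at an odd address
 * and the 32-bit load/store through "p" straddles a natural boundary.
 * Architectures that trap on unaligned accesses will fault here, while
 * architectures with efficient unaligned access (e.g. x86) simply
 * perform the access, hence the XFAIL message.
 */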

static void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

static void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

static void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}
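/*
 * Note: HUNG_TASK parks the task in uninterruptible sleep with nothing
 * to wake it. With CONFIG_DETECT_HUNG_TASK, the khungtaskd watchdog
 * reports tasks stuck in D state longer than
 * /proc/sys/kernel/hung_task_timeout_secs (120 seconds by default).
 */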

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

static void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}


static void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
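/*
 * Worked arithmetic for both tests: value starts at INT_MAX - 2
 * (0x7ffffffd). The first "+= 1" yields INT_MAX - 1 and is always
 * fine. The second "+= 4" steps past INT_MAX: undefined behavior for
 * the signed case (caught by UBSAN's signed-overflow sanitizer when
 * enabled), but well-defined modulo-2^32 wraparound for the unsigned
 * case, which only trips a sanitizer if unsigned overflow checking is
 * explicitly built in.
 */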

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

static void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
	if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
		pr_expected_config(CONFIG_UBSAN_TRAP);
	else
		pr_expected_config(CONFIG_UBSAN_BOUNDS);
}

static void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_add() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}
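/*
 * For reference, a sketch of the sanity check CONFIG_DEBUG_LIST runs
 * before an insertion (paraphrased from lib/list_debug.c's
 * __list_add_valid(); exact helpers and messages vary by kernel
 * version). The corrupted "next" above fails the next->prev == prev
 * test, so the add is refused and reported instead of writing through
 * the attacker-controlled pointer:
 *
 *	bool __list_add_valid(struct list_head *new,
 *			      struct list_head *prev,
 *			      struct list_head *next)
 *	{
 *		if (next->prev != prev ||	// linkage broken
 *		    prev->next != next ||	// linkage broken
 *		    new == prev || new == next)	// double add
 *			return false;		// reject and warn
 *		return true;
 *	}
 */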

static void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else {
		pr_err("list_del() corruption not detected!\n");
		pr_expected_config(CONFIG_DEBUG_LIST);
	}
}

/* Test that VMAP_STACK is actually allocating with a leading guard page */
static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
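/*
 * Note: with CONFIG_VMAP_STACK, task stacks come from vmalloc space,
 * where allocations are separated by unmapped guard pages. Reading one
 * byte below the stack base or at stack + THREAD_SIZE should therefore
 * land in a guard page and fault; surviving the read means the guard
 * page is missing (e.g. stacks allocated directly from the page
 * allocator).
 */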

static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}
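/*
 * Note: this exercises x86 CR4 pinning (cr4_pinned_bits in
 * arch/x86/kernel/cpu/common.c). native_write_cr4() verifies
 * security-critical bits such as SMEP after the register write and
 * puts them back if they were lost. Even when the test calls the
 * located mov-to-%cr4 gadget directly, execution falls through into
 * that post-write check, which is why the expected outcome is
 * "SMEP removal was reverted".
 */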

static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero.  This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault.  The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window.  This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * A PAC is a hash computed from the input keys, the return address,
	 * and the stack pointer. Since the PAC occupies only a few bits,
	 * collisions are possible, so iterate several times to reduce the
	 * collision probability.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(BUG),
	CRASHTYPE(WARNING),
	CRASHTYPE(WARNING_MESSAGE),
	CRASHTYPE(EXCEPTION),
	CRASHTYPE(LOOP),
	CRASHTYPE(EXHAUST_STACK),
	CRASHTYPE(CORRUPT_STACK),
	CRASHTYPE(CORRUPT_STACK_STRONG),
	CRASHTYPE(REPORT_STACK),
	CRASHTYPE(REPORT_STACK_CANARY),
	CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
	CRASHTYPE(SOFTLOCKUP),
	CRASHTYPE(HARDLOCKUP),
	CRASHTYPE(SPINLOCKUP),
	CRASHTYPE(HUNG_TASK),
	CRASHTYPE(OVERFLOW_SIGNED),
	CRASHTYPE(OVERFLOW_UNSIGNED),
	CRASHTYPE(ARRAY_BOUNDS),
	CRASHTYPE(CORRUPT_LIST_ADD),
	CRASHTYPE(CORRUPT_LIST_DEL),
	CRASHTYPE(STACK_GUARD_PAGE_LEADING),
	CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
	CRASHTYPE(UNSET_SMEP),
	CRASHTYPE(DOUBLE_FAULT),
	CRASHTYPE(CORRUPT_PAC),
};

struct crashtype_category bugs_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};