cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

traps.c (15470B)


/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>

#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>

/*
 * Machine specific interrupt handlers
 */

static void do_illegal_instruction(struct pt_regs *regs);
static void do_div0(struct pt_regs *regs);
static void do_interrupt(struct pt_regs *regs);
#if XTENSA_FAKE_NMI
static void do_nmi(struct pt_regs *regs);
#endif
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
static void do_unaligned_user(struct pt_regs *regs);
#endif
static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs);
#endif
static void do_debug(struct pt_regs *regs);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

#define KRNL		0x01
#define USER		0x02

#define COPROCESSOR(x)							\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }
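
/*
 * Each coprocessor-disabled cause gets the fast_coprocessor assembly
 * handler in both the user and kernel fast dispatch tables, plus
 * do_coprocessor as the default C-handler fallback.
 */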

typedef struct {
	int cause;
	int fast;
	void *handler;
} dispatch_init_table_t;

static dispatch_init_table_t __initdata dispatch_init_table[] = {

#ifdef CONFIG_USER_ABI_CALL0_PROBE
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	USER,	   fast_illegal_instruction_user },
#endif
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction },
{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
#endif
{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,	   do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS,			0,	   do_page_fault },
{ EXCCAUSE_ITLB_MISS,			USER|KRNL, fast_second_level_miss },
{ EXCCAUSE_DTLB_MISS,			USER|KRNL, fast_second_level_miss },
{ EXCCAUSE_DTLB_MISS,			0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
#endif /* CONFIG_MMU */
#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,		0,	   do_page_fault },
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,		0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
#endif
/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI,			0,		do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
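/* sentinel: terminates the table scan in trap_init() */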
{ -1, -1, 0 }

};

/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table);

void die(const char*, struct pt_regs*, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	pr_info_ratelimited("Caught unhandled exception in '%s' "
			    "(pid = %d, pc = %#010lx) - should not happen\n"
			    "\tEXCCAUSE is %ld\n",
			    current->comm, task_pid_nr(current), regs->pc,
			    regs->exccause);
	force_sig(SIGILL);
}

/*
 * Multi-hit exception. This is fatal!
 */

static void do_multihit(struct pt_regs *regs)
{
	die("Caught multihit exception", regs, SIGKILL);
}

/*
 * IRQ handler.
 */

#if XTENSA_FAKE_NMI

#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fires."

static inline void check_valid_nmi(void)
{
	unsigned intread = xtensa_get_sr(interrupt);
	unsigned intenable = xtensa_get_sr(intenable);

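	/*
	 * The only sources that may legitimately be pending and enabled
	 * here are interrupts strictly below the profiling level plus the
	 * profiling interrupt itself; anything else at or above the fake
	 * NMI level would be lost, hence the bugcheck.
	 */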
	BUG_ON(intread & intenable &
	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
		 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

DEFINE_PER_CPU(unsigned long, nmi_count);

static void do_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
	xtensa_pmu_irq_handler(0, NULL);
	nmi_exit();
	set_irq_regs(old_regs);
}
#endif

static void do_interrupt(struct pt_regs *regs)
{
	static const unsigned int_level_mask[] = {
		0,
		XCHAL_INTLEVEL1_MASK,
		XCHAL_INTLEVEL2_MASK,
		XCHAL_INTLEVEL3_MASK,
		XCHAL_INTLEVEL4_MASK,
		XCHAL_INTLEVEL5_MASK,
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned unhandled = ~0u;

	irq_enter();

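	/*
	 * Service pending, enabled interrupts highest level first.  The
	 * <unhandled> mask enforces round-robin fairness within a level:
	 * each pending interrupt at the chosen level is dispatched once
	 * before any of them is dispatched again.
	 */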
	for (;;) {
		unsigned intread = xtensa_get_sr(interrupt);
		unsigned intenable = xtensa_get_sr(intenable);
		unsigned int_at_level = intread & intenable;
		unsigned level;

		for (level = LOCKLEVEL; level > 0; --level) {
			if (int_at_level & int_level_mask[level]) {
				int_at_level &= int_level_mask[level];
				if (int_at_level & unhandled)
					int_at_level &= unhandled;
				else
					unhandled |= int_level_mask[level];
				break;
			}
		}

		if (level == 0)
			break;

		/* clear lowest pending irq in the unhandled mask */
		unhandled ^= (int_at_level & -int_at_level);
		do_IRQ(__ffs(int_at_level), regs);
	}

	irq_exit();
	set_irq_regs(old_regs);
}

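/*
 * Software-divide configurations report division by zero as an illegal
 * instruction carrying an ASCII "DIV0" marker (presumably placed there
 * by the division support code).  The faulting opcode is 2 or 3 bytes
 * long, so the marker may start at pc + 2 or pc + 3.
 */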
static bool check_div0(struct pt_regs *regs)
{
	static const u8 pattern[] = {'D', 'I', 'V', '0'};
	const u8 *p;
	u8 buf[5];

	if (user_mode(regs)) {
		if (copy_from_user(buf, (void __user *)regs->pc + 2, 5))
			return false;
		p = buf;
	} else {
		p = (const u8 *)regs->pc + 2;
	}

	return memcmp(p, pattern, sizeof(pattern)) == 0 ||
		memcmp(p + 1, pattern, sizeof(pattern)) == 0;
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

static void do_illegal_instruction(struct pt_regs *regs)
{
#ifdef CONFIG_USER_ABI_CALL0_PROBE
	/*
	 * When a call0 application encounters an illegal instruction, the
	 * fast exception handler will attempt to set PS.WOE and retry the
	 * failing instruction.
	 * If we get here we know that the instruction is also illegal
	 * with PS.WOE set, so it's not related to the windowed option,
	 * hence PS.WOE may be cleared.
	 */
	if (regs->pc == current_thread_info()->ps_woe_fix_addr)
		regs->ps &= ~PS_WOE_MASK;
#endif
	if (check_div0(regs)) {
		do_div0(regs);
		return;
	}

	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
			    current->comm, task_pid_nr(current), regs->pc);
	force_sig(SIGILL);
}

static void do_div0(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
	force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}

/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
 * memory accesses from user space.
 */

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
static void do_unaligned_user(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);

	current->thread.bad_vaddr = regs->excvaddr;
	current->thread.error_code = -3;
	pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
			    "(pid = %d, pc = %#010lx)\n",
			    regs->excvaddr, current->comm,
			    task_pid_nr(current), regs->pc);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr);
}
#endif

#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs)
{
	coprocessor_flush_release_all(current_thread_info());
}
#endif

/* Handle debug events.
 * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with
 * preemption disabled to avoid rescheduling and keep mapping of hardware
 * breakpoint structures to debug registers intact, so that
 * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit.
 */
static void do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = check_hw_breakpoint(regs);

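	/*
	 * We entered with preemption disabled (see the comment above);
	 * re-enable it now that the hardware breakpoint state has been
	 * examined.
	 */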
	preempt_enable();
	if (ret == 0)
		return;
#endif
	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP);
}


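/*
 * Install <handler> for <cause> in the given per-CPU dispatch table
 * (fast_user_handler, fast_kernel_handler or default_handler) on every
 * possible CPU.
 */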
#define set_handler(type, cause, handler)				\
	do {								\
		unsigned int cpu;					\
									\
		for_each_possible_cpu(cpu)				\
			per_cpu(exc_table, cpu).type[cause] = (handler);\
	} while (0)

/* Set exception C handler - for temporary use when probing exceptions */

xtensa_exception_handler *
__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{
	void *previous = per_cpu(exc_table, 0).default_handler[cause];

	set_handler(default_handler, cause, handler);
	return previous;
}


static void trap_init_excsave(void)
{
	xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
}

static void trap_init_debug(void)
{
	unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);

	this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
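	/*
	 * Stash the per-CPU debug_table address in the EXCSAVE register
	 * for the debug interrupt level so the low-level debug vector can
	 * locate its per-CPU state.
	 */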
	__asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
			     :: "a"(debugsave));
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user		first dispatch table for user exceptions
 * - fast kernel	first dispatch table for kernel exceptions
 * - default C-handler	C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */

void __init trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for (i = 0; i < EXCCAUSE_N; i++) {
		set_handler(fast_user_handler, i, user_exception);
		set_handler(fast_kernel_handler, i, kernel_exception);
		set_handler(default_handler, i, do_unhandled);
	}

	/* Setup specific handlers. */

	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		if (fast == 0)
			set_handler(default_handler, cause, handler);
		if ((fast & USER) != 0)
			set_handler(fast_user_handler, cause, handler);
		if ((fast & KRNL) != 0)
			set_handler(fast_kernel_handler, cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
	trap_init_excsave();
	trap_init_debug();
}

#ifdef CONFIG_SMP
void secondary_trap_init(void)
{
	trap_init_excsave();
	trap_init_debug();
}
#endif

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_DEFAULT);

	for (i = 0; i < 16; i++) {
		if ((i % 8) == 0)
			pr_info("a%02d:", i);
		pr_cont(" %08lx", regs->areg[i]);
	}
	pr_cont("\n");
	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
		regs->pc, regs->ps, regs->depc, regs->excvaddr);
	pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
		regs->lbeg, regs->lend, regs->lcount, regs->sar);
	if (user_mode(regs))
		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
			regs->windowbase, regs->windowstart, regs->wmask,
			regs->syscall);
}

static int show_trace_cb(struct stackframe *frame, void *data)
{
	const char *loglvl = data;

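	/* %pB resolves the address to symbol+offset, adjusted for call
	 * return addresses, which is what a backtrace wants. */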
	if (kernel_text_address(frame->pc))
		printk("%s [<%08lx>] %pB\n",
			loglvl, frame->pc, (void *)frame->pc);
	return 0;
}

static void show_trace(struct task_struct *task, unsigned long *sp,
		       const char *loglvl)
{
	if (!sp)
		sp = stack_pointer(task);

	printk("%sCall Trace:\n", loglvl);
	walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}

#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	size_t len;

	if (!sp)
		sp = stack_pointer(task);

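	/*
	 * Dump at most kstack_depth_to_print entries, and never past the
	 * next THREAD_SIZE boundary: (-(size_t)sp) & (THREAD_SIZE -
	 * STACK_DUMP_ENTRY_SIZE) is the distance from sp to the end of
	 * the stack region, rounded down to whole entries.
	 */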
	len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
		  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);

	printk("%sStack:\n", loglvl);
	print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
		       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
		       sp, len, false);
	show_trace(task, sp, loglvl);
}

DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;
	const char *pr = "";

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	console_verbose();
	spin_lock_irq(&die_lock);

	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(err);
}