cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

traps.c (17599B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>

#include "access-helper.h"

extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);

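/*
 * Conservative backtrace: scan the stack upward from $sp (r3) and
 * print every word that falls inside kernel text. A scan like this
 * may report stale return addresses left over from earlier calls;
 * it is only used for debug output.
 */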
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	unsigned long *sp = (unsigned long *)(regs->regs[3] & ~3);

	printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
	printk("%s\n", loglvl);
#endif
	while (!kstack_end(sp)) {
		if (__get_addr(&addr, sp++, user)) {
			printk("%s (Bad stack address)", loglvl);
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

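/*
 * Raw stack dump: print at most 40 words starting at $sp, stopping at
 * the page boundary, then chain into show_backtrace() above.
 */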
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

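/*
 * Build a synthetic pt_regs so the dump helpers above can be reused:
 * from an explicit stack pointer, from the current frame via
 * prepare_frametrace(), or from a sleeping task's saved ra/sp/fp
 * (thread.reg01/reg03/reg22).
 */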
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

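/*
 * Print the nine instruction words around the faulting era, with the
 * faulting one bracketed as <xxxxxxxx>.
 */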
static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

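/*
 * Full register dump: the 32 GPRs four per row, era/ra with symbolic
 * names, then the exception CSRs. BadVA is only printed for the
 * memory-access exception codes (EXCCODE_TLBL .. EXCCODE_ALE), the
 * cases where it carries a faulting address.
 */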
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int excsubcode;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	/*
	 * Saved csr registers
	 */
	printk("era   : %0*lx %pS\n", field, regs->csr_era,
	       (void *) regs->csr_era);
	printk("ra    : %0*lx %pS\n", field, regs->regs[1],
	       (void *) regs->regs[1]);

	printk("CSR crmd: %08lx	", regs->csr_crmd);
	printk("CSR prmd: %08lx	", regs->csr_prmd);
	printk("CSR euen: %08lx	", regs->csr_euen);
	printk("CSR ecfg: %08lx	", regs->csr_ecfg);
	printk("CSR estat: %08lx	", regs->csr_estat);

	pr_cont("\n");

	exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
	printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);

	printk("PrId  : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

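/*
 * Serialized oops path: let notifier chains suppress the signal, dump
 * state under die_lock, taint the kernel, then panic (in interrupt
 * context or with panic_on_oops set) or kill the current task.
 */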
static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(sig);
}

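/*
 * Program the vector spacing in CSR.ECFG.VS: each vector slot spans
 * 4 << VS bytes, so a VECSIZE-byte slot needs VS = ilog2(VECSIZE / 4).
 */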
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d not supported yet", vs);

	csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

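/*
 * Map an FP-emulator result onto a signal for the current task. For
 * SIGSEGV, probe the VMA under mmap_read_lock to distinguish ACCERR
 * (mapped but inaccessible) from MAPERR (not mapped at all).
 */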
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

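/*
 * Address error (ADE) and alignment error (ALE): fatal in kernel mode,
 * SIGBUS with the faulting address from CSR.BADV in user mode.
 */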
asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

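/*
 * Breakpoint exception: the low 15 bits of the break instruction carry
 * a code that routes the trap to kprobe/uprobe handlers, BUG()
 * reporting, integer divide-by-zero/overflow signals, or plain
 * SIGTRAP.
 */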
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();
	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kprobe handlers if the instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	local_irq_disable();
	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	pr_warn("Hardware watch point handler not implemented!\n");
}

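/*
 * Reserved/invalid instruction: fatal in the kernel. For user space
 * the skip-over of the instruction is undone before delivering
 * SIGSEGV (opcode unreadable) or SIGILL (everything else).
 */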
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = -1;
	unsigned int opcode = 0;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	unsigned long old_era = regs->csr_era;
	unsigned long old_ra = regs->regs[1];
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	compute_return_era(regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->csr_era = old_era;		/* Undo skip-over.  */
		regs->regs[1] = old_ra;
		force_sig(status);
	}

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

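/*
 * FPU-disabled exception: lazily allocate or restore the FP context
 * for a user task; hitting this from kernel context is a bug.
 */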
asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

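/*
 * LSX/LASX/LBT disabled exceptions: the SIMD and binary-translation
 * extensions are not wired up in this kernel, so any use is SIGILL.
 */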
asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	force_sig(SIGILL);
	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most
	 * probably caused by a fatal error after another hardware/software
	 * error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

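/*
 * Vectored interrupt entry: if the exception arrived on a task stack,
 * switch to the per-CPU IRQ stack by hand (inline asm below) before
 * dispatching, saving the task's sp at the top of the IRQ stack for
 * the unwinder.
 */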
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

extern void tlb_init(int cpu);
extern void cache_error_setup(void);

unsigned long eentry;
unsigned long tlbrentry;

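/*
 * One 64KB-aligned block backs all 128 vector slots of VECSIZE bytes
 * each; EENTRY and MERRENTRY point at its base and TLBRENTRY at slot
 * 80 within it.
 */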
long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

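/*
 * Per-CPU trap setup: program the vector spacing and entry CSRs, seed
 * the ASID cache, adopt init_mm, and (on the boot CPU) park every
 * vector on handle_reserved until trap_init() installs real handlers.
 */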
void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install a CPU exception handler by copying it into its vector slot. */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install an uncached CPU exception handler.
 * This is suitable only for the cache error exception, which is the
 * only exception handler that runs uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}