cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

traps_32.c (18932B)


// SPDX-License-Identifier: GPL-2.0
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>

#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>

#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_UBC		12
#  define TRAP_FPU_ERROR	13
#  define TRAP_DIVZERO_ERROR	17
#  define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

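/*
 * The emulation below always writes into a full 32-bit register image
 * that the caller has zeroed first.  For byte and word loads,
 * sign_extend() propagates the sign bit of the loaded value into the
 * remaining high bytes, mirroring what the hardware mov.b/mov.w
 * instructions would have done.  The byte indexes differ with
 * endianness because dst points at the register image in memory.
 */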
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
	if ((count == 1) && dst[0] & 0x80) {
		dst[1] = 0xff;
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	if ((count == 2) && dst[1] & 0x80) {
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
#else
	if ((count == 1) && dst[3] & 0x80) {
		dst[2] = 0xff;
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
	if ((count == 2) && dst[2] & 0x80) {
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
#endif
}

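/*
 * A struct mem_access bundles the copy-in/copy-out routines used by the
 * emulation code, so the same fixup paths can service faults taken in
 * user space (copy_{from,to}_user) and in kernel space (the _nofault
 * wrappers below).
 */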
static struct mem_access user_mem_access = {
	copy_from_user,
	copy_to_user,
};

static unsigned long copy_from_kernel_wrapper(void *dst, const void __user *src,
					      unsigned long cnt)
{
	return copy_from_kernel_nofault(dst, (const void __force *)src, cnt);
}

static unsigned long copy_to_kernel_wrapper(void __user *dst, const void *src,
					    unsigned long cnt)
{
	return copy_to_kernel_nofault((void __force *)dst, src, cnt);
}

static struct mem_access kernel_mem_access = {
	copy_from_kernel_wrapper,
	copy_to_kernel_wrapper,
};

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
				struct mem_access *ma)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;
	unsigned char __user *srcu, *dstu;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

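	/*
	 * For the mov.[bwl] forms emulated here the low two bits of the
	 * opcode encode the operand size (0 = byte, 1 = word, 2 = long);
	 * for the fixed-size forms the value is only used for the access
	 * accounting below.
	 */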
	count = 1<<(instruction&3);

	switch (count) {
	case 1: inc_unaligned_byte_access(); break;
	case 2: inc_unaligned_word_access(); break;
	case 4: inc_unaligned_dword_access(); break;
	case 8: inc_unaligned_multi_access(); break;
	}

	ret = -EFAULT;
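	/*
	 * Dispatch on the top nibble of the opcode.  Only the instruction
	 * groups that can make unaligned data accesses are emulated;
	 * anything else falls through and returns -EFAULT.
	 */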
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			srcu = (unsigned char __user *)*rm;
			srcu += regs->regs[0];
			dst = (unsigned char *)rn;
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 4-count;
#endif
			if (ma->from(dst, srcu, count))
				goto fetch_fault;

			sign_extend(count, dst);
		} else {
			/* to memory */
			src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dstu = (unsigned char __user *)*rn;
			dstu += regs->regs[0];

			if (ma->to(dstu, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char *)rm;
		dstu = (unsigned char __user *)*rn;
		dstu += (instruction&0x000F)<<2;

		if (ma->to(dstu, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char *)rm;
		dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (ma->to(dstu, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		srcu = (unsigned char __user *)*rm;
		srcu += (instruction & 0x000F) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
		srcu = (unsigned char __user *)*rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 4-count;
#endif
		if (ma->from(dst, srcu, count))
			goto fetch_fault;
		sign_extend(count, dst);
		ret = 0;
		break;

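	/*
	 * The 0x8xxx group also encodes the conditional branches (bt/bf
	 * and their delay-slot variants), but those never touch memory and
	 * so never arrive here; only the two displacement-addressed mov.w
	 * forms need emulating.
	 */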
	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
			dstu += (instruction & 0x000F) << 1;

			if (ma->to(dstu, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			srcu = (unsigned char __user *)*rm;
			srcu += (instruction & 0x000F) << 1;
			dst = (unsigned char *) &regs->regs[0];
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif
			if (ma->from(dst, srcu, 2))
				goto fetch_fault;
			sign_extend(2, dst);
			ret = 0;
			break;
		}
		break;

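	/*
	 * PC-relative loads: the effective address is the instruction
	 * address plus 4 plus the scaled 8-bit displacement; the mov.l
	 * form additionally masks PC down to a longword boundary, exactly
	 * as the hardware does.
	 */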
	case 9: /* mov.w @(disp,PC),Rn */
		srcu = (unsigned char __user *)regs->pc;
		srcu += 4;
		srcu += (instruction & 0x00FF) << 1;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 2;
#endif

		if (ma->from(dst, srcu, 2))
			goto fetch_fault;
		sign_extend(2, dst);
		ret = 0;
		break;

	case 0xd: /* mov.l @(disp,PC),Rn */
		srcu = (unsigned char __user *)(regs->pc & ~0x3);
		srcu += 4;
		srcu += (instruction & 0x00FF) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	die_if_no_fixup("Fault in unaligned fixup", regs, 0);
	return -EFAULT;
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
				   insn_size_t old_instruction,
				   struct mem_access *ma)
{
	insn_size_t instruction;
	void __user *addr = (void __user *)(regs->pc +
		instruction_size(old_instruction));

	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
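/*
 * Example of the 12-bit form: shifting the opcode left by 4 moves
 * displacement bit 11 into the sign bit of a 16-bit value, and the
 * arithmetic shift right by 3 then yields the sign-extended displacement
 * already scaled by 2.  For "bra" 0xAFFE (disp = -2),
 * (signed short)0xFFE0 >> 3 == -4 and -4 + 4 == 0, so the branch targets
 * itself, matching the architectural target of PC + 4 + disp*2.
 */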

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int expected,
			    unsigned long address)
{
	u_int rm;
	int ret, index;

	/*
	 * XXX: We can't handle mixed 16/32-bit instructions yet
	 */
	if (instruction_size(instruction) != 2)
		return -EINVAL;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/*
	 * Log the unexpected fixups, and then pass them on to perf.
	 *
	 * We intentionally don't report the expected cases to perf as
	 * otherwise the trapped I/O case will skew the results too much
	 * to be useful.
	 */
	if (!expected) {
		unaligned_fixups_notify(current, instruction, regs);
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
			      regs, address);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot */
			ret = 0;
			break;
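		/*
		 * For the delay-slot forms the slot instruction is emulated
		 * first.  Bit 0 of SR is the T flag; on parts where PC
		 * points at the branch itself (see the #if below), T decides
		 * whether we merely step past the slot (PC += 4) or apply
		 * the branch displacement.
		 */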
		case 0x0F00: /* bf/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			ret = 0;
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0x9000: /* mov.w @(disp,Rm),Rn */
		goto simple;

	case 0xA000: /* bra label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;

	case 0xD000: /* mov.l @(disp,Rm),Rn */
		goto simple;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction, regs, ma);
	if (ret==0)
		regs->pc += instruction_size(instruction);
	return ret;
}

/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address error
 * and data address errors caused by read accesses.
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code = 0;
	insn_size_t instruction;
	int tmp;

	/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
	error_code = lookup_exception_vector();
#endif

	if (user_mode(regs)) {
		int si_code = BUS_ADRERR;
		unsigned int user_action;

		local_irq_enable();
		inc_unaligned_user_access();

		if (copy_from_user(&instruction, (insn_size_t __user *)(regs->pc & ~1),
				   sizeof(instruction))) {
			goto uspace_segv;
		}

		/* shout about userspace fixups */
		unaligned_fixups_notify(current, instruction, regs);

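		/*
		 * The action mask reflects the current unaligned-access
		 * policy for user space: fix the access up, deliver a
		 * signal, or silently skip the instruction.
		 */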
		user_action = unaligned_user_action();
		if (user_action & UM_FIXUP)
			goto fixup;
		if (user_action & UM_SIGNAL)
			goto uspace_segv;
		else {
			/* ignore */
			regs->pc += instruction_size(instruction);
			return;
		}

fixup:
		/* bad PC is not something we can fix */
		if (regs->pc & 1) {
			si_code = BUS_ADRALN;
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs,
					      &user_mem_access, 0,
					      address);

		if (tmp == 0)
			return; /* sorted */
uspace_segv:
		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
		       regs->pr);

		force_sig_fault(SIGBUS, si_code, (void __user *)address);
	} else {
		inc_unaligned_kernel_access();

		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

		if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc),
				   sizeof(instruction))) {
			/* Argh. Fault on the instruction itself.
			 * This should never happen on non-SMP.
			 */
			die("insn faulting in do_address_error", regs, 0);
		}

		unaligned_fixups_notify(current, instruction, regs);

		handle_unaligned_access(instruction, regs, &kernel_mem_access,
					0, address);
	}
}

#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst = 0;

	/*
	 * Safeguard if DSP mode is already enabled or we're lacking
	 * the DSP altogether.
	 */
	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */

#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4)
{
	int code;

	switch (r4) {
	case TRAP_DIVZERO_ERROR:
		code = FPE_INTDIV;
		break;
	case TRAP_DIVOVF_ERROR:
		code = FPE_INTOVF;
		break;
	default:
		/* Let gcc know unhandled cases don't make it past here */
		return;
	}
	force_sig_fault(SIGFPE, code, NULL);
}
#endif

asmlinkage void do_reserved_inst(void)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long error_code;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;
	int err;

	get_user(inst, (unsigned short __user *)regs->pc);

	err = do_fpu_inst(inst, regs);
	if (!err) {
		regs->pc += instruction_size(inst);
		return;
	}
	/* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs->sr |= SR_DSP;
		/* Save DSP mode */
		current->thread.dsp_status.status |= SR_DSP;
		return;
	}
#endif

	error_code = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL);
	die_if_no_fixup("reserved instruction", regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn+4;
	 * bsrf:0x03: PC+=Rn+4   after PR=PC+4;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */
	if (((inst & 0xf000) == 0xb000)  ||	/* bsr */
	    ((inst & 0xf0ff) == 0x0003)  ||	/* bsrf */
	    ((inst & 0xf0ff) == 0x400b))	/* jsr */
		regs->pr = regs->pc + 4;

	if ((inst & 0xfd00) == 0x8d00) {	/* bfs, bts */
		regs->pc += SH_PC_8BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xe000) == 0xa000) {	/* bra, bsr */
		regs->pc += SH_PC_12BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xf0df) == 0x0003) {	/* braf, bsrf */
		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
		return 0;
	}

	if ((inst & 0xf0df) == 0x400b) {	/* jmp, jsr */
		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
		return 0;
	}

	if ((inst & 0xffff) == 0x000b) {	/* rts */
		regs->pc = regs->pr;
		return 0;
	}

	return 1;
}
#endif

asmlinkage void do_illegal_slot_inst(void)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long inst;

	if (kprobe_handle_illslot(regs->pc) == 0)
		return;

#ifdef CONFIG_SH_FPU_EMU
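	/*
	 * The faulting instruction lives in the delay slot, so fetch it
	 * from pc + 2.  If it emulates as an FPU instruction, the owning
	 * branch is then re-read from pc and emulated to compute the
	 * correct continuation address.
	 */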
	get_user(inst, (unsigned short __user *)regs->pc + 1);
	if (!do_fpu_inst(inst, regs)) {
		get_user(inst, (unsigned short __user *)regs->pc);
		if (!emulate_branch(inst, regs))
			return;
		/* fault in branch.*/
	}
	/* not a FPU inst. */
#endif

	inst = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL);
	die_if_no_fixup("illegal slot instruction", regs, inst);
}

asmlinkage void do_exception_error(void)
{
	long ex;

	ex = lookup_exception_vector();
	die_if_kernel("exception", current_pt_regs(), ex);
}

void per_cpu_trap_init(void)
{
	extern void *vbr_base;
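	/*
	 * vbr_base marks the start of the exception vector area laid out
	 * by the low-level entry code; loading its address into the VBR
	 * register points the CPU's exception dispatch at these handlers.
	 */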

	/* NOTE: The VBR value should be at P1
	   (or P2, virtual "fixed" address space).
	   It should definitely not be in physical address space.  */

	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");

	/* disable exception blocking now that the VBR has been set up */
	clear_bl_bit();
}

void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *old_handler;

	old_handler = exception_handling_table[vec];
	exception_handling_table[vec] = handler;
	return old_handler;
}

void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
	set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif

#ifdef TRAP_UBC
	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}