cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

unaligned.c (34307B)
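
The listing below is the MIPS kernel's handler that emulates unaligned (address error)
accesses in software. Its header comment documents the per-process userland switch,
sysmips(MIPS_FIXADE, x), and includes a small demo program. For convenience, here is a
cleaned-up, compile-ready version of that demo, a sketch for MIPS userland only; it
assumes <sys/sysmips.h> provides sysmips() and MIPS_FIXADE exactly as the header
comment describes:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/sysmips.h>

	struct foo {
		unsigned char bar[8];
	};

	int main(int argc, char *argv[])
	{
		struct foo x = { {0, 1, 2, 3, 4, 5, 6, 7} };
		unsigned int *p = (unsigned int *)(x.bar + 3);	/* deliberately misaligned */
		int i;

		if (argc > 1)
			sysmips(MIPS_FIXADE, atoi(argv[1]));	/* 0: fault, non-zero: emulate */

		printf("*p = %08x\n", *p);	/* unaligned load */

		*p = 0xdeadface;		/* unaligned store */

		for (i = 0; i <= 7; i++)
			printf("%02x ", x.bar[i]);
		printf("\n");

		return 0;
	}

With fixups left at their default (enabled, per the header comment), the unaligned
accesses are emulated by the handler below; passing 0 disables emulation and the
program then dies with SIGBUS.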


      1/*
      2 * Handle unaligned accesses by emulation.
      3 *
      4 * This file is subject to the terms and conditions of the GNU General Public
      5 * License.  See the file "COPYING" in the main directory of this archive
      6 * for more details.
      7 *
      8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
      9 * Copyright (C) 1999 Silicon Graphics, Inc.
     10 * Copyright (C) 2014 Imagination Technologies Ltd.
     11 *
      12 * This file contains the exception handler for the address error exception, with the
     13 * special capability to execute faulting instructions in software.  The
     14 * handler does not try to handle the case when the program counter points
     15 * to an address not aligned to a word boundary.
     16 *
      17 * Putting data at unaligned addresses is bad practice even on Intel, where
      18 * only performance is affected.  Much worse is that such code is non-
      19 * portable.  Because several programs die on MIPS due to alignment
      20 * problems, I decided to implement this handler anyway, though I originally
      21 * didn't intend to do this at all for user code.
     22 *
     23 * For now I enable fixing of address errors by default to make life easier.
      24 * However, I intend to disable this at some point in the future when the alignment
      25 * problems with user programs have been fixed.  For programmers this is the
     26 * right way to go.
     27 *
     28 * Fixing address errors is a per process option.  The option is inherited
      29 * across fork(2) and execve(2) calls.  If you really want to use the
      30 * option in your user programs (I strongly discourage use of the software
      31 * emulation), use the following code in your userland stuff:
     32 *
     33 * #include <sys/sysmips.h>
     34 *
     35 * ...
     36 * sysmips(MIPS_FIXADE, x);
     37 * ...
     38 *
      39 * The argument x is 0 to disable software emulation; any other value enables it.
     40 *
      41 * Below is a little program to play around with this feature.
     42 *
     43 * #include <stdio.h>
     44 * #include <sys/sysmips.h>
     45 *
     46 * struct foo {
     47 *	   unsigned char bar[8];
     48 * };
     49 *
      50 * int main(int argc, char *argv[])
     51 * {
     52 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
     53 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
     54 *	   int i;
     55 *
     56 *	   if (argc > 1)
     57 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
     58 *
      59 *	   printf("*p = %08x\n", *p);
     60 *
     61 *	   *p = 0xdeadface;
     62 *
      63 *	   for (i = 0; i <= 7; i++)
      64 *		   printf("%02x ", x.bar[i]);
     65 *	   printf("\n");
     66 * }
     67 *
     68 * Coprocessor loads are not supported; I think this case is unimportant
      69 * in practice.
     70 *
     71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
     72 *	 exception for the R6000.
     73 *	 A store crossing a page boundary might be executed only partially.
     74 *	 Undo the partial store in this case.
     75 */
     76#include <linux/context_tracking.h>
     77#include <linux/mm.h>
     78#include <linux/signal.h>
     79#include <linux/smp.h>
     80#include <linux/sched.h>
     81#include <linux/debugfs.h>
     82#include <linux/perf_event.h>
     83
     84#include <asm/asm.h>
     85#include <asm/branch.h>
     86#include <asm/byteorder.h>
     87#include <asm/cop2.h>
     88#include <asm/debug.h>
     89#include <asm/fpu.h>
     90#include <asm/fpu_emulator.h>
     91#include <asm/inst.h>
     92#include <asm/unaligned-emul.h>
     93#include <asm/mmu_context.h>
     94#include <linux/uaccess.h>
     95
     96#include "access-helper.h"
     97
     98enum {
     99	UNALIGNED_ACTION_QUIET,
    100	UNALIGNED_ACTION_SIGNAL,
    101	UNALIGNED_ACTION_SHOW,
    102};
    103#ifdef CONFIG_DEBUG_FS
    104static u32 unaligned_instructions;
    105static u32 unaligned_action;
    106#else
    107#define unaligned_action UNALIGNED_ACTION_QUIET
    108#endif
    109extern void show_registers(struct pt_regs *regs);
    110
    111static void emulate_load_store_insn(struct pt_regs *regs,
    112	void __user *addr, unsigned int *pc)
    113{
    114	unsigned long origpc, orig31, value;
    115	union mips_instruction insn;
    116	unsigned int res;
    117	bool user = user_mode(regs);
    118
    119	origpc = (unsigned long)pc;
    120	orig31 = regs->regs[31];
    121
    122	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
    123
    124	/*
    125	 * This load never faults.
    126	 */
    127	__get_inst32(&insn.word, pc, user);
    128
    129	switch (insn.i_format.opcode) {
    130		/*
     131		 * These are instructions that a compiler doesn't generate.  We
     132		 * can therefore assume that the code is MIPS-aware and
    133		 * really buggy.  Emulating these instructions would break the
    134		 * semantics anyway.
    135		 */
    136	case ll_op:
    137	case lld_op:
    138	case sc_op:
    139	case scd_op:
    140
    141		/*
    142		 * For these instructions the only way to create an address
    143		 * error is an attempted access to kernel/supervisor address
    144		 * space.
    145		 */
    146	case ldl_op:
    147	case ldr_op:
    148	case lwl_op:
    149	case lwr_op:
    150	case sdl_op:
    151	case sdr_op:
    152	case swl_op:
    153	case swr_op:
    154	case lb_op:
    155	case lbu_op:
    156	case sb_op:
    157		goto sigbus;
    158
    159		/*
    160		 * The remaining opcodes are the ones that are really of
    161		 * interest.
    162		 */
    163	case spec3_op:
    164		if (insn.dsp_format.func == lx_op) {
    165			switch (insn.dsp_format.op) {
    166			case lwx_op:
    167				if (user && !access_ok(addr, 4))
    168					goto sigbus;
    169				LoadW(addr, value, res);
    170				if (res)
    171					goto fault;
    172				compute_return_epc(regs);
    173				regs->regs[insn.dsp_format.rd] = value;
    174				break;
    175			case lhx_op:
    176				if (user && !access_ok(addr, 2))
    177					goto sigbus;
    178				LoadHW(addr, value, res);
    179				if (res)
    180					goto fault;
    181				compute_return_epc(regs);
    182				regs->regs[insn.dsp_format.rd] = value;
    183				break;
    184			default:
    185				goto sigill;
    186			}
    187		}
    188#ifdef CONFIG_EVA
    189		else {
    190			/*
     191			 * we can land here only from the kernel accessing user
     192			 * memory, so we need to "switch" the address limit to
     193			 * user space so that the address check can work properly.
    194			 */
    195			switch (insn.spec3_format.func) {
    196			case lhe_op:
    197				if (!access_ok(addr, 2))
    198					goto sigbus;
    199				LoadHWE(addr, value, res);
    200				if (res)
    201					goto fault;
    202				compute_return_epc(regs);
    203				regs->regs[insn.spec3_format.rt] = value;
    204				break;
    205			case lwe_op:
    206				if (!access_ok(addr, 4))
    207					goto sigbus;
    208				LoadWE(addr, value, res);
    209				if (res)
    210					goto fault;
    211				compute_return_epc(regs);
    212				regs->regs[insn.spec3_format.rt] = value;
    213				break;
    214			case lhue_op:
    215				if (!access_ok(addr, 2))
    216					goto sigbus;
    217				LoadHWUE(addr, value, res);
    218				if (res)
    219					goto fault;
    220				compute_return_epc(regs);
    221				regs->regs[insn.spec3_format.rt] = value;
    222				break;
    223			case she_op:
    224				if (!access_ok(addr, 2))
    225					goto sigbus;
    226				compute_return_epc(regs);
    227				value = regs->regs[insn.spec3_format.rt];
    228				StoreHWE(addr, value, res);
    229				if (res)
    230					goto fault;
    231				break;
    232			case swe_op:
    233				if (!access_ok(addr, 4))
    234					goto sigbus;
    235				compute_return_epc(regs);
    236				value = regs->regs[insn.spec3_format.rt];
    237				StoreWE(addr, value, res);
    238				if (res)
    239					goto fault;
    240				break;
    241			default:
    242				goto sigill;
    243			}
    244		}
    245#endif
    246		break;
    247	case lh_op:
    248		if (user && !access_ok(addr, 2))
    249			goto sigbus;
    250
    251		if (IS_ENABLED(CONFIG_EVA) && user)
    252			LoadHWE(addr, value, res);
    253		else
    254			LoadHW(addr, value, res);
    255
    256		if (res)
    257			goto fault;
    258		compute_return_epc(regs);
    259		regs->regs[insn.i_format.rt] = value;
    260		break;
    261
    262	case lw_op:
    263		if (user && !access_ok(addr, 4))
    264			goto sigbus;
    265
    266		if (IS_ENABLED(CONFIG_EVA) && user)
    267			LoadWE(addr, value, res);
    268		else
    269			LoadW(addr, value, res);
    270
    271		if (res)
    272			goto fault;
    273		compute_return_epc(regs);
    274		regs->regs[insn.i_format.rt] = value;
    275		break;
    276
    277	case lhu_op:
    278		if (user && !access_ok(addr, 2))
    279			goto sigbus;
    280
    281		if (IS_ENABLED(CONFIG_EVA) && user)
    282			LoadHWUE(addr, value, res);
    283		else
    284			LoadHWU(addr, value, res);
    285
    286		if (res)
    287			goto fault;
    288		compute_return_epc(regs);
    289		regs->regs[insn.i_format.rt] = value;
    290		break;
    291
    292	case lwu_op:
    293#ifdef CONFIG_64BIT
    294		/*
    295		 * A 32-bit kernel might be running on a 64-bit processor.  But
    296		 * if we're on a 32-bit processor and an i-cache incoherency
    297		 * or race makes us see a 64-bit instruction here the sdl/sdr
    298		 * would blow up, so for now we don't handle unaligned 64-bit
    299		 * instructions on 32-bit kernels.
    300		 */
    301		if (user && !access_ok(addr, 4))
    302			goto sigbus;
    303
    304		LoadWU(addr, value, res);
    305		if (res)
    306			goto fault;
    307		compute_return_epc(regs);
    308		regs->regs[insn.i_format.rt] = value;
    309		break;
    310#endif /* CONFIG_64BIT */
    311
    312		/* Cannot handle 64-bit instructions in 32-bit kernel */
    313		goto sigill;
    314
    315	case ld_op:
    316#ifdef CONFIG_64BIT
    317		/*
    318		 * A 32-bit kernel might be running on a 64-bit processor.  But
    319		 * if we're on a 32-bit processor and an i-cache incoherency
    320		 * or race makes us see a 64-bit instruction here the sdl/sdr
    321		 * would blow up, so for now we don't handle unaligned 64-bit
    322		 * instructions on 32-bit kernels.
    323		 */
    324		if (user && !access_ok(addr, 8))
    325			goto sigbus;
    326
    327		LoadDW(addr, value, res);
    328		if (res)
    329			goto fault;
    330		compute_return_epc(regs);
    331		regs->regs[insn.i_format.rt] = value;
    332		break;
    333#endif /* CONFIG_64BIT */
    334
    335		/* Cannot handle 64-bit instructions in 32-bit kernel */
    336		goto sigill;
    337
    338	case sh_op:
    339		if (user && !access_ok(addr, 2))
    340			goto sigbus;
    341
    342		compute_return_epc(regs);
    343		value = regs->regs[insn.i_format.rt];
    344
    345		if (IS_ENABLED(CONFIG_EVA) && user)
    346			StoreHWE(addr, value, res);
    347		else
    348			StoreHW(addr, value, res);
    349
    350		if (res)
    351			goto fault;
    352		break;
    353
    354	case sw_op:
    355		if (user && !access_ok(addr, 4))
    356			goto sigbus;
    357
    358		compute_return_epc(regs);
    359		value = regs->regs[insn.i_format.rt];
    360
    361		if (IS_ENABLED(CONFIG_EVA) && user)
    362			StoreWE(addr, value, res);
    363		else
    364			StoreW(addr, value, res);
    365
    366		if (res)
    367			goto fault;
    368		break;
    369
    370	case sd_op:
    371#ifdef CONFIG_64BIT
    372		/*
    373		 * A 32-bit kernel might be running on a 64-bit processor.  But
    374		 * if we're on a 32-bit processor and an i-cache incoherency
    375		 * or race makes us see a 64-bit instruction here the sdl/sdr
    376		 * would blow up, so for now we don't handle unaligned 64-bit
    377		 * instructions on 32-bit kernels.
    378		 */
    379		if (user && !access_ok(addr, 8))
    380			goto sigbus;
    381
    382		compute_return_epc(regs);
    383		value = regs->regs[insn.i_format.rt];
    384		StoreDW(addr, value, res);
    385		if (res)
    386			goto fault;
    387		break;
    388#endif /* CONFIG_64BIT */
    389
    390		/* Cannot handle 64-bit instructions in 32-bit kernel */
    391		goto sigill;
    392
    393#ifdef CONFIG_MIPS_FP_SUPPORT
    394
    395	case lwc1_op:
    396	case ldc1_op:
    397	case swc1_op:
    398	case sdc1_op:
    399	case cop1x_op: {
    400		void __user *fault_addr = NULL;
    401
    402		die_if_kernel("Unaligned FP access in kernel code", regs);
    403		BUG_ON(!used_math());
    404
    405		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
    406					       &fault_addr);
    407		own_fpu(1);	/* Restore FPU state. */
    408
    409		/* Signal if something went wrong. */
    410		process_fpemu_return(res, fault_addr, 0);
    411
    412		if (res == 0)
    413			break;
    414		return;
    415	}
    416#endif /* CONFIG_MIPS_FP_SUPPORT */
    417
    418#ifdef CONFIG_CPU_HAS_MSA
    419
    420	case msa_op: {
    421		unsigned int wd, preempted;
    422		enum msa_2b_fmt df;
    423		union fpureg *fpr;
    424
    425		if (!cpu_has_msa)
    426			goto sigill;
    427
    428		/*
    429		 * If we've reached this point then userland should have taken
    430		 * the MSA disabled exception & initialised vector context at
    431		 * some point in the past.
    432		 */
    433		BUG_ON(!thread_msa_context_live());
    434
    435		df = insn.msa_mi10_format.df;
    436		wd = insn.msa_mi10_format.wd;
    437		fpr = &current->thread.fpu.fpr[wd];
    438
    439		switch (insn.msa_mi10_format.func) {
    440		case msa_ld_op:
    441			if (!access_ok(addr, sizeof(*fpr)))
    442				goto sigbus;
    443
    444			do {
    445				/*
    446				 * If we have live MSA context keep track of
    447				 * whether we get preempted in order to avoid
    448				 * the register context we load being clobbered
    449				 * by the live context as it's saved during
    450				 * preemption. If we don't have live context
    451				 * then it can't be saved to clobber the value
    452				 * we load.
    453				 */
    454				preempted = test_thread_flag(TIF_USEDMSA);
    455
    456				res = __copy_from_user_inatomic(fpr, addr,
    457								sizeof(*fpr));
    458				if (res)
    459					goto fault;
    460
    461				/*
    462				 * Update the hardware register if it is in use
    463				 * by the task in this quantum, in order to
    464				 * avoid having to save & restore the whole
    465				 * vector context.
    466				 */
    467				preempt_disable();
    468				if (test_thread_flag(TIF_USEDMSA)) {
    469					write_msa_wr(wd, fpr, df);
    470					preempted = 0;
    471				}
    472				preempt_enable();
    473			} while (preempted);
    474			break;
    475
    476		case msa_st_op:
    477			if (!access_ok(addr, sizeof(*fpr)))
    478				goto sigbus;
    479
    480			/*
    481			 * Update from the hardware register if it is in use by
    482			 * the task in this quantum, in order to avoid having to
    483			 * save & restore the whole vector context.
    484			 */
    485			preempt_disable();
    486			if (test_thread_flag(TIF_USEDMSA))
    487				read_msa_wr(wd, fpr, df);
    488			preempt_enable();
    489
    490			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
    491			if (res)
    492				goto fault;
    493			break;
    494
    495		default:
    496			goto sigbus;
    497		}
    498
    499		compute_return_epc(regs);
    500		break;
    501	}
    502#endif /* CONFIG_CPU_HAS_MSA */
    503
    504#ifndef CONFIG_CPU_MIPSR6
    505	/*
     506	 * COP2 is available to the implementor for application-specific use.
    507	 * It's up to applications to register a notifier chain and do
    508	 * whatever they have to do, including possible sending of signals.
    509	 *
    510	 * This instruction has been reallocated in Release 6
    511	 */
    512	case lwc2_op:
    513		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
    514		break;
    515
    516	case ldc2_op:
    517		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
    518		break;
    519
    520	case swc2_op:
    521		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
    522		break;
    523
    524	case sdc2_op:
    525		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
    526		break;
    527#endif
    528	default:
    529		/*
     530		 * Pheeee...  We encountered a yet unknown instruction or
     531		 * cache coherence problem.  Die sucker, die ...
    532		 */
    533		goto sigill;
    534	}
    535
    536#ifdef CONFIG_DEBUG_FS
    537	unaligned_instructions++;
    538#endif
    539
    540	return;
    541
    542fault:
    543	/* roll back jump/branch */
    544	regs->cp0_epc = origpc;
    545	regs->regs[31] = orig31;
    546	/* Did we have an exception handler installed? */
    547	if (fixup_exception(regs))
    548		return;
    549
    550	die_if_kernel("Unhandled kernel unaligned access", regs);
    551	force_sig(SIGSEGV);
    552
    553	return;
    554
    555sigbus:
    556	die_if_kernel("Unhandled kernel unaligned access", regs);
    557	force_sig(SIGBUS);
    558
    559	return;
    560
    561sigill:
    562	die_if_kernel
    563	    ("Unhandled kernel unaligned access or invalid instruction", regs);
    564	force_sig(SIGILL);
    565}
    566
    567/* Recode table from 16-bit register notation to 32-bit GPR. */
    568const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
    569
    570/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
    571static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
    572
    573static void emulate_load_store_microMIPS(struct pt_regs *regs,
    574					 void __user *addr)
    575{
    576	unsigned long value;
    577	unsigned int res;
    578	int i;
    579	unsigned int reg = 0, rvar;
    580	unsigned long orig31;
    581	u16 __user *pc16;
    582	u16 halfword;
    583	unsigned int word;
    584	unsigned long origpc, contpc;
    585	union mips_instruction insn;
    586	struct mm_decoded_insn mminsn;
    587	bool user = user_mode(regs);
    588
    589	origpc = regs->cp0_epc;
    590	orig31 = regs->regs[31];
    591
    592	mminsn.micro_mips_mode = 1;
    593
    594	/*
    595	 * This load never faults.
    596	 */
    597	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
    598	__get_user(halfword, pc16);
    599	pc16++;
    600	contpc = regs->cp0_epc + 2;
    601	word = ((unsigned int)halfword << 16);
    602	mminsn.pc_inc = 2;
    603
    604	if (!mm_insn_16bit(halfword)) {
    605		__get_user(halfword, pc16);
    606		pc16++;
    607		contpc = regs->cp0_epc + 4;
    608		mminsn.pc_inc = 4;
    609		word |= halfword;
    610	}
    611	mminsn.insn = word;
    612
    613	if (get_user(halfword, pc16))
    614		goto fault;
    615	mminsn.next_pc_inc = 2;
    616	word = ((unsigned int)halfword << 16);
    617
    618	if (!mm_insn_16bit(halfword)) {
    619		pc16++;
    620		if (get_user(halfword, pc16))
    621			goto fault;
    622		mminsn.next_pc_inc = 4;
    623		word |= halfword;
    624	}
    625	mminsn.next_insn = word;
    626
    627	insn = (union mips_instruction)(mminsn.insn);
    628	if (mm_isBranchInstr(regs, mminsn, &contpc))
    629		insn = (union mips_instruction)(mminsn.next_insn);
    630
    631	/*  Parse instruction to find what to do */
    632
    633	switch (insn.mm_i_format.opcode) {
    634
    635	case mm_pool32a_op:
    636		switch (insn.mm_x_format.func) {
    637		case mm_lwxs_op:
    638			reg = insn.mm_x_format.rd;
    639			goto loadW;
    640		}
    641
    642		goto sigbus;
    643
    644	case mm_pool32b_op:
    645		switch (insn.mm_m_format.func) {
    646		case mm_lwp_func:
    647			reg = insn.mm_m_format.rd;
    648			if (reg == 31)
    649				goto sigbus;
    650
    651			if (user && !access_ok(addr, 8))
    652				goto sigbus;
    653
    654			LoadW(addr, value, res);
    655			if (res)
    656				goto fault;
    657			regs->regs[reg] = value;
    658			addr += 4;
    659			LoadW(addr, value, res);
    660			if (res)
    661				goto fault;
    662			regs->regs[reg + 1] = value;
    663			goto success;
    664
    665		case mm_swp_func:
    666			reg = insn.mm_m_format.rd;
    667			if (reg == 31)
    668				goto sigbus;
    669
    670			if (user && !access_ok(addr, 8))
    671				goto sigbus;
    672
    673			value = regs->regs[reg];
    674			StoreW(addr, value, res);
    675			if (res)
    676				goto fault;
    677			addr += 4;
    678			value = regs->regs[reg + 1];
    679			StoreW(addr, value, res);
    680			if (res)
    681				goto fault;
    682			goto success;
    683
    684		case mm_ldp_func:
    685#ifdef CONFIG_64BIT
    686			reg = insn.mm_m_format.rd;
    687			if (reg == 31)
    688				goto sigbus;
    689
    690			if (user && !access_ok(addr, 16))
    691				goto sigbus;
    692
    693			LoadDW(addr, value, res);
    694			if (res)
    695				goto fault;
    696			regs->regs[reg] = value;
    697			addr += 8;
    698			LoadDW(addr, value, res);
    699			if (res)
    700				goto fault;
    701			regs->regs[reg + 1] = value;
    702			goto success;
    703#endif /* CONFIG_64BIT */
    704
    705			goto sigill;
    706
    707		case mm_sdp_func:
    708#ifdef CONFIG_64BIT
    709			reg = insn.mm_m_format.rd;
    710			if (reg == 31)
    711				goto sigbus;
    712
    713			if (user && !access_ok(addr, 16))
    714				goto sigbus;
    715
    716			value = regs->regs[reg];
    717			StoreDW(addr, value, res);
    718			if (res)
    719				goto fault;
    720			addr += 8;
    721			value = regs->regs[reg + 1];
    722			StoreDW(addr, value, res);
    723			if (res)
    724				goto fault;
    725			goto success;
    726#endif /* CONFIG_64BIT */
    727
    728			goto sigill;
    729
    730		case mm_lwm32_func:
    731			reg = insn.mm_m_format.rd;
    732			rvar = reg & 0xf;
    733			if ((rvar > 9) || !reg)
    734				goto sigill;
    735			if (reg & 0x10) {
    736				if (user && !access_ok(addr, 4 * (rvar + 1)))
    737					goto sigbus;
    738			} else {
    739				if (user && !access_ok(addr, 4 * rvar))
    740					goto sigbus;
    741			}
    742			if (rvar == 9)
    743				rvar = 8;
    744			for (i = 16; rvar; rvar--, i++) {
    745				LoadW(addr, value, res);
    746				if (res)
    747					goto fault;
    748				addr += 4;
    749				regs->regs[i] = value;
    750			}
    751			if ((reg & 0xf) == 9) {
    752				LoadW(addr, value, res);
    753				if (res)
    754					goto fault;
    755				addr += 4;
    756				regs->regs[30] = value;
    757			}
    758			if (reg & 0x10) {
    759				LoadW(addr, value, res);
    760				if (res)
    761					goto fault;
    762				regs->regs[31] = value;
    763			}
    764			goto success;
    765
    766		case mm_swm32_func:
    767			reg = insn.mm_m_format.rd;
    768			rvar = reg & 0xf;
    769			if ((rvar > 9) || !reg)
    770				goto sigill;
    771			if (reg & 0x10) {
    772				if (user && !access_ok(addr, 4 * (rvar + 1)))
    773					goto sigbus;
    774			} else {
    775				if (user && !access_ok(addr, 4 * rvar))
    776					goto sigbus;
    777			}
    778			if (rvar == 9)
    779				rvar = 8;
    780			for (i = 16; rvar; rvar--, i++) {
    781				value = regs->regs[i];
    782				StoreW(addr, value, res);
    783				if (res)
    784					goto fault;
    785				addr += 4;
    786			}
    787			if ((reg & 0xf) == 9) {
    788				value = regs->regs[30];
    789				StoreW(addr, value, res);
    790				if (res)
    791					goto fault;
    792				addr += 4;
    793			}
    794			if (reg & 0x10) {
    795				value = regs->regs[31];
    796				StoreW(addr, value, res);
    797				if (res)
    798					goto fault;
    799			}
    800			goto success;
    801
    802		case mm_ldm_func:
    803#ifdef CONFIG_64BIT
    804			reg = insn.mm_m_format.rd;
    805			rvar = reg & 0xf;
    806			if ((rvar > 9) || !reg)
    807				goto sigill;
    808			if (reg & 0x10) {
    809				if (user && !access_ok(addr, 8 * (rvar + 1)))
    810					goto sigbus;
    811			} else {
    812				if (user && !access_ok(addr, 8 * rvar))
    813					goto sigbus;
    814			}
    815			if (rvar == 9)
    816				rvar = 8;
    817
    818			for (i = 16; rvar; rvar--, i++) {
    819				LoadDW(addr, value, res);
    820				if (res)
    821					goto fault;
     822				addr += 8;
    823				regs->regs[i] = value;
    824			}
    825			if ((reg & 0xf) == 9) {
    826				LoadDW(addr, value, res);
    827				if (res)
    828					goto fault;
    829				addr += 8;
    830				regs->regs[30] = value;
    831			}
    832			if (reg & 0x10) {
    833				LoadDW(addr, value, res);
    834				if (res)
    835					goto fault;
    836				regs->regs[31] = value;
    837			}
    838			goto success;
    839#endif /* CONFIG_64BIT */
    840
    841			goto sigill;
    842
    843		case mm_sdm_func:
    844#ifdef CONFIG_64BIT
    845			reg = insn.mm_m_format.rd;
    846			rvar = reg & 0xf;
    847			if ((rvar > 9) || !reg)
    848				goto sigill;
    849			if (reg & 0x10) {
    850				if (user && !access_ok(addr, 8 * (rvar + 1)))
    851					goto sigbus;
    852			} else {
    853				if (user && !access_ok(addr, 8 * rvar))
    854					goto sigbus;
    855			}
    856			if (rvar == 9)
    857				rvar = 8;
    858
    859			for (i = 16; rvar; rvar--, i++) {
    860				value = regs->regs[i];
    861				StoreDW(addr, value, res);
    862				if (res)
    863					goto fault;
    864				addr += 8;
    865			}
    866			if ((reg & 0xf) == 9) {
    867				value = regs->regs[30];
    868				StoreDW(addr, value, res);
    869				if (res)
    870					goto fault;
    871				addr += 8;
    872			}
    873			if (reg & 0x10) {
    874				value = regs->regs[31];
    875				StoreDW(addr, value, res);
    876				if (res)
    877					goto fault;
    878			}
    879			goto success;
    880#endif /* CONFIG_64BIT */
    881
    882			goto sigill;
    883
    884			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
    885		}
    886
    887		goto sigbus;
    888
    889	case mm_pool32c_op:
    890		switch (insn.mm_m_format.func) {
    891		case mm_lwu_func:
    892			reg = insn.mm_m_format.rd;
    893			goto loadWU;
    894		}
    895
    896		/*  LL,SC,LLD,SCD are not serviced */
    897		goto sigbus;
    898
    899#ifdef CONFIG_MIPS_FP_SUPPORT
    900	case mm_pool32f_op:
    901		switch (insn.mm_x_format.func) {
    902		case mm_lwxc1_func:
    903		case mm_swxc1_func:
    904		case mm_ldxc1_func:
    905		case mm_sdxc1_func:
    906			goto fpu_emul;
    907		}
    908
    909		goto sigbus;
    910
    911	case mm_ldc132_op:
    912	case mm_sdc132_op:
    913	case mm_lwc132_op:
    914	case mm_swc132_op: {
    915		void __user *fault_addr = NULL;
    916
    917fpu_emul:
    918		/* roll back jump/branch */
    919		regs->cp0_epc = origpc;
    920		regs->regs[31] = orig31;
    921
    922		die_if_kernel("Unaligned FP access in kernel code", regs);
    923		BUG_ON(!used_math());
    924		BUG_ON(!is_fpu_owner());
    925
    926		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
    927					       &fault_addr);
    928		own_fpu(1);	/* restore FPU state */
    929
    930		/* If something went wrong, signal */
    931		process_fpemu_return(res, fault_addr, 0);
    932
    933		if (res == 0)
    934			goto success;
    935		return;
    936	}
    937#endif /* CONFIG_MIPS_FP_SUPPORT */
    938
    939	case mm_lh32_op:
    940		reg = insn.mm_i_format.rt;
    941		goto loadHW;
    942
    943	case mm_lhu32_op:
    944		reg = insn.mm_i_format.rt;
    945		goto loadHWU;
    946
    947	case mm_lw32_op:
    948		reg = insn.mm_i_format.rt;
    949		goto loadW;
    950
    951	case mm_sh32_op:
    952		reg = insn.mm_i_format.rt;
    953		goto storeHW;
    954
    955	case mm_sw32_op:
    956		reg = insn.mm_i_format.rt;
    957		goto storeW;
    958
    959	case mm_ld32_op:
    960		reg = insn.mm_i_format.rt;
    961		goto loadDW;
    962
    963	case mm_sd32_op:
    964		reg = insn.mm_i_format.rt;
    965		goto storeDW;
    966
    967	case mm_pool16c_op:
    968		switch (insn.mm16_m_format.func) {
    969		case mm_lwm16_op:
    970			reg = insn.mm16_m_format.rlist;
    971			rvar = reg + 1;
    972			if (user && !access_ok(addr, 4 * rvar))
    973				goto sigbus;
    974
    975			for (i = 16; rvar; rvar--, i++) {
    976				LoadW(addr, value, res);
    977				if (res)
    978					goto fault;
    979				addr += 4;
    980				regs->regs[i] = value;
    981			}
    982			LoadW(addr, value, res);
    983			if (res)
    984				goto fault;
    985			regs->regs[31] = value;
    986
    987			goto success;
    988
    989		case mm_swm16_op:
    990			reg = insn.mm16_m_format.rlist;
    991			rvar = reg + 1;
    992			if (user && !access_ok(addr, 4 * rvar))
    993				goto sigbus;
    994
    995			for (i = 16; rvar; rvar--, i++) {
    996				value = regs->regs[i];
    997				StoreW(addr, value, res);
    998				if (res)
    999					goto fault;
   1000				addr += 4;
   1001			}
   1002			value = regs->regs[31];
   1003			StoreW(addr, value, res);
   1004			if (res)
   1005				goto fault;
   1006
   1007			goto success;
   1008
   1009		}
   1010
   1011		goto sigbus;
   1012
   1013	case mm_lhu16_op:
   1014		reg = reg16to32[insn.mm16_rb_format.rt];
   1015		goto loadHWU;
   1016
   1017	case mm_lw16_op:
   1018		reg = reg16to32[insn.mm16_rb_format.rt];
   1019		goto loadW;
   1020
   1021	case mm_sh16_op:
   1022		reg = reg16to32st[insn.mm16_rb_format.rt];
   1023		goto storeHW;
   1024
   1025	case mm_sw16_op:
   1026		reg = reg16to32st[insn.mm16_rb_format.rt];
   1027		goto storeW;
   1028
   1029	case mm_lwsp16_op:
   1030		reg = insn.mm16_r5_format.rt;
   1031		goto loadW;
   1032
   1033	case mm_swsp16_op:
   1034		reg = insn.mm16_r5_format.rt;
   1035		goto storeW;
   1036
   1037	case mm_lwgp16_op:
   1038		reg = reg16to32[insn.mm16_r3_format.rt];
   1039		goto loadW;
   1040
   1041	default:
   1042		goto sigill;
   1043	}
   1044
   1045loadHW:
   1046	if (user && !access_ok(addr, 2))
   1047		goto sigbus;
   1048
   1049	LoadHW(addr, value, res);
   1050	if (res)
   1051		goto fault;
   1052	regs->regs[reg] = value;
   1053	goto success;
   1054
   1055loadHWU:
   1056	if (user && !access_ok(addr, 2))
   1057		goto sigbus;
   1058
   1059	LoadHWU(addr, value, res);
   1060	if (res)
   1061		goto fault;
   1062	regs->regs[reg] = value;
   1063	goto success;
   1064
   1065loadW:
   1066	if (user && !access_ok(addr, 4))
   1067		goto sigbus;
   1068
   1069	LoadW(addr, value, res);
   1070	if (res)
   1071		goto fault;
   1072	regs->regs[reg] = value;
   1073	goto success;
   1074
   1075loadWU:
   1076#ifdef CONFIG_64BIT
   1077	/*
   1078	 * A 32-bit kernel might be running on a 64-bit processor.  But
   1079	 * if we're on a 32-bit processor and an i-cache incoherency
   1080	 * or race makes us see a 64-bit instruction here the sdl/sdr
   1081	 * would blow up, so for now we don't handle unaligned 64-bit
   1082	 * instructions on 32-bit kernels.
   1083	 */
   1084	if (user && !access_ok(addr, 4))
   1085		goto sigbus;
   1086
   1087	LoadWU(addr, value, res);
   1088	if (res)
   1089		goto fault;
   1090	regs->regs[reg] = value;
   1091	goto success;
   1092#endif /* CONFIG_64BIT */
   1093
   1094	/* Cannot handle 64-bit instructions in 32-bit kernel */
   1095	goto sigill;
   1096
   1097loadDW:
   1098#ifdef CONFIG_64BIT
   1099	/*
   1100	 * A 32-bit kernel might be running on a 64-bit processor.  But
   1101	 * if we're on a 32-bit processor and an i-cache incoherency
   1102	 * or race makes us see a 64-bit instruction here the sdl/sdr
   1103	 * would blow up, so for now we don't handle unaligned 64-bit
   1104	 * instructions on 32-bit kernels.
   1105	 */
   1106	if (user && !access_ok(addr, 8))
   1107		goto sigbus;
   1108
   1109	LoadDW(addr, value, res);
   1110	if (res)
   1111		goto fault;
   1112	regs->regs[reg] = value;
   1113	goto success;
   1114#endif /* CONFIG_64BIT */
   1115
   1116	/* Cannot handle 64-bit instructions in 32-bit kernel */
   1117	goto sigill;
   1118
   1119storeHW:
   1120	if (user && !access_ok(addr, 2))
   1121		goto sigbus;
   1122
   1123	value = regs->regs[reg];
   1124	StoreHW(addr, value, res);
   1125	if (res)
   1126		goto fault;
   1127	goto success;
   1128
   1129storeW:
   1130	if (user && !access_ok(addr, 4))
   1131		goto sigbus;
   1132
   1133	value = regs->regs[reg];
   1134	StoreW(addr, value, res);
   1135	if (res)
   1136		goto fault;
   1137	goto success;
   1138
   1139storeDW:
   1140#ifdef CONFIG_64BIT
   1141	/*
   1142	 * A 32-bit kernel might be running on a 64-bit processor.  But
   1143	 * if we're on a 32-bit processor and an i-cache incoherency
   1144	 * or race makes us see a 64-bit instruction here the sdl/sdr
   1145	 * would blow up, so for now we don't handle unaligned 64-bit
   1146	 * instructions on 32-bit kernels.
   1147	 */
   1148	if (user && !access_ok(addr, 8))
   1149		goto sigbus;
   1150
   1151	value = regs->regs[reg];
   1152	StoreDW(addr, value, res);
   1153	if (res)
   1154		goto fault;
   1155	goto success;
   1156#endif /* CONFIG_64BIT */
   1157
   1158	/* Cannot handle 64-bit instructions in 32-bit kernel */
   1159	goto sigill;
   1160
   1161success:
   1162	regs->cp0_epc = contpc;	/* advance or branch */
   1163
   1164#ifdef CONFIG_DEBUG_FS
   1165	unaligned_instructions++;
   1166#endif
   1167	return;
   1168
   1169fault:
   1170	/* roll back jump/branch */
   1171	regs->cp0_epc = origpc;
   1172	regs->regs[31] = orig31;
   1173	/* Did we have an exception handler installed? */
   1174	if (fixup_exception(regs))
   1175		return;
   1176
   1177	die_if_kernel("Unhandled kernel unaligned access", regs);
   1178	force_sig(SIGSEGV);
   1179
   1180	return;
   1181
   1182sigbus:
   1183	die_if_kernel("Unhandled kernel unaligned access", regs);
   1184	force_sig(SIGBUS);
   1185
   1186	return;
   1187
   1188sigill:
   1189	die_if_kernel
   1190	    ("Unhandled kernel unaligned access or invalid instruction", regs);
   1191	force_sig(SIGILL);
   1192}
   1193
   1194static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
   1195{
   1196	unsigned long value;
   1197	unsigned int res;
   1198	int reg;
   1199	unsigned long orig31;
   1200	u16 __user *pc16;
   1201	unsigned long origpc;
   1202	union mips16e_instruction mips16inst, oldinst;
   1203	unsigned int opcode;
   1204	int extended = 0;
   1205	bool user = user_mode(regs);
   1206
   1207	origpc = regs->cp0_epc;
   1208	orig31 = regs->regs[31];
   1209	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
   1210	/*
   1211	 * This load never faults.
   1212	 */
   1213	__get_user(mips16inst.full, pc16);
   1214	oldinst = mips16inst;
   1215
   1216	/* skip EXTEND instruction */
   1217	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
   1218		extended = 1;
   1219		pc16++;
   1220		__get_user(mips16inst.full, pc16);
   1221	} else if (delay_slot(regs)) {
   1222		/*  skip jump instructions */
   1223		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
   1224		if (mips16inst.ri.opcode == MIPS16e_jal_op)
   1225			pc16++;
   1226		pc16++;
   1227		if (get_user(mips16inst.full, pc16))
   1228			goto sigbus;
   1229	}
   1230
   1231	opcode = mips16inst.ri.opcode;
   1232	switch (opcode) {
   1233	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
   1234		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
   1235		case MIPS16e_ldpc_func:
   1236		case MIPS16e_ldsp_func:
   1237			reg = reg16to32[mips16inst.ri64.ry];
   1238			goto loadDW;
   1239
   1240		case MIPS16e_sdsp_func:
   1241			reg = reg16to32[mips16inst.ri64.ry];
   1242			goto writeDW;
   1243
   1244		case MIPS16e_sdrasp_func:
   1245			reg = 29;	/* GPRSP */
   1246			goto writeDW;
   1247		}
   1248
   1249		goto sigbus;
   1250
   1251	case MIPS16e_swsp_op:
   1252		reg = reg16to32[mips16inst.ri.rx];
   1253		if (extended && cpu_has_mips16e2)
   1254			switch (mips16inst.ri.imm >> 5) {
   1255			case 0:		/* SWSP */
   1256			case 1:		/* SWGP */
   1257				break;
   1258			case 2:		/* SHGP */
   1259				opcode = MIPS16e_sh_op;
   1260				break;
   1261			default:
   1262				goto sigbus;
   1263			}
   1264		break;
   1265
   1266	case MIPS16e_lwpc_op:
   1267		reg = reg16to32[mips16inst.ri.rx];
   1268		break;
   1269
   1270	case MIPS16e_lwsp_op:
   1271		reg = reg16to32[mips16inst.ri.rx];
   1272		if (extended && cpu_has_mips16e2)
   1273			switch (mips16inst.ri.imm >> 5) {
   1274			case 0:		/* LWSP */
   1275			case 1:		/* LWGP */
   1276				break;
   1277			case 2:		/* LHGP */
   1278				opcode = MIPS16e_lh_op;
   1279				break;
   1280			case 4:		/* LHUGP */
   1281				opcode = MIPS16e_lhu_op;
   1282				break;
   1283			default:
   1284				goto sigbus;
   1285			}
   1286		break;
   1287
   1288	case MIPS16e_i8_op:
   1289		if (mips16inst.i8.func != MIPS16e_swrasp_func)
   1290			goto sigbus;
   1291		reg = 29;	/* GPRSP */
   1292		break;
   1293
   1294	default:
   1295		reg = reg16to32[mips16inst.rri.ry];
   1296		break;
   1297	}
   1298
   1299	switch (opcode) {
   1300
   1301	case MIPS16e_lb_op:
   1302	case MIPS16e_lbu_op:
   1303	case MIPS16e_sb_op:
   1304		goto sigbus;
   1305
   1306	case MIPS16e_lh_op:
   1307		if (user && !access_ok(addr, 2))
   1308			goto sigbus;
   1309
   1310		LoadHW(addr, value, res);
   1311		if (res)
   1312			goto fault;
   1313		MIPS16e_compute_return_epc(regs, &oldinst);
   1314		regs->regs[reg] = value;
   1315		break;
   1316
   1317	case MIPS16e_lhu_op:
   1318		if (user && !access_ok(addr, 2))
   1319			goto sigbus;
   1320
   1321		LoadHWU(addr, value, res);
   1322		if (res)
   1323			goto fault;
   1324		MIPS16e_compute_return_epc(regs, &oldinst);
   1325		regs->regs[reg] = value;
   1326		break;
   1327
   1328	case MIPS16e_lw_op:
   1329	case MIPS16e_lwpc_op:
   1330	case MIPS16e_lwsp_op:
   1331		if (user && !access_ok(addr, 4))
   1332			goto sigbus;
   1333
   1334		LoadW(addr, value, res);
   1335		if (res)
   1336			goto fault;
   1337		MIPS16e_compute_return_epc(regs, &oldinst);
   1338		regs->regs[reg] = value;
   1339		break;
   1340
   1341	case MIPS16e_lwu_op:
   1342#ifdef CONFIG_64BIT
   1343		/*
   1344		 * A 32-bit kernel might be running on a 64-bit processor.  But
   1345		 * if we're on a 32-bit processor and an i-cache incoherency
   1346		 * or race makes us see a 64-bit instruction here the sdl/sdr
   1347		 * would blow up, so for now we don't handle unaligned 64-bit
   1348		 * instructions on 32-bit kernels.
   1349		 */
   1350		if (user && !access_ok(addr, 4))
   1351			goto sigbus;
   1352
   1353		LoadWU(addr, value, res);
   1354		if (res)
   1355			goto fault;
   1356		MIPS16e_compute_return_epc(regs, &oldinst);
   1357		regs->regs[reg] = value;
   1358		break;
   1359#endif /* CONFIG_64BIT */
   1360
   1361		/* Cannot handle 64-bit instructions in 32-bit kernel */
   1362		goto sigill;
   1363
   1364	case MIPS16e_ld_op:
   1365loadDW:
   1366#ifdef CONFIG_64BIT
   1367		/*
   1368		 * A 32-bit kernel might be running on a 64-bit processor.  But
   1369		 * if we're on a 32-bit processor and an i-cache incoherency
   1370		 * or race makes us see a 64-bit instruction here the sdl/sdr
   1371		 * would blow up, so for now we don't handle unaligned 64-bit
   1372		 * instructions on 32-bit kernels.
   1373		 */
   1374		if (user && !access_ok(addr, 8))
   1375			goto sigbus;
   1376
   1377		LoadDW(addr, value, res);
   1378		if (res)
   1379			goto fault;
   1380		MIPS16e_compute_return_epc(regs, &oldinst);
   1381		regs->regs[reg] = value;
   1382		break;
   1383#endif /* CONFIG_64BIT */
   1384
   1385		/* Cannot handle 64-bit instructions in 32-bit kernel */
   1386		goto sigill;
   1387
   1388	case MIPS16e_sh_op:
   1389		if (user && !access_ok(addr, 2))
   1390			goto sigbus;
   1391
   1392		MIPS16e_compute_return_epc(regs, &oldinst);
   1393		value = regs->regs[reg];
   1394		StoreHW(addr, value, res);
   1395		if (res)
   1396			goto fault;
   1397		break;
   1398
   1399	case MIPS16e_sw_op:
   1400	case MIPS16e_swsp_op:
   1401	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
   1402		if (user && !access_ok(addr, 4))
   1403			goto sigbus;
   1404
   1405		MIPS16e_compute_return_epc(regs, &oldinst);
   1406		value = regs->regs[reg];
   1407		StoreW(addr, value, res);
   1408		if (res)
   1409			goto fault;
   1410		break;
   1411
   1412	case MIPS16e_sd_op:
   1413writeDW:
   1414#ifdef CONFIG_64BIT
   1415		/*
   1416		 * A 32-bit kernel might be running on a 64-bit processor.  But
   1417		 * if we're on a 32-bit processor and an i-cache incoherency
   1418		 * or race makes us see a 64-bit instruction here the sdl/sdr
   1419		 * would blow up, so for now we don't handle unaligned 64-bit
   1420		 * instructions on 32-bit kernels.
   1421		 */
   1422		if (user && !access_ok(addr, 8))
   1423			goto sigbus;
   1424
   1425		MIPS16e_compute_return_epc(regs, &oldinst);
   1426		value = regs->regs[reg];
   1427		StoreDW(addr, value, res);
   1428		if (res)
   1429			goto fault;
   1430		break;
   1431#endif /* CONFIG_64BIT */
   1432
   1433		/* Cannot handle 64-bit instructions in 32-bit kernel */
   1434		goto sigill;
   1435
   1436	default:
   1437		/*
    1438		 * Pheeee...  We encountered a yet unknown instruction or
    1439		 * cache coherence problem.  Die sucker, die ...
   1440		 */
   1441		goto sigill;
   1442	}
   1443
   1444#ifdef CONFIG_DEBUG_FS
   1445	unaligned_instructions++;
   1446#endif
   1447
   1448	return;
   1449
   1450fault:
   1451	/* roll back jump/branch */
   1452	regs->cp0_epc = origpc;
   1453	regs->regs[31] = orig31;
   1454	/* Did we have an exception handler installed? */
   1455	if (fixup_exception(regs))
   1456		return;
   1457
   1458	die_if_kernel("Unhandled kernel unaligned access", regs);
   1459	force_sig(SIGSEGV);
   1460
   1461	return;
   1462
   1463sigbus:
   1464	die_if_kernel("Unhandled kernel unaligned access", regs);
   1465	force_sig(SIGBUS);
   1466
   1467	return;
   1468
   1469sigill:
   1470	die_if_kernel
   1471	    ("Unhandled kernel unaligned access or invalid instruction", regs);
   1472	force_sig(SIGILL);
   1473}
   1474
   1475asmlinkage void do_ade(struct pt_regs *regs)
   1476{
   1477	enum ctx_state prev_state;
   1478	unsigned int *pc;
   1479
   1480	prev_state = exception_enter();
   1481	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
   1482			1, regs, regs->cp0_badvaddr);
   1483
   1484#ifdef CONFIG_64BIT
   1485	/*
    1486	 * Check whether we are hitting the space between the CPU-implemented
    1487	 * maximum virtual user address and the 64-bit maximum virtual user
    1488	 * address, and do exception handling to get EFAULTs for get_user/put_user.
   1489	 */
   1490	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
   1491	    (regs->cp0_badvaddr < XKSSEG)) {
   1492		if (fixup_exception(regs)) {
   1493			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
   1494			return;
   1495		}
   1496		goto sigbus;
   1497	}
   1498#endif
   1499
   1500	/*
   1501	 * Did we catch a fault trying to load an instruction?
   1502	 */
   1503	if (regs->cp0_badvaddr == regs->cp0_epc)
   1504		goto sigbus;
   1505
   1506	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
   1507		goto sigbus;
   1508	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
   1509		goto sigbus;
   1510
   1511	/*
   1512	 * Do branch emulation only if we didn't forward the exception.
    1513	 * This is all rather ugly ...
   1514	 */
   1515
   1516	/*
   1517	 * Are we running in microMIPS mode?
   1518	 */
   1519	if (get_isa16_mode(regs->cp0_epc)) {
   1520		/*
   1521		 * Did we catch a fault trying to load an instruction in
   1522		 * 16-bit mode?
   1523		 */
   1524		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
   1525			goto sigbus;
   1526		if (unaligned_action == UNALIGNED_ACTION_SHOW)
   1527			show_registers(regs);
   1528
   1529		if (cpu_has_mmips) {
   1530			emulate_load_store_microMIPS(regs,
   1531				(void __user *)regs->cp0_badvaddr);
   1532			return;
   1533		}
   1534
   1535		if (cpu_has_mips16) {
   1536			emulate_load_store_MIPS16e(regs,
   1537				(void __user *)regs->cp0_badvaddr);
   1538			return;
   1539		}
   1540
   1541		goto sigbus;
   1542	}
   1543
   1544	if (unaligned_action == UNALIGNED_ACTION_SHOW)
   1545		show_registers(regs);
   1546	pc = (unsigned int *)exception_epc(regs);
   1547
   1548	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
   1549
   1550	return;
   1551
   1552sigbus:
   1553	die_if_kernel("Kernel unaligned instruction access", regs);
   1554	force_sig(SIGBUS);
   1555
   1556	/*
   1557	 * XXX On return from the signal handler we should advance the epc
   1558	 */
   1559	exception_exit(prev_state);
   1560}
   1561
   1562#ifdef CONFIG_DEBUG_FS
   1563static int __init debugfs_unaligned(void)
   1564{
   1565	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
   1566			   &unaligned_instructions);
   1567	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
   1568			   mips_debugfs_dir, &unaligned_action);
   1569	return 0;
   1570}
   1571arch_initcall(debugfs_unaligned);
   1572#endif
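
When CONFIG_DEBUG_FS is enabled, the two knobs above are exposed as debugfs files:
unaligned_instructions is a running count of emulated accesses, and unaligned_action
selects the policy (0 = UNALIGNED_ACTION_QUIET, emulate silently; 1 =
UNALIGNED_ACTION_SIGNAL, send SIGBUS instead of emulating; 2 = UNALIGNED_ACTION_SHOW,
dump registers and then emulate). Below is a minimal userspace sketch for reading the
counter; the /sys/kernel/debug mount point and the "mips" directory name for
mips_debugfs_dir are assumptions, not taken from this file:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed path: debugfs mounted at /sys/kernel/debug, with
		 * mips_debugfs_dir appearing as the "mips" subdirectory. */
		FILE *f = fopen("/sys/kernel/debug/mips/unaligned_instructions", "r");
		unsigned int count;

		if (!f || fscanf(f, "%u", &count) != 1) {
			perror("unaligned_instructions");
			return 1;
		}
		printf("emulated unaligned accesses: %u\n", count);
		fclose(f);

		return 0;
	}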