cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

traps.c (24249B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  linux/arch/arm/kernel/traps.c
      4 *
      5 *  Copyright (C) 1995-2009 Russell King
      6 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
      7 *
      8 *  'traps.c' handles hardware exceptions after we have saved some state in
      9 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
     10 *  kill the offending process.
     11 */
     12#include <linux/signal.h>
     13#include <linux/personality.h>
     14#include <linux/kallsyms.h>
     15#include <linux/spinlock.h>
     16#include <linux/uaccess.h>
     17#include <linux/hardirq.h>
     18#include <linux/kdebug.h>
     19#include <linux/kprobes.h>
     20#include <linux/module.h>
     21#include <linux/kexec.h>
     22#include <linux/bug.h>
     23#include <linux/delay.h>
     24#include <linux/init.h>
     25#include <linux/sched/signal.h>
     26#include <linux/sched/debug.h>
     27#include <linux/sched/task_stack.h>
     28#include <linux/irq.h>
     29
     30#include <linux/atomic.h>
     31#include <asm/cacheflush.h>
     32#include <asm/exception.h>
     33#include <asm/spectre.h>
     34#include <asm/unistd.h>
     35#include <asm/traps.h>
     36#include <asm/ptrace.h>
     37#include <asm/unwind.h>
     38#include <asm/tls.h>
     39#include <asm/stacktrace.h>
     40#include <asm/system_misc.h>
     41#include <asm/opcodes.h>
     42
     43
     44static const char *handler[]= {
     45	"prefetch abort",
     46	"data abort",
     47	"address exception",
     48	"interrupt",
     49	"undefined instruction",
     50};
     51
     52void *vectors_page;
     53
     54#ifdef CONFIG_DEBUG_USER
     55unsigned int user_debug;
     56
     57static int __init user_debug_setup(char *str)
     58{
     59	get_option(&str, &user_debug);
     60	return 1;
     61}
     62__setup("user_debug=", user_debug_setup);
     63#endif
     64
     65void dump_backtrace_entry(unsigned long where, unsigned long from,
     66			  unsigned long frame, const char *loglvl)
     67{
     68	unsigned long end = frame + 4 + sizeof(struct pt_regs);
     69
     70	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
     71	    IS_ENABLED(CONFIG_CC_IS_GCC) &&
     72	    end > ALIGN(frame, THREAD_SIZE)) {
     73		/*
     74		 * If we are walking past the end of the stack, it may be due
     75		 * to the fact that we are on an IRQ or overflow stack. In this
     76		 * case, we can load the address of the other stack from the
     77		 * frame record.
     78		 */
     79		frame = ((unsigned long *)frame)[-2] - 4;
     80		end = frame + 4 + sizeof(struct pt_regs);
     81	}
     82
     83#ifndef CONFIG_KALLSYMS
     84	printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
     85		loglvl, where, from);
     86#elif defined CONFIG_BACKTRACE_VERBOSE
     87	printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
     88		loglvl, where, (void *)where, from, (void *)from);
     89#else
     90	printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from);
     91#endif
     92
     93	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
     94		dump_mem(loglvl, "Exception stack", frame + 4, end);
     95}
     96
     97void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
     98{
     99	char str[80], *p;
    100	unsigned int x;
    101	int reg;
    102
    103	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
    104		if (instruction & BIT(reg)) {
    105			p += sprintf(p, " r%d:%08x", reg, *stack--);
    106			if (++x == 6) {
    107				x = 0;
    108				p = str;
    109				printk("%s%s\n", loglvl, str);
    110			}
    111		}
    112	}
    113	if (p != str)
    114		printk("%s%s\n", loglvl, str);
    115}
    116
    117#ifndef CONFIG_ARM_UNWIND
    118/*
     119 * Stack pointers should always be within the kernel's view of
    120 * physical memory.  If it is not there, then we can't dump
    121 * out any information relating to the stack.
    122 */
    123static int verify_stack(unsigned long sp)
    124{
    125	if (sp < PAGE_OFFSET ||
    126	    (!IS_ENABLED(CONFIG_VMAP_STACK) &&
    127	     sp > (unsigned long)high_memory && high_memory != NULL))
    128		return -EFAULT;
    129
    130	return 0;
    131}
    132#endif
    133
    134/*
    135 * Dump out the contents of some memory nicely...
    136 */
    137void dump_mem(const char *lvl, const char *str, unsigned long bottom,
    138	      unsigned long top)
    139{
    140	unsigned long first;
    141	int i;
    142
    143	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
    144
    145	for (first = bottom & ~31; first < top; first += 32) {
    146		unsigned long p;
    147		char str[sizeof(" 12345678") * 8 + 1];
    148
    149		memset(str, ' ', sizeof(str));
    150		str[sizeof(str) - 1] = '\0';
    151
    152		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
    153			if (p >= bottom && p < top) {
    154				unsigned long val;
    155				if (!get_kernel_nofault(val, (unsigned long *)p))
    156					sprintf(str + i * 9, " %08lx", val);
    157				else
    158					sprintf(str + i * 9, " ????????");
    159			}
    160		}
    161		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
    162	}
    163}
    164
    165static void dump_instr(const char *lvl, struct pt_regs *regs)
    166{
    167	unsigned long addr = instruction_pointer(regs);
    168	const int thumb = thumb_mode(regs);
    169	const int width = thumb ? 4 : 8;
    170	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
    171	int i;
    172
    173	/*
    174	 * Note that we now dump the code first, just in case the backtrace
    175	 * kills us.
    176	 */
    177
    178	for (i = -4; i < 1 + !!thumb; i++) {
    179		unsigned int val, bad;
    180
    181		if (!user_mode(regs)) {
    182			if (thumb) {
    183				u16 val16;
    184				bad = get_kernel_nofault(val16, &((u16 *)addr)[i]);
    185				val = val16;
    186			} else {
    187				bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
    188			}
    189		} else {
    190			if (thumb)
    191				bad = get_user(val, &((u16 *)addr)[i]);
    192			else
    193				bad = get_user(val, &((u32 *)addr)[i]);
    194		}
    195
    196		if (!bad)
    197			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
    198					width, val);
    199		else {
    200			p += sprintf(p, "bad PC value");
    201			break;
    202		}
    203	}
    204	printk("%sCode: %s\n", lvl, str);
    205}
    206
    207#ifdef CONFIG_ARM_UNWIND
    208static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
    209				  const char *loglvl)
    210{
    211	unwind_backtrace(regs, tsk, loglvl);
    212}
    213#else
    214static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
    215			   const char *loglvl)
    216{
    217	unsigned int fp, mode;
    218	int ok = 1;
    219
    220	printk("%sBacktrace: ", loglvl);
    221
    222	if (!tsk)
    223		tsk = current;
    224
    225	if (regs) {
    226		fp = frame_pointer(regs);
    227		mode = processor_mode(regs);
    228	} else if (tsk != current) {
    229		fp = thread_saved_fp(tsk);
    230		mode = 0x10;
    231	} else {
    232		asm("mov %0, fp" : "=r" (fp) : : "cc");
    233		mode = 0x10;
    234	}
    235
    236	if (!fp) {
    237		pr_cont("no frame pointer");
    238		ok = 0;
    239	} else if (verify_stack(fp)) {
    240		pr_cont("invalid frame pointer 0x%08x", fp);
    241		ok = 0;
    242	} else if (fp < (unsigned long)end_of_stack(tsk))
    243		pr_cont("frame pointer underflow");
    244	pr_cont("\n");
    245
    246	if (ok)
    247		c_backtrace(fp, mode, loglvl);
    248}
    249#endif
    250
    251void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
    252{
    253	dump_backtrace(NULL, tsk, loglvl);
    254	barrier();
    255}
    256
    257#ifdef CONFIG_PREEMPT
    258#define S_PREEMPT " PREEMPT"
    259#elif defined(CONFIG_PREEMPT_RT)
    260#define S_PREEMPT " PREEMPT_RT"
    261#else
    262#define S_PREEMPT ""
    263#endif
    264#ifdef CONFIG_SMP
    265#define S_SMP " SMP"
    266#else
    267#define S_SMP ""
    268#endif
    269#ifdef CONFIG_THUMB2_KERNEL
    270#define S_ISA " THUMB2"
    271#else
    272#define S_ISA " ARM"
    273#endif
    274
    275static int __die(const char *str, int err, struct pt_regs *regs)
    276{
    277	struct task_struct *tsk = current;
    278	static int die_counter;
    279	int ret;
    280
    281	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
    282	         str, err, ++die_counter);
    283
    284	/* trap and error numbers are mostly meaningless on ARM */
    285	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
    286	if (ret == NOTIFY_STOP)
    287		return 1;
    288
    289	print_modules();
    290	__show_regs(regs);
    291	__show_regs_alloc_free(regs);
    292	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
    293		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
    294
    295	if (!user_mode(regs) || in_interrupt()) {
    296		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
    297			 ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN)
    298			 + THREAD_SIZE);
    299		dump_backtrace(regs, tsk, KERN_EMERG);
    300		dump_instr(KERN_EMERG, regs);
    301	}
    302
    303	return 0;
    304}
    305
    306static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
    307static int die_owner = -1;
    308static unsigned int die_nest_count;
    309
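/*
 * die_lock serialises oops output across CPUs. oops_begin() uses a trylock
 * so that a CPU which faults again while already dumping (a nested oops)
 * can recognise itself via die_owner and keep going instead of deadlocking
 * on itself; die_nest_count ensures only the outermost oops_end() actually
 * drops the lock.
 */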
    310static unsigned long oops_begin(void)
    311{
    312	int cpu;
    313	unsigned long flags;
    314
    315	oops_enter();
    316
    317	/* racy, but better than risking deadlock. */
    318	raw_local_irq_save(flags);
    319	cpu = smp_processor_id();
    320	if (!arch_spin_trylock(&die_lock)) {
    321		if (cpu == die_owner)
    322			/* nested oops. should stop eventually */;
    323		else
    324			arch_spin_lock(&die_lock);
    325	}
    326	die_nest_count++;
    327	die_owner = cpu;
    328	console_verbose();
    329	bust_spinlocks(1);
    330	return flags;
    331}
    332
    333static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
    334{
    335	if (regs && kexec_should_crash(current))
    336		crash_kexec(regs);
    337
    338	bust_spinlocks(0);
    339	die_owner = -1;
    340	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
    341	die_nest_count--;
    342	if (!die_nest_count)
    343		/* Nest count reaches zero, release the lock. */
    344		arch_spin_unlock(&die_lock);
    345	raw_local_irq_restore(flags);
    346	oops_exit();
    347
    348	if (in_interrupt())
    349		panic("Fatal exception in interrupt");
    350	if (panic_on_oops)
    351		panic("Fatal exception");
    352	if (signr)
    353		make_task_dead(signr);
    354}
    355
    356/*
    357 * This function is protected against re-entrancy.
    358 */
    359void die(const char *str, struct pt_regs *regs, int err)
    360{
    361	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
    362	unsigned long flags = oops_begin();
    363	int sig = SIGSEGV;
    364
    365	if (!user_mode(regs))
    366		bug_type = report_bug(regs->ARM_pc, regs);
    367	if (bug_type != BUG_TRAP_TYPE_NONE)
    368		str = "Oops - BUG";
    369
    370	if (__die(str, err, regs))
    371		sig = 0;
    372
    373	oops_end(flags, regs, sig);
    374}
    375
    376void arm_notify_die(const char *str, struct pt_regs *regs,
    377		int signo, int si_code, void __user *addr,
    378		unsigned long err, unsigned long trap)
    379{
    380	if (user_mode(regs)) {
    381		current->thread.error_code = err;
    382		current->thread.trap_no = trap;
    383
    384		force_sig_fault(signo, si_code, addr);
    385	} else {
    386		die(str, regs, err);
    387	}
    388}
    389
    390#ifdef CONFIG_GENERIC_BUG
    391
    392int is_valid_bugaddr(unsigned long pc)
    393{
    394#ifdef CONFIG_THUMB2_KERNEL
    395	u16 bkpt;
    396	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
    397#else
    398	u32 bkpt;
    399	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
    400#endif
    401
    402	if (get_kernel_nofault(bkpt, (void *)pc))
    403		return 0;
    404
    405	return bkpt == insn;
    406}
    407
    408#endif
    409
    410static LIST_HEAD(undef_hook);
    411static DEFINE_RAW_SPINLOCK(undef_lock);
    412
    413void register_undef_hook(struct undef_hook *hook)
    414{
    415	unsigned long flags;
    416
    417	raw_spin_lock_irqsave(&undef_lock, flags);
    418	list_add(&hook->node, &undef_hook);
    419	raw_spin_unlock_irqrestore(&undef_lock, flags);
    420}
    421
    422void unregister_undef_hook(struct undef_hook *hook)
    423{
    424	unsigned long flags;
    425
    426	raw_spin_lock_irqsave(&undef_lock, flags);
    427	list_del(&hook->node);
    428	raw_spin_unlock_irqrestore(&undef_lock, flags);
    429}
    430
    431static nokprobe_inline
    432int call_undef_hook(struct pt_regs *regs, unsigned int instr)
    433{
    434	struct undef_hook *hook;
    435	unsigned long flags;
    436	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
    437
    438	raw_spin_lock_irqsave(&undef_lock, flags);
    439	list_for_each_entry(hook, &undef_hook, node)
    440		if ((instr & hook->instr_mask) == hook->instr_val &&
    441		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
    442			fn = hook->fn;
    443	raw_spin_unlock_irqrestore(&undef_lock, flags);
    444
    445	return fn ? fn(regs, instr) : 1;
    446}
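/*
 * Code that needs to trap or emulate particular undefined instructions
 * registers a struct undef_hook: instr_mask/instr_val select the opcodes,
 * cpsr_mask/cpsr_val select the processor state (e.g. ARM vs Thumb), and
 * fn() returns 0 once it has handled the instruction so that do_undefinstr()
 * does not raise SIGILL. See arm_mrc_hook below (CONFIG_TLS_REG_EMUL) for an
 * in-file example.
 */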
    447
    448asmlinkage void do_undefinstr(struct pt_regs *regs)
    449{
    450	unsigned int instr;
    451	void __user *pc;
    452
    453	pc = (void __user *)instruction_pointer(regs);
    454
    455	if (processor_mode(regs) == SVC_MODE) {
    456#ifdef CONFIG_THUMB2_KERNEL
    457		if (thumb_mode(regs)) {
    458			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
    459			if (is_wide_instruction(instr)) {
    460				u16 inst2;
    461				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
    462				instr = __opcode_thumb32_compose(instr, inst2);
    463			}
    464		} else
    465#endif
    466			instr = __mem_to_opcode_arm(*(u32 *) pc);
    467	} else if (thumb_mode(regs)) {
    468		if (get_user(instr, (u16 __user *)pc))
    469			goto die_sig;
    470		instr = __mem_to_opcode_thumb16(instr);
    471		if (is_wide_instruction(instr)) {
    472			unsigned int instr2;
    473			if (get_user(instr2, (u16 __user *)pc+1))
    474				goto die_sig;
    475			instr2 = __mem_to_opcode_thumb16(instr2);
    476			instr = __opcode_thumb32_compose(instr, instr2);
    477		}
    478	} else {
    479		if (get_user(instr, (u32 __user *)pc))
    480			goto die_sig;
    481		instr = __mem_to_opcode_arm(instr);
    482	}
    483
    484	if (call_undef_hook(regs, instr) == 0)
    485		return;
    486
    487die_sig:
    488#ifdef CONFIG_DEBUG_USER
    489	if (user_debug & UDBG_UNDEFINED) {
    490		pr_info("%s (%d): undefined instruction: pc=%p\n",
    491			current->comm, task_pid_nr(current), pc);
    492		__show_regs(regs);
    493		dump_instr(KERN_INFO, regs);
    494	}
    495#endif
    496	arm_notify_die("Oops - undefined instruction", regs,
    497		       SIGILL, ILL_ILLOPC, pc, 0, 6);
    498}
    499NOKPROBE_SYMBOL(do_undefinstr)
    500
    501/*
    502 * Handle FIQ similarly to NMI on x86 systems.
    503 *
     504 * The runtime environment for NMIs is extremely restrictive
     505 * (NMIs can pre-empt critical sections, meaning almost all locking is
     506 * forbidden), so this default FIQ handling must only be used in
     507 * circumstances where non-maskability improves robustness, such as
     508 * watchdog or debug logic.
     509 *
     510 * This handler is not appropriate for general purpose use in drivers or
     511 * platform code and can be overridden using set_fiq_handler.
    512 */
    513asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
    514{
    515	struct pt_regs *old_regs = set_irq_regs(regs);
    516
    517	nmi_enter();
    518
    519	/* nop. FIQ handlers for special arch/arm features can be added here. */
    520
    521	nmi_exit();
    522
    523	set_irq_regs(old_regs);
    524}
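/*
 * For completeness: the override mentioned above lives in
 * arch/arm/kernel/fiq.c. A driver that genuinely needs FIQ claims it with
 * claim_fiq() and installs its own handler code directly into the FIQ
 * vector with set_fiq_handler(), so it never passes through this default
 * path at all.
 */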
    525
    526/*
    527 * bad_mode handles the impossible case in the vectors.  If you see one of
    528 * these, then it's extremely serious, and could mean you have buggy hardware.
    529 * It never returns, and never tries to sync.  We hope that we can at least
    530 * dump out some state information...
    531 */
    532asmlinkage void bad_mode(struct pt_regs *regs, int reason)
    533{
    534	console_verbose();
    535
    536	pr_crit("Bad mode in %s handler detected\n", handler[reason]);
    537
    538	die("Oops - bad mode", regs, 0);
    539	local_irq_disable();
    540	panic("bad mode");
    541}
    542
    543static int bad_syscall(int n, struct pt_regs *regs)
    544{
    545	if ((current->personality & PER_MASK) != PER_LINUX) {
    546		send_sig(SIGSEGV, current, 1);
    547		return regs->ARM_r0;
    548	}
    549
    550#ifdef CONFIG_DEBUG_USER
    551	if (user_debug & UDBG_SYSCALL) {
    552		pr_err("[%d] %s: obsolete system call %08x.\n",
    553			task_pid_nr(current), current->comm, n);
    554		dump_instr(KERN_ERR, regs);
    555	}
    556#endif
    557
    558	arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
    559		       (void __user *)instruction_pointer(regs) -
    560			 (thumb_mode(regs) ? 2 : 4),
    561		       n, 0);
    562
    563	return regs->ARM_r0;
    564}
    565
    566static inline int
    567__do_cache_op(unsigned long start, unsigned long end)
    568{
    569	int ret;
    570
    571	do {
    572		unsigned long chunk = min(PAGE_SIZE, end - start);
    573
    574		if (fatal_signal_pending(current))
    575			return 0;
    576
    577		ret = flush_icache_user_range(start, start + chunk);
    578		if (ret)
    579			return ret;
    580
    581		cond_resched();
    582		start += chunk;
    583	} while (start < end);
    584
    585	return 0;
    586}
    587
    588static inline int
    589do_cache_op(unsigned long start, unsigned long end, int flags)
    590{
    591	if (end < start || flags)
    592		return -EINVAL;
    593
    594	if (!access_ok((void __user *)start, end - start))
    595		return -EFAULT;
    596
    597	return __do_cache_op(start, end);
    598}
    599
    600/*
    601 * Handle all unrecognised system calls.
    602 *  0x9f0000 - 0x9fffff are some more esoteric system calls
    603 */
    604#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
    605asmlinkage int arm_syscall(int no, struct pt_regs *regs)
    606{
    607	if ((no >> 16) != (__ARM_NR_BASE>> 16))
    608		return bad_syscall(no, regs);
    609
    610	switch (no & 0xffff) {
    611	case 0: /* branch through 0 */
    612		arm_notify_die("branch through zero", regs,
    613			       SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
    614		return 0;
    615
    616	case NR(breakpoint): /* SWI BREAK_POINT */
    617		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
    618		ptrace_break(regs);
    619		return regs->ARM_r0;
    620
    621	/*
    622	 * Flush a region from virtual address 'r0' to virtual address 'r1'
    623	 * _exclusive_.  There is no alignment requirement on either address;
    624	 * user space does not need to know the hardware cache layout.
    625	 *
    626	 * r2 contains flags.  It should ALWAYS be passed as ZERO until it
    627	 * is defined to be something else.  For now we ignore it, but may
    628	 * the fires of hell burn in your belly if you break this rule. ;)
    629	 *
    630	 * (at a later date, we may want to allow this call to not flush
    631	 * various aspects of the cache.  Passing '0' will guarantee that
    632	 * everything necessary gets flushed to maintain consistency in
    633	 * the specified region).
    634	 */
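	/*
	 * Illustrative userspace invocation (a sketch, not part of this
	 * file): with EABI numbering __ARM_NR_cacheflush is 0x0f0000 + 2
	 * (0x9f0002 under the old OABI range noted above), so making a
	 * freshly written JIT buffer executable looks roughly like
	 *
	 *	syscall(0x0f0002, (unsigned long)buf,
	 *		(unsigned long)buf + len, 0);
	 *
	 * with the third argument kept at zero as required above.
	 */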
    635	case NR(cacheflush):
    636		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
    637
    638	case NR(usr26):
    639		if (!(elf_hwcap & HWCAP_26BIT))
    640			break;
    641		regs->ARM_cpsr &= ~MODE32_BIT;
    642		return regs->ARM_r0;
    643
    644	case NR(usr32):
    645		if (!(elf_hwcap & HWCAP_26BIT))
    646			break;
    647		regs->ARM_cpsr |= MODE32_BIT;
    648		return regs->ARM_r0;
    649
    650	case NR(set_tls):
    651		set_tls(regs->ARM_r0);
    652		return 0;
    653
    654	case NR(get_tls):
    655		return current_thread_info()->tp_value[0];
    656
    657	default:
    658		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
    659		   if not implemented, rather than raising SIGILL.  This
    660		   way the calling program can gracefully determine whether
    661		   a feature is supported.  */
    662		if ((no & 0xffff) <= 0x7ff)
    663			return -ENOSYS;
    664		break;
    665	}
    666#ifdef CONFIG_DEBUG_USER
    667	/*
     668	 * experience shows that these usually indicate that
    669	 * something catastrophic has happened
    670	 */
    671	if (user_debug & UDBG_SYSCALL) {
    672		pr_err("[%d] %s: arm syscall %d\n",
    673		       task_pid_nr(current), current->comm, no);
    674		dump_instr(KERN_ERR, regs);
    675		if (user_mode(regs)) {
    676			__show_regs(regs);
    677			c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
    678		}
    679	}
    680#endif
    681	arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
    682		       (void __user *)instruction_pointer(regs) -
    683			 (thumb_mode(regs) ? 2 : 4),
    684		       no, 0);
    685	return 0;
    686}
    687
    688#ifdef CONFIG_TLS_REG_EMUL
    689
    690/*
    691 * We might be running on an ARMv6+ processor which should have the TLS
    692 * register but for some reason we can't use it, or maybe an SMP system
    693 * using a pre-ARMv6 processor (there are apparently a few prototypes like
    694 * that in existence) and therefore access to that register must be
    695 * emulated.
    696 */
    697
    698static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
    699{
    700	int reg = (instr >> 12) & 15;
    701	if (reg == 15)
    702		return 1;
    703	regs->uregs[reg] = current_thread_info()->tp_value[0];
    704	regs->ARM_pc += 4;
    705	return 0;
    706}
    707
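/*
 * The mask/value pair below matches the ARM encoding of
 * "mrc p15, 0, <Rd>, c13, c0, 3" (read TPIDRURO, the user read-only TLS
 * register), with the condition field (bits 31:28) and the Rd field
 * (bits 15:12) left unchecked by instr_mask. get_tp_trap() then pulls Rd
 * back out of bits 15:12 and supplies the saved TLS value. PSR_T_BIT in
 * cpsr_mask restricts the hook to ARM (non-Thumb) state.
 */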
    708static struct undef_hook arm_mrc_hook = {
    709	.instr_mask	= 0x0fff0fff,
    710	.instr_val	= 0x0e1d0f70,
    711	.cpsr_mask	= PSR_T_BIT,
    712	.cpsr_val	= 0,
    713	.fn		= get_tp_trap,
    714};
    715
    716static int __init arm_mrc_hook_init(void)
    717{
    718	register_undef_hook(&arm_mrc_hook);
    719	return 0;
    720}
    721
    722late_initcall(arm_mrc_hook_init);
    723
    724#endif
    725
    726/*
    727 * A data abort trap was taken, but we did not handle the instruction.
    728 * Try to abort the user program, or panic if it was the kernel.
    729 */
    730asmlinkage void
    731baddataabort(int code, unsigned long instr, struct pt_regs *regs)
    732{
    733	unsigned long addr = instruction_pointer(regs);
    734
    735#ifdef CONFIG_DEBUG_USER
    736	if (user_debug & UDBG_BADABORT) {
    737		pr_err("8<--- cut here ---\n");
    738		pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
    739		       task_pid_nr(current), current->comm, code, instr);
    740		dump_instr(KERN_ERR, regs);
    741		show_pte(KERN_ERR, current->mm, addr);
    742	}
    743#endif
    744
    745	arm_notify_die("unknown data abort code", regs,
    746		       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
    747}
    748
    749void __readwrite_bug(const char *fn)
    750{
    751	pr_err("%s called, but not implemented\n", fn);
    752	BUG();
    753}
    754EXPORT_SYMBOL(__readwrite_bug);
    755
    756void __pte_error(const char *file, int line, pte_t pte)
    757{
    758	pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
    759}
    760
    761void __pmd_error(const char *file, int line, pmd_t pmd)
    762{
    763	pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
    764}
    765
    766void __pgd_error(const char *file, int line, pgd_t pgd)
    767{
    768	pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
    769}
    770
    771asmlinkage void __div0(void)
    772{
    773	pr_err("Division by zero in kernel.\n");
    774	dump_stack();
    775}
    776EXPORT_SYMBOL(__div0);
    777
    778void abort(void)
    779{
    780	BUG();
    781
    782	/* if that doesn't kill us, halt */
    783	panic("Oops failed to kill thread");
    784}
    785
    786#ifdef CONFIG_KUSER_HELPERS
    787static void __init kuser_init(void *vectors)
    788{
    789	extern char __kuser_helper_start[], __kuser_helper_end[];
    790	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
    791
    792	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
    793
    794	/*
    795	 * vectors + 0xfe0 = __kuser_get_tls
    796	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
    797	 */
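	/*
	 * In other words (based on the helper layout in entry-armv.S): the
	 * default __kuser_get_tls loads the software TLS value the kernel
	 * mirrors into the vector page, while the word kept at +0xfe8 is the
	 * mrc-based variant that reads the hardware TLS register. When the
	 * CPU has that register (or tls_emu traps it), the mrc word is
	 * copied over the first instruction of the helper.
	 */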
    798	if (tls_emu || has_tls_reg)
    799		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
    800}
    801#else
    802static inline void __init kuser_init(void *vectors)
    803{
    804}
    805#endif
    806
    807#ifndef CONFIG_CPU_V7M
    808static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
    809{
    810	memcpy(vma, lma_start, lma_end - lma_start);
    811}
    812
    813static void flush_vectors(void *vma, size_t offset, size_t size)
    814{
    815	unsigned long start = (unsigned long)vma + offset;
    816	unsigned long end = start + size;
    817
    818	flush_icache_range(start, end);
    819}
    820
    821#ifdef CONFIG_HARDEN_BRANCH_HISTORY
    822int spectre_bhb_update_vectors(unsigned int method)
    823{
    824	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
    825	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
    826	void *vec_start, *vec_end;
    827
    828	if (system_state >= SYSTEM_FREEING_INITMEM) {
    829		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
    830		       smp_processor_id());
    831		return SPECTRE_VULNERABLE;
    832	}
    833
    834	switch (method) {
    835	case SPECTRE_V2_METHOD_LOOP8:
    836		vec_start = __vectors_bhb_loop8_start;
    837		vec_end = __vectors_bhb_loop8_end;
    838		break;
    839
    840	case SPECTRE_V2_METHOD_BPIALL:
    841		vec_start = __vectors_bhb_bpiall_start;
    842		vec_end = __vectors_bhb_bpiall_end;
    843		break;
    844
    845	default:
    846		pr_err("CPU%u: unknown Spectre BHB state %d\n",
    847		       smp_processor_id(), method);
    848		return SPECTRE_VULNERABLE;
    849	}
    850
    851	copy_from_lma(vectors_page, vec_start, vec_end);
    852	flush_vectors(vectors_page, 0, vec_end - vec_start);
    853
    854	return SPECTRE_MITIGATED;
    855}
    856#endif
    857
    858void __init early_trap_init(void *vectors_base)
    859{
    860	extern char __stubs_start[], __stubs_end[];
    861	extern char __vectors_start[], __vectors_end[];
    862	unsigned i;
    863
    864	vectors_page = vectors_base;
    865
    866	/*
    867	 * Poison the vectors page with an undefined instruction.  This
    868	 * instruction is chosen to be undefined for both ARM and Thumb
    869	 * ISAs.  The Thumb version is an undefined instruction with a
    870	 * branch back to the undefined instruction.
    871	 */
    872	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
    873		((u32 *)vectors_base)[i] = 0xe7fddef1;
    874
    875	/*
    876	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
    877	 * into the vector page, mapped at 0xffff0000, and ensure these
    878	 * are visible to the instruction stream.
    879	 */
    880	copy_from_lma(vectors_base, __vectors_start, __vectors_end);
    881	copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
    882
    883	kuser_init(vectors_base);
    884
    885	flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
    886}
    887#else /* ifndef CONFIG_CPU_V7M */
    888void __init early_trap_init(void *vectors_base)
    889{
    890	/*
     891	 * On V7-M there is no need to copy the vector table to a dedicated
    892	 * memory area. The address is configurable and so a table in the kernel
    893	 * image can be used.
    894	 */
    895}
    896#endif
    897
    898#ifdef CONFIG_VMAP_STACK
    899
    900DECLARE_PER_CPU(u8 *, irq_stack_ptr);
    901
    902asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr);
    903
    904static int __init allocate_overflow_stacks(void)
    905{
    906	u8 *stack;
    907	int cpu;
    908
    909	for_each_possible_cpu(cpu) {
    910		stack = (u8 *)__get_free_page(GFP_KERNEL);
    911		if (WARN_ON(!stack))
    912			return -ENOMEM;
    913		per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
    914	}
    915	return 0;
    916}
    917early_initcall(allocate_overflow_stacks);
    918
    919asmlinkage void handle_bad_stack(struct pt_regs *regs)
    920{
    921	unsigned long tsk_stk = (unsigned long)current->stack;
    922#ifdef CONFIG_IRQSTACKS
    923	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
    924#endif
    925	unsigned long ovf_stk = (unsigned long)this_cpu_read(overflow_stack_ptr);
    926
    927	console_verbose();
    928	pr_emerg("Insufficient stack space to handle exception!");
    929
    930	pr_emerg("Task stack:     [0x%08lx..0x%08lx]\n",
    931		 tsk_stk, tsk_stk + THREAD_SIZE);
    932#ifdef CONFIG_IRQSTACKS
    933	pr_emerg("IRQ stack:      [0x%08lx..0x%08lx]\n",
    934		 irq_stk - THREAD_SIZE, irq_stk);
    935#endif
    936	pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n",
    937		 ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk);
    938
    939	die("kernel stack overflow", regs, 0);
    940}
    941
    942#ifndef CONFIG_ARM_LPAE
    943/*
    944 * Normally, we rely on the logic in do_translation_fault() to update stale PMD
    945 * entries covering the vmalloc space in a task's page tables when it first
    946 * accesses the region in question. Unfortunately, this is not sufficient when
    947 * the task stack resides in the vmalloc region, as do_translation_fault() is a
    948 * C function that needs a stack to run.
    949 *
    950 * So we need to ensure that these PMD entries are up to date *before* the MM
    951 * switch. As we already have some logic in the MM switch path that takes care
    952 * of this, let's trigger it by bumping the counter every time the core vmalloc
    953 * code modifies a PMD entry in the vmalloc region. Use release semantics on
    954 * the store so that other CPUs observing the counter's new value are
    955 * guaranteed to see the updated page table entries as well.
    956 */
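/*
 * (For reference: the likely reader of vmalloc_seq is check_vmalloc_seq()
 * on the MM switch path, defined in arch/arm/mm/ioremap.c, which copies the
 * vmalloc-area PMD entries from init_mm into the incoming task's page
 * tables before the new stack can be dereferenced.)
 */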
    957void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
    958{
    959	if (start < VMALLOC_END && end > VMALLOC_START)
    960		atomic_inc_return_release(&init_mm.context.vmalloc_seq);
    961}
    962#endif
    963#endif