cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry-common.c (21586B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

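/*
 * Note: DAIF is masked before the thread flags are read so that an interrupt
 * cannot flag new work between the check below and the eventual return to
 * userspace; do_notify_resume() unmasks interrupts itself while it processes
 * the pending work.
 */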
static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

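/*
 * With CONFIG_PREEMPT_DYNAMIC the preemption model is chosen at boot (e.g.
 * via the "preempt=" command line parameter); the static key below mirrors
 * the generic irqentry_exit_cond_resched() behaviour and tells us whether
 * the selected model allows preemption on IRQ exit.
 */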
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

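/*
 * If the interrupt arrived while on the task's thread stack, run the handler
 * on the per-CPU IRQ stack; otherwise (e.g. we are already on an IRQ or
 * overflow stack) invoke it directly.
 */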
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
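/*
 * For example, UNHANDLED(el1t, 64, sync) below expands to an
 * el1t_64_sync_handler() whose vector string is "64-bit el1t sync" and which
 * panics via __panic_unhandled().
 */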
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
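	/*
	 * Clearing PSTATE.D (DAIF bit 3, "daifclr, #8") below unmasks debug
	 * exceptions so that the MDSCR_EL1.SS single-step exception armed
	 * above is taken immediately.
	 */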
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
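	/*
	 * DAIF_PROCCTX_NOIRQ re-allows debug and SError exceptions for the
	 * duration of the handler while keeping normal interrupts (IRQ/FIQ)
	 * masked.
	 */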
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

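	/*
	 * VA bit 55 selects between the TTBR0 (user) and TTBR1 (kernel)
	 * halves of the address space, so a set bit 55 means the interrupted
	 * PC looks like a kernel address; apply branch predictor hardening as
	 * el0_ia() does for kernel-address faults.
	 */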
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
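/*
 * With CONFIG_VMAP_STACK, a kernel stack overflow is detected on entry and
 * handled on a separate per-CPU overflow stack; handle_bad_stack() treats it
 * as an NMI and panics, reporting the faulting ESR and FAR.
 */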
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */