cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (12914B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/kernel/kprobes.c
 *
 * Kprobes on ARM
 *
 * Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2006, 2007 Motorola Inc.
 *
 * Nicolas Pitre <nico@marvell.com>
 * Copyright (C) 2007 Marvell Ltd.
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/opcodes.h>
#include <asm/cacheflush.h>
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/patch.h>
#include <asm/sections.h>

#include "../decode-arm.h"
#include "../decode-thumb.h"
#include "core.h"

#define MIN_STACK_SIZE(addr) 				\
	min((unsigned long)MAX_STACK_SIZE,		\
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

#define flush_insns(addr, size)				\
	flush_icache_range((unsigned long)(addr),	\
			   (unsigned long)(addr) +	\
			   (size))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

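/*
 * Called from the generic register_kprobe() path: decode the probed
 * instruction, check that it may safely be probed, and set up p->ainsn
 * for out-of-line single-stepping (copying the instruction into an
 * instruction slot when one is needed).
 *
 * Typical consumer usage (a sketch of the generic kprobes API with a
 * hypothetical symbol name, not code from this file):
 *
 *	static struct kprobe kp = { .symbol_name = "some_function" };
 *
 *	ret = register_kprobe(&kp);
 *	...
 *	unregister_kprobe(&kp);
 */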
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t insn;
	kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
	unsigned long addr = (unsigned long)p->addr;
	bool thumb;
	kprobe_decode_insn_t *decode_insn;
	const union decode_action *actions;
	int is;
	const struct decode_checker **checkers;

#ifdef CONFIG_THUMB2_KERNEL
	thumb = true;
	addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
	insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
	if (is_wide_instruction(insn)) {
		u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
		insn = __opcode_thumb32_compose(insn, inst2);
		decode_insn = thumb32_probes_decode_insn;
		actions = kprobes_t32_actions;
		checkers = kprobes_t32_checkers;
	} else {
		decode_insn = thumb16_probes_decode_insn;
		actions = kprobes_t16_actions;
		checkers = kprobes_t16_checkers;
	}
#else /* !CONFIG_THUMB2_KERNEL */
	thumb = false;
	if (addr & 0x3)
		return -EINVAL;
	insn = __mem_to_opcode_arm(*p->addr);
	decode_insn = arm_probes_decode_insn;
	actions = kprobes_arm_actions;
	checkers = kprobes_arm_checkers;
#endif

	p->opcode = insn;
	p->ainsn.insn = tmp_insn;

	switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
	case INSN_REJECTED:	/* not supported */
		return -EINVAL;

	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		for (is = 0; is < MAX_INSN_SIZE; ++is)
			p->ainsn.insn[is] = tmp_insn[is];
		flush_insns(p->ainsn.insn,
				sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
		p->ainsn.insn_fn = (probes_insn_fn_t *)
					((uintptr_t)p->ainsn.insn | thumb);
		break;

	case INSN_GOOD_NO_SLOT:	/* instruction doesn't need insn slot */
		p->ainsn.insn = NULL;
		break;
	}

	/*
	 * Never instrument an instruction like 'str r0, [sp, +/-r1]'.
	 * Instructions like 'str r0, [sp, #-68]' must also be prohibited.
	 * See __und_svc.
	 */
	if ((p->ainsn.stack_space < 0) ||
			(p->ainsn.stack_space > MAX_STACK_SIZE))
		return -EINVAL;

	return 0;
}

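/*
 * Arm the probe: replace the probed instruction with the appropriate
 * breakpoint (an undefined instruction).  In ARM mode the breakpoint
 * carries the condition field of the original instruction, so that a
 * conditionally skipped instruction does not trap.
 */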
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned int brkp;
	void *addr;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* Remove any Thumb flag */
		addr = (void *)((uintptr_t)p->addr & ~1);

		if (is_wide_instruction(p->opcode))
			brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
		else
			brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
	} else {
		kprobe_opcode_t insn = p->opcode;

		addr = p->addr;
		brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;

		if (insn >= 0xe0000000)
			brkp |= 0xe0000000;  /* Unconditional instruction */
		else
			brkp |= insn & 0xf0000000;  /* Copy condition from insn */
	}

	patch_text(addr, brkp);
}

/*
 * The actual disarming is done here on each CPU and synchronized using
 * stop_machine. This synchronization is necessary on SMP to avoid removing
 * a probe between the moment the 'Undefined Instruction' exception is raised
 * and the moment the exception handler reads the faulting instruction from
 * memory. It is also needed to atomically set the two half-words of a 32-bit
 * Thumb breakpoint.
 */
struct patch {
	void *addr;
	unsigned int insn;
};

static int __kprobes_remove_breakpoint(void *data)
{
	struct patch *p = data;
	__patch_text(p->addr, p->insn);
	return 0;
}

void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
{
	struct patch p = {
		.addr = addr,
		.insn = insn,
	};
	stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
				cpu_online_mask);
}

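/* Disarm the probe by restoring the original instruction. */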
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
			p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

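/*
 * Per-CPU bookkeeping for the currently handled kprobe; the prev_kprobe
 * copy in the control block lets a reentrant hit be unwound again.
 */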
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

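/*
 * Skip the probed instruction without executing it: advance the IT state
 * (Thumb-2 only) and move the PC past the instruction.  Used when the
 * instruction's condition check fails.
 */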
static void __kprobes
singlestep_skip(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_THUMB2_KERNEL
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
	if (is_wide_instruction(p->opcode))
		regs->ARM_pc += 4;
	else
		regs->ARM_pc += 2;
#else
	regs->ARM_pc += 4;
#endif
}

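/*
 * Execute the probed instruction out of line, or simulate it, via the
 * single-step handler installed by the instruction decoder.
 */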
static inline void __kprobes
singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
}

/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete.  The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set, as this is the usual situation for pointers to Thumb
	 * code.  If none is found, fall back to looking for one with bit 0
	 * clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/*
			 * The probe was hit but the conditional execution
			 * check failed, so just skip the instruction and
			 * continue as if nothing had happened.
			 * In this case we can also skip the recursion check.
			 */
			singlestep_skip(p, regs);
		} else if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
			case KPROBE_HIT_SS:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			case KPROBE_REENTER:
				/* A nested probe was hit in FIQ; this is a bug. */
				pr_warn("Failed to recover from reentered kprobes.\n");
				dump_kprobe(p);
				fallthrough;
			default:
				/* impossible cases */
				BUG();
			}
		} else {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it modified
			 * the execution path and there is no need to
			 * single-step; just reset the current kprobe and
			 * exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
			}
			reset_current_kprobe();
		}
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it.  Let's restart
		 * the instruction.  By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}

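/*
 * Entry point from the undefined-instruction hooks below: run the kprobe
 * handler with IRQs disabled, as kprobe_handler() requires.
 */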
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;
	local_irq_save(flags);
	kprobe_handler(regs);
	local_irq_restore(flags);
	return 0;
}

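/*
 * Fix up a fault taken while a kprobe is active; only faults raised
 * during single-stepping need handling here.
 */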
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the PC to point back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ARM_pc = (long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			reset_current_kprobe();
		}
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	/*
	 * notify_die() is currently never called on ARM,
	 * so this callback is currently empty.
	 */
	return NOTIFY_DONE;
}

/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11, sp, lr, and pc to the user
 * return-handler. This is not a complete pt_regs structure, but that
 * should be enough for a stacktrace from the return handler with or
 * without pt_regs.
 */
void __naked __kprobes __kretprobe_trampoline(void)
{
	__asm__ __volatile__ (
#ifdef CONFIG_FRAME_POINTER
		"ldr	lr, =__kretprobe_trampoline	\n\t"
	/* __kretprobe_trampoline sets up a frame pointer on pt_regs. */
#ifdef CONFIG_CC_IS_CLANG
		"stmdb	sp, {sp, lr, pc}	\n\t"
		"sub	sp, sp, #12		\n\t"
		/* In the clang case, pt_regs->ip = lr. */
		"stmdb	sp!, {r0 - r11, lr}	\n\t"
		/* fp points to regs->r11 (fp) */
		"add	fp, sp,	#44		\n\t"
#else /* !CONFIG_CC_IS_CLANG */
		/* In the gcc case, pt_regs->ip = fp. */
		"stmdb	sp, {fp, sp, lr, pc}	\n\t"
		"sub	sp, sp, #16		\n\t"
		"stmdb	sp!, {r0 - r11}		\n\t"
		/* fp points to regs->r15 (pc) */
		"add	fp, sp, #60		\n\t"
#endif /* CONFIG_CC_IS_CLANG */
#else /* !CONFIG_FRAME_POINTER */
		"sub	sp, sp, #16		\n\t"
		"stmdb	sp!, {r0 - r11}		\n\t"
#endif /* CONFIG_FRAME_POINTER */
		"mov	r0, sp			\n\t"
		"bl	trampoline_handler	\n\t"
		"mov	lr, r0			\n\t"
		"ldmia	sp!, {r0 - r11}		\n\t"
		"add	sp, sp, #16		\n\t"
#ifdef CONFIG_THUMB2_KERNEL
		"bx	lr			\n\t"
#else
		"mov	pc, lr			\n\t"
#endif
		: : : "memory");
}

/* Called from __kretprobe_trampoline */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, (void *)regs->ARM_fp);
}

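/*
 * Divert the probed function's return: record the real return address and
 * frame pointer in the kretprobe instance, then point lr at
 * __kretprobe_trampoline so the return is intercepted.
 */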
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
	ri->fp = (void *)regs->ARM_fp;

	/* Replace the return addr with trampoline addr. */
	regs->ARM_lr = (unsigned long)&__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

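/*
 * Undefined-instruction hooks matching the kprobe breakpoint encodings in
 * SVC mode; they route the resulting traps to kprobe_trap_handler().
 */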
#ifdef CONFIG_THUMB2_KERNEL

static struct undef_hook kprobes_thumb16_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

static struct undef_hook kprobes_thumb32_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

#else  /* !CONFIG_THUMB2_KERNEL */

static struct undef_hook kprobes_arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= KPROBE_ARM_BREAKPOINT_INSTRUCTION,
	.cpsr_mask	= MODE_MASK,
	.cpsr_val	= SVC_MODE,
	.fn		= kprobe_trap_handler,
};

#endif /* !CONFIG_THUMB2_KERNEL */

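/* Initialise the instruction decoder and register the breakpoint hooks. */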
int __init arch_init_kprobes(void)
{
	arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL
	register_undef_hook(&kprobes_thumb16_break_hook);
	register_undef_hook(&kprobes_thumb32_break_hook);
#else
	register_undef_hook(&kprobes_arm_break_hook);
#endif
	return 0;
}

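/*
 * Exception entry, irqentry, idmap, and .kprobes.text code must never be
 * probed.
 */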
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	void *a = (void *)addr;

	return __in_irqentry_text(addr) ||
	       in_entry_text(addr) ||
	       in_idmap_text(addr) ||
	       memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
}