cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kprobes.c (13242B)
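
This is arch/mips/kernel/kprobes.c, the MIPS architecture backend of the generic kprobes API: the kprobes core invokes arch_prepare_kprobe(), arch_arm_kprobe() and the other arch_* hooks defined below when a probe is registered and hit. For orientation only, a minimal kprobe client module might look roughly like the sketch below; the probed symbol name is purely illustrative and is not defined by this file.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs just before the probed instruction is single-stepped out of line. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "kernel_clone",	/* illustrative target symbol */
	.pre_handler	= handler_pre,
};

static int __init kprobe_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");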


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *   Copyright (C) IBM Corporation, 2002, 2004
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>

#include "probes-common.h"

static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};

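/*
 * Per-CPU state used by the generic kprobes core: the kprobe currently
 * being handled on this CPU and its saved control-block state.
 */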
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static int insn_has_delayslot(union mips_instruction insn)
{
	return __insn_has_delay_slot(insn);
}
NOKPROBE_SYMBOL(insn_has_delayslot);

/*
 * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
 * one; putting a breakpoint on top of an atomic ll/sc pair is a bad
 * idea, so we refuse kprobe insertion for such instructions. We
 * cannot do much about a breakpoint in the middle of an ll/sc pair;
 * it is up to the user to avoid those places.
 */
static int insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}
NOKPROBE_SYMBOL(insn_has_ll_or_sc);

int arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (copy_from_kernel_nofault(&prev_insn, p->addr - 1,
			sizeof(mips_instruction)) == 0 &&
	    insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delayslot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (__insn_is_compact_branch(insn)) {
		pr_notice("Kprobes for compact branches are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on a special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a
	 * branch instruction, we need to execute the instruction in
	 * its branch delay slot (BD) at the time of the probe hit.
	 * As MIPS has no hardware single-stepping support, the BD
	 * instruction cannot be executed in-line; it is executed on
	 * the SSOL slot, with a normal breakpoint instruction placed
	 * in the next slot. So, read the instruction and save it for
	 * later execution.
	 */
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}

/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during the probe
 * hit. The result of the evaluation is the updated epc. The instruction in
 * the delay slot is then single-stepped (using a normal breakpoint) on the
 * SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use, in
 * case we need to execute the delay-slot instruction. The latter is not
 * needed for a NOP in the delay slot, or for branch-likely instructions
 * when the branch is taken; in those cases we set the SKIP_DELAYSLOT flag
 * in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
					struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm);
	force_sig(SIGBUS);
	return -EFAULT;

}

static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
						struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0)
			return;
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint instruction.
 * To avoid the SMP problems that can occur when we temporarily put
 * back the original opcode to single-step, we single-stepped a copy
 * of the instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap. In the case of branch instructions, the target
 * epc is restored.
 */
static void resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode))
		regs->cp0_epc = kcb->target_epc;
	else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;
		regs->cp0_epc = orig_epc + 4;
	}
}
NOKPROBE_SYMBOL(resume_execution);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered kprobe_handler(), since another
			 * probe was hit while we were within the handler.
			 * Here we save the original kprobe variables and just
			 * single-step the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;

}
NOKPROBE_SYMBOL(kprobe_handler);

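/*
 * Called via the DIE_SSTEPBP notifier once the break placed after the
 * out-of-line copy is hit: run the post handler, restore the original
 * epc and interrupt state, and clean up the per-CPU kprobe state.
 */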
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

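/*
 * If a fault is taken while single-stepping the out-of-line copy,
 * undo the probe state and let the normal fault handling take over.
 */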
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{

	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global __kretprobe_trampoline\n"
		"__kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void __kretprobe_trampoline(void);

void arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL);
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)__kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
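
The kretprobe trampoline set up at the end of the file is driven by the generic kretprobe API: arch_prepare_kretprobe() swaps the return address in $ra (regs[31]) for __kretprobe_trampoline, and the probe registered by arch_init_kprobes() fires when the probed function returns through it. As a rough sketch only (the probed symbol and handler are illustrative assumptions, not part of this file), a return-probe client might look like this:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs when the probed function returns through __kretprobe_trampoline. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned 0x%lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.maxactive	= 20,			/* concurrent instances to track */
	.kp.symbol_name	= "kernel_clone",	/* illustrative target symbol */
};

static int __init kretprobe_example_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kretprobe_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kretprobe_example_init);
module_exit(kretprobe_example_exit);
MODULE_LICENSE("GPL");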