cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uprobes.c (5849B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * User-space Probes (UProbes) for powerpc
 *
 * Copyright IBM Corporation, 2007-2012
 *
 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/sstep.h>
#include <asm/inst.h>

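/*
 * Sentinel stored in thread.trap_nr while the instruction copy is being
 * single-stepped out of line; any real trap handler overwrites it, which
 * is how arch_uprobe_xol_was_trapped() detects a fault during XOL.
 */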
#define UPROBE_TRAP_NR	UINT_MAX

/**
 * is_trap_insn - check if the instruction is a trap variant
 * @insn: instruction to be checked.
 * Returns true if @insn is a trap variant.
 */
bool is_trap_insn(uprobe_opcode_t *insn)
{
	return (is_trap(*insn));
}

/**
 * arch_uprobe_analyze_insn - instruction analysis including validity checks.
 * @auprobe: the probepoint information.
 * @mm: the probed address space.
 * @addr: vaddr to probe.
 * Return 0 on success or a -ve number on error.
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
		struct mm_struct *mm, unsigned long addr)
{
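	/* powerpc instructions are always word (4 byte) aligned. */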
	if (addr & 0x03)
		return -EINVAL;

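	/*
	 * A prefixed instruction (ISA 3.1) is 8 bytes wide and must not
	 * cross a 64 byte boundary; (addr & 0x3f) == 60 means the suffix
	 * word would land on the other side, so refuse the probe.
	 */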
	if (cpu_has_feature(CPU_FTR_ARCH_31) &&
	    ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
	    (addr & 0x3f) == 60) {
		pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
		return -EINVAL;
	}

	if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
		pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
		return -ENOTSUPP;
	}

	return 0;
}

/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct arch_uprobe_task *autask = &current->utask->autask;

	autask->saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;
	regs_set_return_ip(regs, current->utask->xol_vaddr);

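	/*
	 * NIP now points at the instruction copy in the XOL area; arm
	 * hardware single-stepping so we trap right after it executes.
	 */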
	user_enable_single_step(current);
	return 0;
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
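	/*
	 * The breakpoint trap leaves NIP pointing at the trap instruction
	 * itself, so the probed address is simply the saved NIP.
	 */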
	return instruction_pointer(regs);
}

/*
 * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
 * then detect the case where a singlestepped instruction jumps back to its
 * own address. It is assumed that anything like do_page_fault/do_trap/etc
 * sets thread.trap_nr != UINT_MAX.
 *
 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
 * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
 */
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.trap_nr != UPROBE_TRAP_NR)
		return true;

	return false;
}

/*
 * Called after single-stepping. To avoid the SMP problems that can
 * occur when we temporarily put back the original opcode to
 * single-step, we single-stepped a copy of the instruction.
 *
 * This function prepares to resume execution after the single-step.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);

	current->thread.trap_nr = utask->autask.saved_trap_nr;

	/*
	 * On powerpc, except for loads and stores, most instructions,
	 * including ones that alter code flow (branches, calls, returns),
	 * are emulated in the kernel. We get here only if the emulation
	 * support doesn't exist and we have to fix up the next instruction
	 * to be executed.
	 */
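	/*
	 * ppc_inst_next() steps past the original instruction at the probed
	 * address: 8 bytes for a prefixed instruction, 4 bytes otherwise.
	 */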
	regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));

	user_disable_single_step(current);
	return 0;
}

/* callback routine for handling exceptions. */
int arch_uprobe_exception_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	/* regs == NULL is a kernel bug */
	if (WARN_ON(!regs))
		return NOTIFY_DONE;

	/* We are only interested in userspace traps */
	if (!user_mode(regs))
		return NOTIFY_DONE;

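	/*
	 * DIE_BPT is raised for the breakpoint trap placed at the probe
	 * point; DIE_SSTEP for the single-step trap taken after the XOL
	 * copy has executed.
	 */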
	switch (val) {
	case DIE_BPT:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/*
 * This function gets called when the XOL instruction either gets trapped or
 * the thread has a fatal signal, so reset the instruction pointer to its
 * probed address.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.trap_nr = utask->autask.saved_trap_nr;
	instruction_pointer_set(regs, utask->vaddr);

	user_disable_single_step(current);
}

/*
 * See if the instruction can be emulated.
 * Returns true if the instruction was emulated, false otherwise.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	int ret;

	/*
	 * emulate_step() returns 1 if the insn was successfully emulated.
	 * For all other cases, we need to single-step in hardware.
	 */
	ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
	if (ret > 0)
		return true;

	return false;
}

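/*
 * On powerpc the return address of the probed function lives in the link
 * register: save the original value and redirect LR to the uretprobe
 * trampoline so the return is intercepted.
 */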
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
{
	unsigned long orig_ret_vaddr;

	orig_ret_vaddr = regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = trampoline_vaddr;

	return orig_ret_vaddr;
}

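/*
 * Decide whether a return instance is still alive by comparing the current
 * stack pointer (gpr[1]) with the stack pointer recorded when the return
 * address was hijacked; instances whose frames have already been popped
 * are dead.
 */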
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->gpr[1] <= ret->stack;
	else
		return regs->gpr[1] < ret->stack;
}