cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.c (8711B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, a replaced instruction is checked against 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}
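
/*
 * Annotation (not part of the upstream file): callers pair the
 * aarch64_insn_gen_*() helpers with ftrace_modify_code(). For example,
 * flipping a NOP into a BL with validation, as ftrace_make_call() below
 * does ('pc' and 'target' stand for the callsite and branch target):
 *
 *	u32 nop = aarch64_insn_gen_nop();
 *	u32 bl = aarch64_insn_gen_branch_imm(pc, target,
 *					     AARCH64_INSN_BRANCH_LINK);
 *	int err = ftrace_modify_code(pc, nop, bl, true);
 */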

/*
 * Replace tracer function in ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	pc = (unsigned long)function_nocfi(ftrace_call);
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
	if (addr == FTRACE_REGS_ADDR &&
	    IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		return &plt[FTRACE_REGS_PLT_IDX];
#endif
	return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
				      struct module *mod,
				      unsigned long *addr)
{
	unsigned long pc = rec->ip;
	long offset = (long)*addr - (long)pc;
	struct plt_entry *plt;

	/*
	 * When the target is within range of the 'BL' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (offset >= -SZ_128M && offset < SZ_128M)
		return true;

	/*
	 * When the target is outside of the range of a 'BL' instruction, we
	 * must use a PLT to reach it. We can only place PLTs for modules, and
	 * only when module PLT support is built-in.
	 */
	if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		return false;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}
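
/*
 * Annotation (not part of the upstream file): the SZ_128M bound above
 * follows from the A64 'BL' encoding, whose imm26 field is a signed 26-bit
 * offset counted in 4-byte instructions, so the reachable range is
 * +/-2^25 * 4 bytes = +/-2^27 bytes = +/-128MiB around the callsite.
 */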

/*
 * Turn on the call to ftrace_caller() in instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;

	if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
		return -EINVAL;
	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, old_addr,
					  AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_regs_entry, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
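
/*
 * Annotation (not part of the upstream file): after ftrace_init_nop() and
 * a later ftrace_make_call(), the instrumented entry matches the "Enabled"
 * column of the table above:
 *
 *	func:
 *		mov	x9, x30		; written here, preserves LR
 *		bl	<ftrace entry>	; written by ftrace_make_call()
 *		...			; regular prologue follows
 */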
#endif

/*
 * Turn off the call to ftrace_caller() in instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old = 0, new;

	if (!ftrace_find_callable_addr(rec, mod, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
	new = aarch64_insn_gen_nop();

	return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}
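
/*
 * Annotation (not part of the upstream file): FTRACE_MAY_SLEEP indicates
 * that patching runs in normal process context here (no stop_machine()),
 * allowing the core code to reschedule between updates when many
 * callsites are modified.
 */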
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace return address (*parent) on
 * the call stack to return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer,
	    (void *)frame_pointer)) {
		*parent = return_hooker;
	}
}
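
/*
 * Annotation (not part of the upstream file): once *parent has been
 * rewritten, the traced function no longer returns straight to its caller:
 *
 *	caller --bl--> func --ret--> return_to_handler
 *				       --> ftrace_return_to_handler()
 *				       --> original return address
 */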

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/*
	 * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
	 * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
	 * in which we can safely modify the LR.
	 */
	struct pt_regs *regs = arch_ftrace_get_regs(fregs);
	unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);

	prepare_ftrace_return(ip, parent, frame_pointer(regs));
}
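
/*
 * Annotation (not part of the upstream file): on arm64,
 * procedure_link_pointer(regs) expands to regs->regs[30], i.e. the saved
 * LR, so prepare_ftrace_return() rewrites the LR slot in the pt_regs
 * snapshot, which the ftrace entry code is then expected to restore on
 * its way back into the traced function.
 */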
#else
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */