cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.c (5544B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 * Copyright (C) 2017 Andes Technology Corporation
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
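/*
 * Text patching must be serialized against other code modifiers; the ftrace
 * core calls these hooks around each batch of call-site updates, so take
 * text_mutex for the whole batch.
 */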
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
}

void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
}

static int ftrace_check_current_call(unsigned long hook_pos,
				     unsigned int *expected)
{
	unsigned int replaced[2];
	unsigned int nops[2] = {NOP4, NOP4};

	/* we expect nops at the hook position */
	if (!expected)
		expected = nops;

	/*
	 * Read the text we want to modify;
	 * return must be -EFAULT on read error
	 */
	if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
			MCOUNT_INSN_SIZE))
		return -EFAULT;

	/*
	 * Make sure it is what we expect it to be;
	 * return must be -EINVAL on failed comparison
	 */
	if (memcmp(expected, replaced, sizeof(replaced))) {
		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
		       (void *)hook_pos, expected[0], expected[1], replaced[0],
		       replaced[1]);
		return -EINVAL;
	}

	return 0;
}

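/*
 * Build an auipc/jalr pair that calls @target from @hook_pos (make_call(),
 * from the RISC-V asm/ftrace.h, encodes the PC-relative offset) and patch it
 * in, or restore the two nops when disabling.
 */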
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
				bool enable)
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};

	make_call(hook_pos, target, call);

	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (patch_text_nosync
	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}

/*
 * Put 4 instructions (16 bytes) at the front of the function, within the
 * patchable function entry nops' area:
 *
 * 0: REG_S  ra, -SZREG(sp)
 * 1: auipc  ra, 0x?
 * 2: jalr   -?(ra)
 * 3: REG_L  ra, -SZREG(sp)
 *
 * So the opcodes are:
 * 0: 0xfe113c23 (sd)/0xfe112e23 (sw)
 * 1: 0x???????? -> auipc
 * 2: 0x???????? -> jalr
 * 3: 0xff813083 (ld)/0xffc12083 (lw)
 */
#if __riscv_xlen == 64
#define INSN0	0xfe113c23
#define INSN3	0xff813083
#elif __riscv_xlen == 32
#define INSN0	0xfe112e23
#define INSN3	0xffc12083
#endif

#define FUNC_ENTRY_SIZE	16
#define FUNC_ENTRY_JMP	4

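/*
 * The call target is PC-relative to the auipc, which sits FUNC_ENTRY_JMP
 * bytes past rec->ip (after the REG_S). to_auipc_insn()/to_jalr_insn() split
 * the 32-bit offset into the upper 20 bits (auipc) and the lower 12 bits
 * (jalr), compensating for jalr's sign extension of the low part.
 */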
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int call[4] = {INSN0, 0, 0, INSN3};
	unsigned long target = addr;
	unsigned long caller = rec->ip + FUNC_ENTRY_JMP;

	call[1] = to_auipc_insn((unsigned int)(target - caller));
	call[2] = to_jalr_insn((unsigned int)(target - caller));

	if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
		return -EPERM;

	return 0;
}

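/* Disable tracing: restore the four nops at the patchable function entry. */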
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};

	if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
		return -EPERM;

	return 0;
}

/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	int out;

	ftrace_arch_code_modify_prepare();
	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	ftrace_arch_code_modify_post_process();

	return out;
}

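/*
 * Point both trampoline call sites (ftrace_call and ftrace_regs_call) at the
 * new tracer function; if the first patch fails, its error is returned.
 */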
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
				       (unsigned long)func, true);
	if (!ret) {
		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
					   (unsigned long)func, true);
	}

	return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
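/*
 * Retarget an existing call site from @old_addr to @addr: first verify that
 * the auipc/jalr pair currently at the site really calls @old_addr, then
 * patch in the call to @addr.
 */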
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned int call[2];
	unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
	int ret;

	make_call(caller, old_addr, call);
	ret = ftrace_check_current_call(caller, call);

	if (ret)
		return ret;

	return __ftrace_modify_call(caller, addr, true);
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Most of this function is copied from arm64.
 */
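/*
 * @parent points at the traced function's saved return address. Record the
 * original value via function_graph_enter() and, on success, redirect it to
 * return_to_handler so the graph tracer regains control when the traced
 * function returns.
 */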
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * We don't suffer access faults, so no extra fault-recovery assembly
	 * is needed here.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_regs_call(void);
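/*
 * ftrace_graph_call and ftrace_graph_regs_call are patchable call sites in
 * the ftrace trampolines (mcount-dyn.S); enabling patches them to call
 * prepare_ftrace_return(), disabling replaces the call with nops.
 */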
int ftrace_enable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, true);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int ret;

	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
				    (unsigned long)&prepare_ftrace_return, false);
	if (ret)
		return ret;

	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
				    (unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */