cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.c (7486B)


/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOPs until they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/patch.h>

/*
 * The compiler-emitted profiling hook consists of
 *
 *   PUSH    {LR}
 *   BL      __gnu_mcount_nc
 *
 * To turn this combined sequence into a NOP, we need to restore the value of
 * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
 * modified anyway, and reloading LR from memory is highly likely to be less
 * efficient.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xf10d0d04	/* add.w sp, sp, #4 */
#else
#define	NOP		0xe28dd004	/* add   sp, sp, #4 */
#endif
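/*
 * In effect, a call site that is currently disabled looks roughly like
 *
 *   PUSH    {LR}
 *   ADD     SP, SP, #4		@ the NOP above, undoing the PUSH
 *
 * while an enabled one looks like
 *
 *   PUSH    {LR}
 *   BL      ftrace_caller	@ or one of its variants below
 *
 * so enabling or disabling a site only ever rewrites the second word.
 */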

#ifdef CONFIG_DYNAMIC_FTRACE

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}

void arch_ftrace_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

void ftrace_caller_from_init(void);
void ftrace_regs_caller_from_init(void);

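/*
 * Call sites in .init.text are pointed at the dedicated *_from_init
 * trampoline entries while init memory is still around (presumably to keep
 * the branch within range, cf. the veneer note in ftrace_make_nop() below);
 * everything else, and anything patched once initmem is being freed, keeps
 * the address it was given.
 */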
static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
					  unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
	    system_state >= SYSTEM_FREEING_INITMEM ||
	    likely(!is_kernel_inittext(rec->ip)))
		return addr;
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
	    addr == (unsigned long)&ftrace_caller)
		return (unsigned long)&ftrace_caller_from_init;
	return (unsigned long)&ftrace_regs_caller_from_init;
}

void ftrace_arch_code_modify_prepare(void)
{
}

void ftrace_arch_code_modify_post_process(void)
{
	/* Make sure any TLB misses during machine stop are cleared. */
	flush_tlb_all();
}

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
					 bool warn)
{
	return arm_gen_branch_link(pc, addr, warn);
}

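/*
 * Patch a single instruction: optionally read back what currently sits at
 * 'pc' with a fault-tolerant copy and check that it matches 'old' (converted
 * to memory byte order first), then write 'new' via __patch_text().
 * Returns -EFAULT/-EINVAL if validation fails, 0 on success.
 */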
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new, bool validate)
{
	unsigned long replaced;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
		old = __opcode_to_mem_thumb32(old);
	else
		old = __opcode_to_mem_arm(old);

	if (validate) {
		if (copy_from_kernel_nofault(&replaced, (void *)pc,
				MCOUNT_INSN_SIZE))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}

	__patch_text((void *)pc, new);

	return 0;
}

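/*
 * Redirect the ftrace_call site (and, with DYNAMIC_FTRACE_WITH_REGS, the
 * ftrace_regs_call site) inside the trampolines to branch-and-link to the
 * tracer 'func' that is currently registered.  The previous instruction is
 * not validated here, hence the dummy 'old' value of 0.
 */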
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	new = ftrace_call_replace(pc, (unsigned long)func, true);

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func, true);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

	return ret;
}

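/*
 * Turn the NOP at rec->ip back into a branch-and-link to the (possibly
 * adjusted) trampoline.  If the target is out of direct branch range and the
 * call site lives in a module (CONFIG_ARM_MODULE_PLTS), fall back to a PLT
 * entry provided by that module.
 */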
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;
	unsigned long aaddr = adjust_address(rec, addr);
	struct module *mod = NULL;

#ifdef CONFIG_ARM_MODULE_PLTS
	mod = rec->arch.mod;
#endif

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, aaddr, !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!new && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		new = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);

	new = ftrace_call_replace(ip, adjust_address(rec, addr), true);

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long aaddr = adjust_address(rec, addr);
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

#ifdef CONFIG_ARM_MODULE_PLTS
	/* mod is only supplied during module loading */
	if (!mod)
		mod = rec->arch.mod;
	else
		rec->arch.mod = mod;
#endif

	old = ftrace_call_replace(ip, aaddr,
				  !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
#ifdef CONFIG_ARM_MODULE_PLTS
	if (!old && mod) {
		aaddr = get_module_plt(mod, ip, aaddr);
		old = ftrace_call_replace(ip, aaddr, true);
	}
#endif

	new = ftrace_nop_replace(rec);
	/*
	 * Locations in .init.text may call __gnu_mcount_nc via a
	 * linker-emitted veneer if they are too far away from its
	 * implementation, and so validation may fail spuriously in such
	 * cases. Let's work around this by omitting those from validation.
	 */
	ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));

	return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
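/*
 * Hook the return path of the instrumented function for the function graph
 * tracer: the return address saved at *parent is replaced with
 * return_to_handler so the function's exit can be traced as well, and is
 * restored if function_graph_enter() rejects the entry.  Without frame
 * pointers, the unwinder is used to locate where that return address
 * actually lives on the stack.
 */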
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer,
			   unsigned long stack_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
		/* FP points one word below parent's top of stack */
		frame_pointer += 4;
	} else {
		struct stackframe frame = {
			.fp = frame_pointer,
			.sp = stack_pointer,
			.lr = self_addr,
			.pc = self_addr,
		};
		if (unwind_frame(&frame) < 0)
			return;
		if (frame.lr != self_addr)
			parent = frame.lr_addr;
		frame_pointer = frame.sp;
	}

	old = *parent;
	*parent = return_hooker;

	if (function_graph_enter(old, self_addr, frame_pointer, NULL))
		*parent = old;
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

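/*
 * Flip one call site inside the ftrace trampolines (e.g. ftrace_graph_call)
 * between a NOP and a plain branch to 'func', depending on 'enable', with
 * validation of the instruction being replaced.
 */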
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = arm_gen_branch(pc, caller_fn);
	unsigned long nop = arm_gen_nop();
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
				     ftrace_graph_regs_caller,
				     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */