cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.c (9386B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"
/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original as well as the disabled function prologue
 * contain only a single six-byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched and afterwards looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
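
/*
 * Instruction encodings involved (an illustration; these are the constants
 * used throughout this file, with displacements counted in halfwords):
 *
 *   brcl	0,disp		-> 0xc0 0x04 <disp32>	six-byte nop (never taken)
 *   brcl	15,disp		-> 0xc0 0xf4 <disp32>	unconditional branch
 *   brasl	%r1,disp	-> 0xc0 0x15 <disp32>	branch relative and save to %r1
 *
 * The condition/register nibble sits in the high half of the second byte,
 * so a call site can be switched on and off by rewriting that single byte
 * (see ftrace_patch_branch_mask() below).
 */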

void *ftrace_func __read_mostly = ftrace_stub;

/* Matches the six-byte brcl/brasl instruction at a call site. */
struct ftrace_insn {
	u16 opc;	/* opcode plus mask/register nibble */
	s32 disp;	/* signed displacement in halfwords */
} __packed;

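/*
 * Shared part of the hotpatch trampolines (a sketch of how it works; the
 * exact layout is given by struct ftrace_hotpatch_trampoline): the
 * per-function trampoline starts with brasl %r1,<shared code>, so on entry
 * %r1 points just past that brasl into the trampoline's data. The lmg then
 * loads the traced function's resume address into %r0 and the interceptor
 * address into %r1, and branches to the interceptor.
 */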
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

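/*
 * Expoline-safe variant: the indirect branch is performed through an
 * execute-relative instruction, matching the kernel's expoline scheme for
 * hardening indirect branches (used unless expolines are disabled, see
 * nospec_disable below).
 */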
#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	exrl	%r0,0f\n"
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

/* Every call site needs ftrace_init_nop() to set up its trampoline first. */
bool ftrace_need_init_nop(void)
{
	return true;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2; /* halfwords */
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline (displacement in halfwords). */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}
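
/*
 * Layout and control flow once a call site has been initialized and
 * enabled (an illustration; the exact field layout, including padding, is
 * given by struct ftrace_hotpatch_trampoline in asm/ftrace.h):
 *
 *   <func>:       brcl  15,<trampoline>    # brcl 0,<trampoline> while disabled
 *
 *   <trampoline>: brasl %r1,<shared code>  # 0xc015
 *                 .quad <func>+6           # resume address, loaded into %r0
 *                 .quad FTRACE_ADDR        # interceptor, loaded into %r1
 */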

static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;	/* displacement is in halfwords */
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)	/* expect the trampoline's brasl %r1,... */
		return ERR_PTR(-EINVAL);
	return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	/* ftrace_func is read by the ftrace_caller entry code. */
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

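/*
 * The per-function trampolines of modules cannot use the shared trampoline
 * code in vmlinux directly; instead a copy is placed in module_alloc()
 * memory and used as their brasl target (see the shared = ftrace_plt
 * assignment in ftrace_init_nop() above).
 */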
static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;	/* account for the patched instruction at the call site */
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition (brc). To enable the
 * ftrace graph code block, we simply patch the mask field of the
 * instruction to zero, which turns the instruction into a nop.
 * To disable the ftrace graph code, the mask field is patched to all ones,
 * which turns the instruction into an unconditional branch over the block.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}
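
/*
 * brc is the four-byte RI-format relative branch (0xa7?4). As with brcl,
 * the mask nibble lives in the high half of the second byte, so
 * ftrace_patch_branch_mask() toggles it between "never" (0xa704, a nop)
 * and "always" (0xa7f4) in exactly the same way.
 */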

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
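/*
 * With CONFIG_KPROBES_ON_FTRACE, a kprobe placed on a function's ftrace
 * call site fires from this ftrace handler instead of via a breakpoint
 * instruction, so the probed code does not need to be patched a second
 * time.
 */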
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	/* Protect against recursing into this handler. */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		/* Another kprobe is already being handled on this CPU. */
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	/* Let the pre handler see the probe address. */
	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		/* "Single step" by resuming after the patched call site. */
		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	/* Ftrace-based kprobes need no out-of-line single-step slot. */
	p->ainsn.insn = NULL;
	return 0;
}
#endif