cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.c (17641B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     14
     15#include <linux/spinlock.h>
     16#include <linux/hardirq.h>
     17#include <linux/uaccess.h>
     18#include <linux/ftrace.h>
     19#include <linux/percpu.h>
     20#include <linux/sched.h>
     21#include <linux/slab.h>
     22#include <linux/init.h>
     23#include <linux/list.h>
     24#include <linux/module.h>
     25#include <linux/memory.h>
     26#include <linux/vmalloc.h>
     27
     28#include <trace/syscall.h>
     29
     30#include <asm/set_memory.h>
     31#include <asm/kprobes.h>
     32#include <asm/ftrace.h>
     33#include <asm/nops.h>
     34#include <asm/text-patching.h>
     35
     36#ifdef CONFIG_DYNAMIC_FTRACE
     37
     38static int ftrace_poke_late = 0;
     39
     40void ftrace_arch_code_modify_prepare(void)
     41    __acquires(&text_mutex)
     42{
     43	/*
     44	 * Need to grab text_mutex to prevent a race from module loading
     45	 * and live kernel patching from changing the text permissions while
     46	 * ftrace has it set to "read/write".
     47	 */
     48	mutex_lock(&text_mutex);
     49	ftrace_poke_late = 1;
     50}
     51
     52void ftrace_arch_code_modify_post_process(void)
     53    __releases(&text_mutex)
     54{
     55	/*
     56	 * ftrace_make_{call,nop}() may be called during
     57	 * module load, and we need to finish the text_poke_queue()
     58	 * that they do, here.
     59	 */
     60	text_poke_finish();
     61	ftrace_poke_late = 0;
     62	mutex_unlock(&text_mutex);
     63}
     64
     65static const char *ftrace_nop_replace(void)
     66{
     67	return x86_nops[5];
     68}
     69
     70static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
     71{
     72	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
     73}
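
/*
 * Both replacements are MCOUNT_INSN_SIZE (5) bytes: either the 5-byte
 * NOP above, or "e8 <rel32>", a near call whose 32-bit displacement is
 * measured from the end of the instruction. Roughly (illustrative):
 *
 *	rel32 = addr - (ip + MCOUNT_INSN_SIZE);
 *	insn  = { 0xe8, <rel32 bytes, little-endian> };
 */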

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
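
/*
 * Usage sketch (illustrative): before turning a call site back into a
 * NOP, a caller verifies the old call really is still there:
 *
 *	old = ftrace_call_replace(ip, addr);
 *	if (ftrace_verify_code(ip, old))
 *		return -EINVAL;	// bytes differ; refuse to patch
 */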

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				 unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}
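
/*
 * text_poke_bp() patches live text without stopping the machine,
 * roughly: write an INT3 over the first byte, sync every core, write
 * the tail of the new instruction, sync, then replace the INT3 with
 * the new first byte and sync once more. A CPU that hits the INT3
 * mid-update is caught by the int3 handler, which emulates or
 * redirects the half-patched instruction.
 */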

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}
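
/*
 * Note the two passes above: every record is verified first, so a bad
 * site aborts via ftrace_bug() before anything has been patched, and
 * only then are all pokes queued and flushed in one batch by
 * text_poke_finish(), keeping the number of costly core-sync
 * operations independent of the number of patch sites.
 */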

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-function_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
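
/*
 * Byte layout of the instruction the union overlays:
 *
 *	48 8b 15 xx xx xx xx
 *	........ -----------
 *	op[3]    offset (disp32)
 *
 * 0x48 is the REX.W prefix, 0x8b is MOV r64,r/m64, and ModRM 0x15
 * selects %rdx with a RIP-relative disp32, so rewriting "offset"
 * redirects the load without touching the opcode bytes.
 */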
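
/*
 * Size of the trampoline's terminating return: a 1-byte RET, plus a
 * trailing INT3 when CONFIG_SLS (straight-line speculation mitigation)
 * is enabled -- matching the retq[] bytes used below.
 */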
#define RET_SIZE		(1 + IS_ENABLED(CONFIG_SLS))

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the return instruction, as well as the address of the
	 * ftrace_ops this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	memcpy(ip, retq, RET_SIZE);

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
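
/*
 * Resulting layout (sketch):
 *
 *	trampoline:		copy of ftrace_(regs_)caller, with its
 *				"movq function_trace_op(%rip), %rdx"
 *				rewritten to load from ptr below, and its
 *				call site pointing at ftrace_ops_get_func(ops)
 *	+ size:			retq (plus int3 when CONFIG_SLS)
 *	+ size + RET_SIZE:	the ftrace_ops pointer read via the movq
 */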

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
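
/*
 * Because a trampoline is a verbatim copy of ftrace_(regs_)caller, the
 * patched call instruction sits at the same offset from the start in
 * both, so this one offset locates the call site in either body.
 */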

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}
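
/*
 * Example decode (illustrative): for the bytes "e8 10 00 00 00" at
 * ptr == 0x1000, disp == 0x10, so the call target is
 * 0x1000 + CALL_INSN_SIZE + 0x10 == 0x1015.
 */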

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}
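
/*
 * A "jmp rel32" (e9 <rel32>) is also 5 bytes, the same length as the
 * call it replaces, which is why MCOUNT_INSN_SIZE doubles as the patch
 * size here.
 */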

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Hook the return address and push it onto the stack of return
 * addresses in the current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably.  Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}
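
/*
 * The hook works by rewriting the caller's return-address slot:
 * "parent" points at the word holding the traced function's return
 * address, function_graph_enter() records the real address, and the
 * store of return_hooker makes the function "return" into
 * return_to_handler, which logs the exit and then resumes at the
 * saved address.
 */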

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */