cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ftrace.h (37694B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops;
struct ftrace_regs;
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 *
 * Also define the function prototype that these architectures use
 * to call the ftrace_ops_list_func().
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
#endif /* CONFIG_FUNCTION_TRACER */

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
/*
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * if it allows setting of the instruction pointer from the ftrace_regs
 * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
 * live kernel patching.
 */
#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
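
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * a minimal callback matching ftrace_func_t. Whether @fregs carries a full
 * pt_regs depends on the ops flags and the architecture, so check the
 * result of ftrace_get_regs() before touching the registers:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (!regs)
 *			return;
 *	}
 */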

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops is being deleted, do not let it be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hashes used to know which functions the callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule on all CPUs to make sure
 * that there are no more users. Depending on the load of the system
 * that may take a bit of time.
 *
 * Care must also be taken that any private data is not freed too early;
 * if private data is added to an ftrace_ops that is in core code, the
 * user of the ftrace_ops must perform a schedule_on_each_cpu() before
 * freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
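
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the two macros above pair up to walk the global ops list:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		pr_info("ops flags: %lx\n", op->flags);
 *	} while_for_each_ftrace_op(op);
 */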

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_op or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
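
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * a typical registration sequence from module code. The ops must be static
 * (see the note above); using kallsyms_lookup_name() to obtain the target
 * address is an assumption of this example, not a requirement:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	unsigned long ip = kallsyms_lookup_name("vfs_read");
 *
 *	if (!ip || ftrace_set_filter_ip(&my_ops, ip, 0, 0))
 *		return -ENODEV;
 *	return register_ftrace_function(&my_ops);
 */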

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);


int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr);
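
/*
 * Illustrative sketch (editor's addition, not part of the upstream header),
 * modeled on samples/ftrace/: my_tramp stands for an assumed assembly stub
 * that preserves the registers it uses and returns with a ret:
 *
 *	extern void my_tramp(void);
 *
 *	ret = register_ftrace_direct((unsigned long)my_traced_func,
 *				     (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)my_traced_func,
 *				 (unsigned long)my_tramp);
 */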

#else
struct ftrace_ops;
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
{
	return -ENODEV;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
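
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the pair above must bracket the critical section while preemption (or
 * interrupts) stays disabled throughout:
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	...			(code that must not be stack traced)
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */
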
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_arch_code_modify_prepare(void);
void ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};
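
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * reading the two halves of rec->flags described above:
 *
 *	if (rec->flags & FTRACE_FL_ENABLED)
 *		pr_info("%ps is traced by %lu callbacks\n",
 *			(void *)rec->ip, ftrace_rec_count(rec));
 */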

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
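
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the string based filters accept glob patterns; a nonzero @reset first
 * clears any previously installed filter:
 *
 *	char pattern[] = "vfs_*";
 *
 *	ftrace_set_filter(&my_ops, pattern, strlen(pattern), 1);
 */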

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
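
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * walking every patchable call site; the records are only safe to access
 * from the code-modification paths, which already hold ftrace_lock:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...		(inspect rec->ip and rec->flags)
 *	}
 */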


int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
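
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the read/compare/write contract above maps onto an arch implementation
 * roughly as follows; arch_build_call(), arch_write_nop() and the
 * MCOUNT_INSN_SIZE usage here are assumptions for illustration:
 *
 *	int ftrace_make_nop(struct module *mod,
 *			    struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned char old[MCOUNT_INSN_SIZE];
 *		unsigned char expect[MCOUNT_INSN_SIZE];
 *
 *		arch_build_call(expect, rec->ip, addr);
 *		if (copy_from_kernel_nofault(old, (void *)rec->ip,
 *					     MCOUNT_INSN_SIZE))
 *			return -EFAULT;
 *		if (memcmp(old, expect, MCOUNT_INSN_SIZE))
 *			return -EINVAL;
 *		return arch_write_nop(rec->ip) ? -EPERM : 0;
 *	}
 */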

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *  0	    if ftrace_init_nop() should be called
 *  Nonzero if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef ftrace_graph_func
#define ftrace_graph_func ftrace_stub
#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
#else
#define FTRACE_OPS_GRAPH_STUB 0
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
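
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the save/restore pair, with the caller providing the synchronization
 * mentioned above:
 *
 *	int saved = __ftrace_enabled_save();
 *	...			(work that must run with tracing off)
 *	__ftrace_enabled_restore(saved);
 */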

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will generate
 * code for the CALLER_ADDR arguments, when we really want these to be a
 * real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we want it to keep being traced by the usual
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
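
/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * a graph tracer registers one entry and one return handler; a nonzero
 * return from the entry handler requests the matching return event:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_fgraph_ops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_fgraph_ops);
 */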

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */