cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

trace.h (65435B)


// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/once_lite.h>

#include "pid_list.h"

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

#define TRACE_MODE_WRITE	0640
#define TRACE_MODE_READ		0440

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,
	TRACE_OSNOISE,
	TRACE_TIMERLAT,
	TRACE_RAW_DATA,
	TRACE_FUNC_REPEATS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_fn
#define __field_fn(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef __rel_dynamic_array
#define __rel_dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	regfn)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"

/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...)					\
	DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
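/*
 * For example (illustrative only), an allocation failure path can
 * report once, instead of flooding the log, via:
 *
 *	MEM_FAIL(!ptr, "failed to allocate trace buffer for cpu %d\n", cpu);
 */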

/*
 * Syscalls are special and need special handling; this is why they
 * are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct eprobe_trace_entry_head {
	struct trace_entry	ent;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	u64			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
	int			ftrace_ignore_pid;
#endif
	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
	struct trace_array		*tr;
	struct trace_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	u64				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list *trace_pid_list_alloc(void);
void trace_pid_list_free(struct trace_pid_list *pid_list);
bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid);
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid);
int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
			unsigned int *next);
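/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): walking every pid recorded in a trace_pid_list with the
 * first/next iterators above, assuming their 0-on-success /
 * negative-when-exhausted return convention.
 */
static inline void trace_pid_list_walk_example(struct trace_pid_list *pid_list)
{
	unsigned int pid;
	int ret;

	for (ret = trace_pid_list_first(pid_list, &pid);
	     !ret;
	     ret = trace_pid_list_next(pid_list, pid + 1, &pid)) {
		/* @pid is set in the list */
	}
}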

enum {
	TRACE_PIDS		= BIT(0),
	TRACE_NO_PIDS		= BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
				    struct trace_pid_list *no_pid_list)
{
	/* Return true if the pid list in type has pids */
	return ((type & TRACE_PIDS) && pid_list) ||
		((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
					 struct trace_pid_list *no_pid_list)
{
	/*
	 * Turning off what is in @type, return true if the "other"
	 * pid list still has pids in it.
	 */
	return (!(type & TRACE_PIDS) && pid_list) ||
		(!(type & TRACE_NO_PIDS) && no_pid_list);
}

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * in turn passed to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_snapshot_cond_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_snapshot_cond_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held.  The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken.  Because the max_lock is held for
 *	the duration of update(), the implementation can safely
 *	retrieve and save any implementation data it needs to in
 *	association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
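/*
 * Illustrative sketch (hypothetical names, not part of the original
 * header): an update() callback that lets the snapshot proceed only
 * when the value handed to tracing_snapshot_cond(tr, cond_data)
 * crosses a threshold.
 */
static inline bool example_snapshot_update(struct trace_array *tr, void *cond_data)
{
	u64 val = *(u64 *)cond_data;	/* from tracing_snapshot_cond() */

	return val > 1000;		/* true => take the snapshot */
}

/*
 * Paired (under CONFIG_TRACER_SNAPSHOT) with something like:
 *	tracing_snapshot_cond_enable(tr, NULL, example_snapshot_update);
 *	tracing_snapshot_cond(tr, &current_value);
 */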

/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
	unsigned long	ip;
	unsigned long	parent_ip;
	unsigned long	count;
	u64		ts_last_call;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer.  When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer and
	 * the buffers are reset for the array_buffer so the tracing can
	 * continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
	|| defined(CONFIG_OSNOISE_TRACER)
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in places other than update_max_tr(), so it
	 * needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
	int			trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			no_filter_buffering_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
	struct trace_func_repeats	__percpu *last_func_repeats;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types, simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)
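/*
 * Illustrative sketch (hypothetical handler, not part of the original
 * header): the usual pattern in an output callback is to
 * verify-and-cast the generic entry with trace_assign_type() and then
 * format the concrete fields.
 */
static inline enum print_line_t example_fn_print(struct trace_iterator *iter)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);	/* WARNs if ent->type != TRACE_FN */
	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
			 field->ip, field->parent_ip);
	return trace_handle_return(&iter->seq);
}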

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals that one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
	/* True if tracer cannot be enabled in kernel param */
	bool			noboot;
};
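/*
 * Illustrative sketch (hypothetical tracer, not part of the original
 * header): the smallest tracer is little more than a name plus
 * init/reset callbacks handed to register_tracer(), declared further
 * below.
 */
static inline int example_tracer_init(struct trace_array *tr)
{
	return 0;		/* ready to trace */
}

static inline void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __maybe_unused = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

/*
 * An __init call to register_tracer(&example_tracer) would then make
 * it selectable via: echo example > /sys/kernel/tracing/current_tracer
 */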

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);
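/*
 * For example (illustrative, hypothetical names): a read-only tracefs
 * file using the TRACE_MODE_* constants defined near the top of this
 * header:
 *
 *	trace_create_file("example", TRACE_MODE_READ, parent, tr,
 *			  &example_fops);
 */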

int tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned int trace_ctx);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
						struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
					struct ring_buffer_event *event);

bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
			 va_list ap);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned int trace_ctx);
void trace_graph_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct trace_pid_list *filtered_no_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
	|| defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY)
#define LATENCY_FS_NOTIFY
#endif

#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
#else
static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
				 int skip)
{
}
#endif /* CONFIG_STACKTRACE */

void trace_last_func_repeats(struct trace_array *tr,
			     struct trace_func_repeats *last_info,
			     unsigned int trace_ctx);

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern void __init disable_tracing_selftest(const char *reason);

extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
	struct list_head	list;
	char			*func;
	char			*module;
	int			 enable;
};

enum {
	FTRACE_HASH_FL_MOD	= (1 << 0),
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	unsigned long		flags;
	struct rcu_head		rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_REL_TIME      0x40
#define TRACE_GRAPH_PRINT_IRQS          0x80
#define TRACE_GRAPH_PRINT_TAIL          0x100
#define TRACE_GRAPH_SLEEP_TIME          0x200
#define TRACE_GRAPH_GRAPH_TIME          0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned int trace_ctx);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned int trace_ctx);

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	unsigned long addr = trace->func;
	int ret = 0;
	struct ftrace_hash *hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

	if (ftrace_hash_empty(hash)) {
		ret = 1;
		goto out;
	}

	if (ftrace_lookup_ip(hash, addr)) {

		/*
		 * This needs to be cleared on the return functions
		 * when the depth is zero.
		 */
		trace_recursion_set(TRACE_GRAPH_BIT);
		trace_recursion_set_depth(trace->depth);

		/*
		 * If no irqs are to be traced, but a set_graph_function
		 * is set, and called by an interrupt handler, we still
		 * want to trace it.
		 */
		if (in_hardirq())
			trace_recursion_set(TRACE_IRQ_BIT);
		else
			trace_recursion_clear(TRACE_IRQ_BIT);
		ret = 1;
	}

out:
	preempt_enable_notrace();
	return ret;
}

static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
	    trace->depth == trace_recursion_depth())
		trace_recursion_clear(TRACE_GRAPH_BIT);
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int ret = 0;
	struct ftrace_hash *notrace_hash;

	preempt_disable_notrace();

	/*
	 * Have to open code "rcu_dereference_sched()" because the
	 * function graph tracer can be called when RCU is not
	 * "watching".
	 * Protected with schedule_on_each_cpu(ftrace_sync)
	 */
	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
						 !preemptible());

	if (ftrace_lookup_ip(notrace_hash, addr))
		ret = 1;

	preempt_enable_notrace();
	return ret;
}
#else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
	/* trace it when it is nested in an enabled function, or is one itself */
	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
		 ftrace_graph_addr(trace)) ||
		(trace->depth < 0) ||
		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER

#define FTRACE_PID_IGNORE	-1
#define FTRACE_PID_TRACE	-2

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct trace_array *tr,
					struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
		FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
int ftrace_allocate_ftrace_ops(struct trace_array *tr);
void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					struct trace_array *tr,
					struct ftrace_probe_ops *ops,
					void *data);
	int			(*init)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *init_data,
					void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					struct trace_array *tr,
					unsigned long ip, void *data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
					   unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
			       unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
				   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
			     ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
			       struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
				      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
			     int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			      int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos);
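/*
 * Illustrative sketch (hypothetical write handler, not part of the
 * original header): the usual trace_parser loop for a tracefs file
 * that accepts space-separated tokens, following the pattern of the
 * filter file handlers.
 */
static inline ssize_t example_tokens_write(struct file *filp,
					   const char __user *ubuf,
					   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, TRACE_BUF_SIZE))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one parsed token */
		/* ... act on parser.buffer here ... */
		trace_parser_clear(&parser);
	}

	trace_parser_put(&parser);
	return ret;
}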

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),	\
		C(FUNC_FORK,		"function-fork"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
# define TRACE_ITER_FUNC_FORK		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(RECORD_TGID,		"record-tgid"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
		C(HASH_PTR,		"hash-ptr"),	/* Print hashed pointer */ \
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
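/*
 * For example, the "verbose" entry above expands first to
 * TRACE_ITER_VERBOSE_BIT (== 3) and then to
 * TRACE_ITER_VERBOSE == (1 << 3), so code can test:
 *
 *	if (tr->trace_flags & TRACE_ITER_VERBOSE)
 *		...
 */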

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/* set ring buffers to default size if not already done */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct prog_entry;

struct event_filter {
	struct prog_entry __rcu	*prog;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head		list;
	struct event_subsystem		*subsystem;
	struct trace_array		*tr;
	struct dentry			*entry;
	int				ref_count;
	int				nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct trace_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned int trace_ctx,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct trace_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned int trace_ctx)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct trace_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer and enable preemption */
		this_cpu_dec(trace_buffered_event_cnt);
		preempt_enable_notrace();
		return;
	}
	/* ring_buffer_discard_commit() enables preemption */
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct trace_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, buffer, entry, event);

	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
				    EVENT_FILE_FL_FILTERED |
				    EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
		goto discard;

	if (file->flags & EVENT_FILE_FL_FILTERED &&
	    !filter_match_preds(file->filter, entry))
		goto discard;

	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(file))
		goto discard;

	return false;
 discard:
	__trace_event_discard_commit(buffer, event);
	return true;
}
/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @trace_ctx: The tracing context flags.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * checks whether the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct trace_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned int trace_ctx)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);

	if (tt)
		event_triggers_post_call(file, tt);
}
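/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the usual reserve -> fill -> commit sequence an event write
 * path follows, finished with the trigger-aware commit above.  Real
 * callers also reserve room for print_entry's dynamic buf[] payload.
 */
static inline void example_trace_print(struct trace_event_file *file,
				       struct trace_buffer *buffer,
				       unsigned long ip, unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct print_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	/* Discarded here if filtered out or soft disabled */
	event_trigger_unlock_commit(file, buffer, event, entry, trace_ctx);
}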

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
	MATCH_GLOB,
	MATCH_INDEX,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t 	fn;
	u64 			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int 			offset;
	int			not;
	int 			op;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_RDYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING ||
	       field->filter_type == FILTER_COMM;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_array *tr,
			       struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern void trace_event_enable_tgid_record(bool enable);

extern int event_trace_init(void);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
extern void __trace_early_add_events(struct trace_array *tr);

extern struct trace_event_file *__find_event_file(struct trace_array *tr,
						  const char *system,
						  const char *event);
extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return READ_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
extern const struct file_operations event_hist_debug_fops;
extern const struct file_operations event_inject_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

enum {
	EVENT_TRIGGER_FL_PROBE		= BIT(0),
};

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	int				flags;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_data *data);
extern int event_enable_trigger_parse(struct event_command *cmd_ops,
				      struct trace_event_file *file,
				      char *glob, char *cmd,
				      char *param_and_filter);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
   1601extern void pause_named_trigger(struct event_trigger_data *data);
   1602extern void unpause_named_trigger(struct event_trigger_data *data);
   1603extern void set_named_trigger_data(struct event_trigger_data *data,
   1604				   struct event_trigger_data *named_data);
   1605extern struct event_trigger_data *
   1606get_named_trigger_data(struct event_trigger_data *data);
   1607extern int register_event_command(struct event_command *cmd);
   1608extern int unregister_event_command(struct event_command *cmd);
   1610extern bool event_trigger_check_remove(const char *glob);
   1611extern bool event_trigger_empty_param(const char *param);
   1612extern int event_trigger_separate_filter(char *param_and_filter, char **param,
   1613					 char **filter, bool param_required);
   1614extern struct event_trigger_data *
   1615event_trigger_alloc(struct event_command *cmd_ops,
   1616		    char *cmd,
   1617		    char *param,
   1618		    void *private_data);
   1619extern int event_trigger_parse_num(char *trigger,
   1620				   struct event_trigger_data *trigger_data);
   1621extern int event_trigger_set_filter(struct event_command *cmd_ops,
   1622				    struct trace_event_file *file,
   1623				    char *param,
   1624				    struct event_trigger_data *trigger_data);
   1625extern void event_trigger_reset_filter(struct event_command *cmd_ops,
   1626				       struct event_trigger_data *trigger_data);
   1627extern int event_trigger_register(struct event_command *cmd_ops,
   1628				  struct trace_event_file *file,
   1629				  char *glob,
   1630				  struct event_trigger_data *trigger_data);
   1631extern void event_trigger_unregister(struct event_command *cmd_ops,
   1632				     struct trace_event_file *file,
   1633				     char *glob,
   1634				     struct event_trigger_data *trigger_data);
   1635
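/*
 * A minimal sketch of how an event_command @parse() implementation
 * typically strings the helpers above together (modeled on the
 * generic parse logic in trace_events_trigger.c; error paths trimmed,
 * my_cmd_parse is an illustrative name):
 *
 *	static int my_cmd_parse(struct event_command *cmd_ops,
 *				struct trace_event_file *file,
 *				char *glob, char *cmd,
 *				char *param_and_filter)
 *	{
 *		struct event_trigger_data *trigger_data;
 *		char *param, *filter;
 *		bool remove;
 *		int ret;
 *
 *		remove = event_trigger_check_remove(glob);
 *		ret = event_trigger_separate_filter(param_and_filter,
 *						    &param, &filter, false);
 *		if (ret)
 *			return ret;
 *
 *		trigger_data = event_trigger_alloc(cmd_ops, cmd, param, file);
 *		if (!trigger_data)
 *			return -ENOMEM;
 *
 *		if (remove) {
 *			event_trigger_unregister(cmd_ops, file, glob,
 *						 trigger_data);
 *			kfree(trigger_data);
 *			return 0;
 *		}
 *
 *		ret = event_trigger_parse_num(param, trigger_data);
 *		if (!ret)
 *			ret = event_trigger_set_filter(cmd_ops, file,
 *						       filter, trigger_data);
 *		if (!ret)
 *			ret = event_trigger_register(cmd_ops, file, glob,
 *						     trigger_data);
 *		if (ret) {
 *			event_trigger_reset_filter(cmd_ops, trigger_data);
 *			kfree(trigger_data);
 *		}
 *		return ret;
 *	}
 */
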
   1636/**
   1637 * struct event_trigger_ops - callbacks for trace event triggers
   1638 *
   1639 * The methods in this structure provide per-event trigger hooks for
   1640 * various trigger operations.
   1641 *
   1642 * The @init and @free methods are used during trigger setup and
   1643 * teardown, typically called from an event_command's @parse()
   1644 * function implementation.
   1645 *
   1646 * The @print method is used to print the trigger spec.
   1647 *
   1648 * The @trigger method is the function that actually implements the
   1649 * trigger and is called in the context of the triggering event
   1650 * whenever that event occurs.
   1651 *
   1652 * All the methods below, except for @init() and @free(), must be
   1653 * implemented.
   1654 *
   1655 * @trigger: The trigger 'probe' function called when the triggering
   1656 *	event occurs.  The data passed into this callback is the data
   1657 *	that was supplied to the event_command @reg() function that
   1658 *	registered the trigger (see struct event_command) along with
   1659 *	the trace record, rec.
   1660 *
   1661 * @init: An optional initialization function called for the trigger
   1662 *	when the trigger is registered (via the event_command reg()
   1663 *	function).  This can be used to perform per-trigger
   1664 *	initialization such as incrementing a per-trigger reference
   1665 *	count, for instance.  This is usually implemented by the
   1666 *	generic utility function @event_trigger_init() (see
    1667 *	trace_events_trigger.c).
   1668 *
    1669 * @free: An optional de-initialization function called for the
    1670 *	trigger when the trigger is unregistered (via the
    1671 *	event_command @unreg() function).  This can be used to perform
    1672 *	per-trigger de-initialization such as decrementing a
    1673 *	per-trigger reference count and freeing corresponding trigger
    1674 *	data, for instance.  This is usually implemented by the
    1675 *	generic utility function @event_trigger_free() (see
    1676 *	trace_events_trigger.c).
   1677 *
   1678 * @print: The callback function invoked to have the trigger print
   1679 *	itself.  This is usually implemented by a wrapper function
   1680 *	that calls the generic utility function @event_trigger_print()
    1681 *	(see trace_events_trigger.c).
   1682 */
   1683struct event_trigger_ops {
   1684	void			(*trigger)(struct event_trigger_data *data,
   1685					   struct trace_buffer *buffer,
   1686					   void *rec,
   1687					   struct ring_buffer_event *rbe);
   1688	int			(*init)(struct event_trigger_data *data);
   1689	void			(*free)(struct event_trigger_data *data);
   1690	int			(*print)(struct seq_file *m,
   1691					 struct event_trigger_data *data);
   1692};
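
/*
 * A minimal sketch of an event_trigger_ops instance (illustrative
 * names; compare the traceon/traceoff trigger ops in
 * trace_events_trigger.c). @init/@free are the generic helpers named
 * above; the count test follows the usual "fire at most N times,
 * where -1 means unlimited" convention:
 *
 *	static void my_count_trigger(struct event_trigger_data *data,
 *				     struct trace_buffer *buffer, void *rec,
 *				     struct ring_buffer_event *rbe)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *		// ... perform the actual trigger action here ...
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.trigger	= my_count_trigger,
 *		.init		= event_trigger_init,
 *		.free		= event_trigger_free,
 *		.print		= my_trigger_print,	// wrapper around event_trigger_print()
 *	};
 */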
   1693
   1694/**
   1695 * struct event_command - callbacks and data members for event commands
   1696 *
   1697 * Event commands are invoked by users by writing the command name
   1698 * into the 'trigger' file associated with a trace event.  The
   1699 * parameters associated with a specific invocation of an event
   1700 * command are used to create an event trigger instance, which is
   1701 * added to the list of trigger instances associated with that trace
   1702 * event.  When the event is hit, the set of triggers associated with
   1703 * that event is invoked.
   1704 *
   1705 * The data members in this structure provide per-event command data
   1706 * for various event commands.
   1707 *
   1708 * All the data members below, except for @post_trigger, must be set
   1709 * for each event command.
   1710 *
   1711 * @name: The unique name that identifies the event command.  This is
   1712 *	the name used when setting triggers via trigger files.
   1713 *
    1714 * @trigger_type: A unique id that identifies the event command
    1715 *	'type'.  This value has two purposes.  The first is to ensure
    1716 *	that only one trigger of the same type can be set at a given
    1717 *	time for a particular event; e.g. it doesn't make sense to have
    1718 *	both a traceon and a traceoff trigger attached to a single
    1719 *	event at the same time, so traceon and traceoff have the same
    1720 *	type though they have different names.  The @trigger_type value
    1721 *	is also used as a bit value for deferring the actual trigger
    1722 *	action until after the current event is finished.  Some
    1723 *	commands need to do this if they themselves log to the trace
    1724 *	buffer (see the @post_trigger flag below).  @trigger_type
    1725 *	values are defined by adding new values to the trigger_type
    1726 *	enum in include/linux/trace_events.h.
   1727 *
   1728 * @flags: See the enum event_command_flags below.
   1729 *
   1730 * All the methods below, except for @set_filter() and @unreg_all(),
   1731 * must be implemented.
   1732 *
   1733 * @parse: The callback function responsible for parsing and
   1734 *	registering the trigger written to the 'trigger' file by the
   1735 *	user.  It allocates the trigger instance and registers it with
   1736 *	the appropriate trace event.  It makes use of the other
   1737 *	event_command callback functions to orchestrate this, and is
   1738 *	usually implemented by the generic utility function
    1739 *	@event_trigger_parse() (see trace_events_trigger.c).
   1740 *
   1741 * @reg: Adds the trigger to the list of triggers associated with the
   1742 *	event, and enables the event trigger itself, after
   1743 *	initializing it (via the event_trigger_ops @init() function).
   1744 *	This is also where commands can use the @trigger_type value to
   1745 *	make the decision as to whether or not multiple instances of
   1746 *	the trigger should be allowed.  This is usually implemented by
   1747 *	the generic utility function @register_trigger() (see
    1748 *	trace_events_trigger.c).
   1749 *
    1750 * @unreg: Removes the trigger from the list of triggers associated
    1751 *	with the event, disables the event trigger itself, and then
    1752 *	de-initializes it (via the event_trigger_ops @free() function).
    1753 *	This is usually implemented by the generic utility function
    1754 *	@unregister_trigger() (see trace_events_trigger.c).
   1755 *
   1756 * @unreg_all: An optional function called to remove all the triggers
   1757 *	from the list of triggers associated with the event.  Called
   1758 *	when a trigger file is opened in truncate mode.
   1759 *
   1760 * @set_filter: An optional function called to parse and set a filter
   1761 *	for the trigger.  If no @set_filter() method is set for the
   1762 *	event command, filters set by the user for the command will be
   1763 *	ignored.  This is usually implemented by the generic utility
    1764 *	function @set_trigger_filter() (see trace_events_trigger.c).
   1765 *
   1766 * @get_trigger_ops: The callback function invoked to retrieve the
   1767 *	event_trigger_ops implementation associated with the command.
   1768 *	This callback function allows a single event_command to
   1769 *	support multiple trigger implementations via different sets of
   1770 *	event_trigger_ops, depending on the value of the @param
   1771 *	string.
   1772 */
   1773struct event_command {
   1774	struct list_head	list;
   1775	char			*name;
   1776	enum event_trigger_type	trigger_type;
   1777	int			flags;
   1778	int			(*parse)(struct event_command *cmd_ops,
   1779					 struct trace_event_file *file,
   1780					 char *glob, char *cmd,
   1781					 char *param_and_filter);
   1782	int			(*reg)(char *glob,
   1783				       struct event_trigger_data *data,
   1784				       struct trace_event_file *file);
   1785	void			(*unreg)(char *glob,
   1786					 struct event_trigger_data *data,
   1787					 struct trace_event_file *file);
   1788	void			(*unreg_all)(struct trace_event_file *file);
   1789	int			(*set_filter)(char *filter_str,
   1790					      struct event_trigger_data *data,
   1791					      struct trace_event_file *file);
   1792	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
   1793};
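
/*
 * A minimal sketch of defining and registering an event_command
 * (illustrative names; compare the traceon/traceoff commands and
 * register_trigger_cmds() in trace_events_trigger.c). ETT_TRACE_ONOFF
 * stands in for a trigger_type bit from include/linux/trace_events.h,
 * and @reg/@unreg/@set_filter use the generic helpers named above:
 *
 *	static struct event_command my_trigger_cmd = {
 *		.name			= "mytrigger",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.parse			= my_cmd_parse,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	static __init int register_my_trigger_cmd(void)
 *	{
 *		return register_event_command(&my_trigger_cmd);
 *	}
 */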
   1794
   1795/**
   1796 * enum event_command_flags - flags for struct event_command
   1797 *
   1798 * @POST_TRIGGER: A flag that says whether or not this command needs
   1799 *	to have its action delayed until after the current event has
   1800 *	been closed.  Some triggers need to avoid being invoked while
   1801 *	an event is currently in the process of being logged, since
   1802 *	the trigger may itself log data into the trace buffer.  Thus
   1803 *	we make sure the current event is committed before invoking
   1804 *	those triggers.  To do that, the trigger invocation is split
   1805 *	in two - the first part checks the filter using the current
   1806 *	trace record; if a command has the @post_trigger flag set, it
   1807 *	sets a bit for itself in the return value, otherwise it
   1808 *	directly invokes the trigger.  Once all commands have been
   1809 *	either invoked or set their return flag, the current record is
   1810 *	either committed or discarded.  At that point, if any commands
   1811 *	have deferred their triggers, those commands are finally
   1812 *	invoked following the close of the current event.  In other
    1813 *	words, if the event_trigger_ops @trigger() probe implementation
   1814 *	itself logs to the trace buffer, this flag should be set,
   1815 *	otherwise it can be left unspecified.
   1816 *
   1817 * @NEEDS_REC: A flag that says whether or not this command needs
   1818 *	access to the trace record in order to perform its function,
   1819 *	regardless of whether or not it has a filter associated with
   1820 *	it (filters make a trigger require access to the trace record
   1821 *	but are not always present).
   1822 */
   1823enum event_command_flags {
   1824	EVENT_CMD_FL_POST_TRIGGER	= 1,
   1825	EVENT_CMD_FL_NEEDS_REC		= 2,
   1826};
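
/*
 * A sketch of the resulting two-phase flow, as used by
 * event_trigger_unlock_commit(): the test/discard pass returns a
 * bitmask (tt) of deferred trigger types, and those triggers only run
 * once the record has been committed:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
 *		trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
 *	if (tt)
 *		event_triggers_post_call(file, tt);
 */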
   1827
   1828static inline bool event_command_post_trigger(struct event_command *cmd_ops)
   1829{
   1830	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
   1831}
   1832
   1833static inline bool event_command_needs_rec(struct event_command *cmd_ops)
   1834{
   1835	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
   1836}
   1837
   1838extern int trace_event_enable_disable(struct trace_event_file *file,
   1839				      int enable, int soft_disable);
   1840extern int tracing_alloc_snapshot(void);
   1841extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
   1842extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
   1843
   1844extern int tracing_snapshot_cond_disable(struct trace_array *tr);
   1845extern void *tracing_cond_snapshot_data(struct trace_array *tr);
   1846
   1847extern const char *__start___trace_bprintk_fmt[];
   1848extern const char *__stop___trace_bprintk_fmt[];
   1849
   1850extern const char *__start___tracepoint_str[];
   1851extern const char *__stop___tracepoint_str[];
   1852
   1853void trace_printk_control(bool enabled);
   1854void trace_printk_start_comm(void);
   1855int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
   1856int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
   1857
   1858/* Used from boot time tracer */
   1859extern int trace_set_options(struct trace_array *tr, char *option);
   1860extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
   1861extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
   1862					  unsigned long size, int cpu_id);
   1863extern int tracing_set_cpumask(struct trace_array *tr,
   1864				cpumask_var_t tracing_cpumask_new);
   1865
    1866
   1868
   1869extern ssize_t trace_parse_run_command(struct file *file,
   1870		const char __user *buffer, size_t count, loff_t *ppos,
   1871		int (*createfn)(const char *));
   1872
   1873extern unsigned int err_pos(char *cmd, const char *str);
   1874extern void tracing_log_err(struct trace_array *tr,
   1875			    const char *loc, const char *cmd,
   1876			    const char **errs, u8 type, u16 pos);
   1877
    1878/*
    1879 * Normal trace_printk() and friends allocate special buffers
    1880 * to do the manipulation, as well as save the print formats
    1881 * into sections for display. But the trace infrastructure
    1882 * sometimes wants to use these without the added overhead, at
    1883 * the price of being a bit slower (used mainly for warnings,
    1884 * where we don't care about performance). internal_trace_puts()
    1885 * exists for that purpose.
    1886 */
   1887#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
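
/*
 * For example, the snapshot code in trace.c uses this to warn from
 * inside the tracer itself:
 *
 *	internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 */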
   1888
   1889#undef FTRACE_ENTRY
   1890#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
   1891	extern struct trace_event_call					\
   1892	__aligned(4) event_##call;
   1893#undef FTRACE_ENTRY_DUP
   1894#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
   1895	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
   1896#undef FTRACE_ENTRY_PACKED
   1897#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
   1898	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
   1899
   1900#include "trace_entries.h"
   1901
   1902#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
   1903int perf_ftrace_event_register(struct trace_event_call *call,
   1904			       enum trace_reg type, void *data);
   1905#else
   1906#define perf_ftrace_event_register NULL
   1907#endif
   1908
   1909#ifdef CONFIG_FTRACE_SYSCALLS
   1910void init_ftrace_syscalls(void);
   1911const char *get_syscall_name(int syscall);
   1912#else
   1913static inline void init_ftrace_syscalls(void) { }
   1914static inline const char *get_syscall_name(int syscall)
   1915{
   1916	return NULL;
   1917}
   1918#endif
   1919
   1920#ifdef CONFIG_EVENT_TRACING
   1921void trace_event_init(void);
   1922void trace_event_eval_update(struct trace_eval_map **map, int len);
   1923/* Used from boot time tracer */
   1924extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
   1925extern int trigger_process_regex(struct trace_event_file *file, char *buff);
   1926#else
   1927static inline void __init trace_event_init(void) { }
   1928static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
   1929#endif
   1930
   1931#ifdef CONFIG_TRACER_SNAPSHOT
   1932void tracing_snapshot_instance(struct trace_array *tr);
   1933int tracing_alloc_snapshot_instance(struct trace_array *tr);
   1934#else
   1935static inline void tracing_snapshot_instance(struct trace_array *tr) { }
   1936static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
   1937{
   1938	return 0;
   1939}
   1940#endif
   1941
   1942#ifdef CONFIG_PREEMPT_TRACER
   1943void tracer_preempt_on(unsigned long a0, unsigned long a1);
   1944void tracer_preempt_off(unsigned long a0, unsigned long a1);
   1945#else
   1946static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
   1947static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
   1948#endif
   1949#ifdef CONFIG_IRQSOFF_TRACER
   1950void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
   1951void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
   1952#else
   1953static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
   1954static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
   1955#endif
   1956
   1957extern struct trace_iterator *tracepoint_print_iter;
   1958
   1959/*
   1960 * Reset the state of the trace_iterator so that it can read consumed data.
   1961 * Normally, the trace_iterator is used for reading the data when it is not
   1962 * consumed, and must retain state.
   1963 */
   1964static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
   1965{
   1966	memset_startat(iter, 0, seq);
   1967	iter->pos = -1;
   1968}
   1969
   1970/* Check the name is good for event/group/fields */
   1971static inline bool is_good_name(const char *name)
   1972{
   1973	if (!isalpha(*name) && *name != '_')
   1974		return false;
   1975	while (*++name != '\0') {
   1976		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
   1977			return false;
   1978	}
   1979	return true;
   1980}
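
/*
 * The accepted grammar is [A-Za-z_][A-Za-z0-9_]*, e.g.:
 *
 *	is_good_name("sched_switch")	// true
 *	is_good_name("_tmp1")		// true
 *	is_good_name("2fast")		// false: leading digit
 *	is_good_name("bad.name")	// false: '.' not allowed
 */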
   1981
    1982/* Convert ':' and '.' into '_' when generating event names (the leading char is skipped) */
   1983static inline void sanitize_event_name(char *name)
   1984{
   1985	while (*name++ != '\0')
   1986		if (*name == ':' || *name == '.')
   1987			*name = '_';
   1988}
   1989
    1990/*
    1991 * This is a generic way to read and write a u64 value from a file in tracefs.
    1992 *
    1993 * The value is stored in the variable pointed to by *val. Written values
    1994 * must be at least *min and at most *max. Writes are serialized by an
    1995 * existing *lock.
    1996 */
   1997struct trace_min_max_param {
   1998	struct mutex	*lock;
   1999	u64		*val;
   2000	u64		*min;
   2001	u64		*max;
   2002};
   2003
   2004#define U64_STR_SIZE		24	/* 20 digits max */
   2005
   2006extern const struct file_operations trace_min_max_fops;
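
/*
 * A minimal sketch of wiring a trace_min_max_param to a tracefs file
 * (illustrative names; the osnoise tracer uses this pattern for its
 * period/runtime knobs), assuming the usual trace_create_file()
 * helper and a dentry for the parent directory:
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static u64 my_val = 100, my_min = 1, my_max = 1000;
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */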
   2007
   2008#endif /* _LINUX_KERNEL_TRACE_H */