cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

trace_events.h (29481B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2
      3#ifndef _LINUX_TRACE_EVENT_H
      4#define _LINUX_TRACE_EVENT_H
      5
      6#include <linux/ring_buffer.h>
      7#include <linux/trace_seq.h>
      8#include <linux/percpu.h>
      9#include <linux/hardirq.h>
     10#include <linux/perf_event.h>
     11#include <linux/tracepoint.h>
     12
     13struct trace_array;
     14struct array_buffer;
     15struct tracer;
     16struct dentry;
     17struct bpf_prog;
     18union bpf_attr;
     19
     20const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
     21				  unsigned long flags,
     22				  const struct trace_print_flags *flag_array);
     23
     24const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
     25				    const struct trace_print_flags *symbol_array);
     26
     27#if BITS_PER_LONG == 32
     28const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
     29		      unsigned long long flags,
     30		      const struct trace_print_flags_u64 *flag_array);
     31
     32const char *trace_print_symbols_seq_u64(struct trace_seq *p,
     33					unsigned long long val,
     34					const struct trace_print_flags_u64
     35								 *symbol_array);
     36#endif
     37
     38const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
     39				    unsigned int bitmask_size);
     40
     41const char *trace_print_hex_seq(struct trace_seq *p,
     42				const unsigned char *buf, int len,
     43				bool concatenate);
     44
     45const char *trace_print_array_seq(struct trace_seq *p,
     46				   const void *buf, int count,
     47				   size_t el_size);
     48
     49const char *
     50trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
     51			 int prefix_type, int rowsize, int groupsize,
     52			 const void *buf, size_t len, bool ascii);
     53
     54struct trace_iterator;
     55struct trace_event;
     56
     57int trace_raw_output_prep(struct trace_iterator *iter,
     58			  struct trace_event *event);
     59extern __printf(2, 3)
     60void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
     61
     62/*
     63 * The trace entry - the most basic unit of tracing. This is what
     64 * is printed in the end as a single line in the trace output, such as:
     65 *
     66 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
     67 */
     68struct trace_entry {
     69	unsigned short		type;
     70	unsigned char		flags;
     71	unsigned char		preempt_count;
     72	int			pid;
     73};
     74
     75#define TRACE_EVENT_TYPE_MAX						\
     76	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
     77
     78/*
     79 * Trace iterator - used by printout routines that present trace
     80 * results to users; these routines might sleep, etc.:
     81 */
     82struct trace_iterator {
     83	struct trace_array	*tr;
     84	struct tracer		*trace;
     85	struct array_buffer	*array_buffer;
     86	void			*private;
     87	int			cpu_file;
     88	struct mutex		mutex;
     89	struct ring_buffer_iter	**buffer_iter;
     90	unsigned long		iter_flags;
     91	void			*temp;	/* temp holder */
     92	unsigned int		temp_size;
     93	char			*fmt;	/* modified format holder */
     94	unsigned int		fmt_size;
     95
     96	/* trace_seq for __print_flags() and __print_symbolic() etc. */
     97	struct trace_seq	tmp_seq;
     98
     99	cpumask_var_t		started;
    100
    101	/* true when the currently open file is a snapshot */
    102	bool			snapshot;
    103
    104	/* The below is zeroed out in pipe_read */
    105	struct trace_seq	seq;
    106	struct trace_entry	*ent;
    107	unsigned long		lost_events;
    108	int			leftover;
    109	int			ent_size;
    110	int			cpu;
    111	u64			ts;
    112
    113	loff_t			pos;
    114	long			idx;
    115
    116	/* All new fields here will be zeroed out in pipe_read */
    117};
    118
    119enum trace_iter_flags {
    120	TRACE_FILE_LAT_FMT	= 1,
    121	TRACE_FILE_ANNOTATE	= 2,
    122	TRACE_FILE_TIME_IN_NS	= 4,
    123};
    124
    125
    126typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
    127				      int flags, struct trace_event *event);
    128
    129struct trace_event_functions {
    130	trace_print_func	trace;
    131	trace_print_func	raw;
    132	trace_print_func	hex;
    133	trace_print_func	binary;
    134};
    135
    136struct trace_event {
    137	struct hlist_node		node;
    138	struct list_head		list;
    139	int				type;
    140	struct trace_event_functions	*funcs;
    141};
    142
    143extern int register_trace_event(struct trace_event *event);
    144extern int unregister_trace_event(struct trace_event *event);
    145
    146/* Return values for print_line callback */
    147enum print_line_t {
    148	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
    149	TRACE_TYPE_HANDLED	= 1,
    150	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
    151	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
    152};
    153
    154enum print_line_t trace_handle_return(struct trace_seq *s);
    155
    156static inline void tracing_generic_entry_update(struct trace_entry *entry,
    157						unsigned short type,
    158						unsigned int trace_ctx)
    159{
    160	entry->preempt_count		= trace_ctx & 0xff;
    161	entry->pid			= current->pid;
    162	entry->type			= type;
    163	entry->flags =			trace_ctx >> 16;
    164}
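
/*
 * Annotation (hedged sketch, not part of the upstream header): the packed
 * trace_ctx word consumed by tracing_generic_entry_update() above keeps the
 * preemption count in the low byte and the TRACE_FLAG_* bits from bit 16
 * upwards. The helper name below is hypothetical; it simply reverses that
 * packing for clarity.
 */
static inline void trace_ctx_unpack_example(unsigned int trace_ctx,
					    unsigned char *preempt_count,
					    unsigned char *flags)
{
	*preempt_count = trace_ctx & 0xff;	/* low byte: preemption count */
	*flags = trace_ctx >> 16;		/* upper bits: TRACE_FLAG_* */
}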
    165
    166unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
    167
    168enum trace_flag_type {
    169	TRACE_FLAG_IRQS_OFF		= 0x01,
    170	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
    171	TRACE_FLAG_NEED_RESCHED		= 0x04,
    172	TRACE_FLAG_HARDIRQ		= 0x08,
    173	TRACE_FLAG_SOFTIRQ		= 0x10,
    174	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
    175	TRACE_FLAG_NMI			= 0x40,
    176	TRACE_FLAG_BH_OFF		= 0x80,
    177};
    178
    179#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
    180static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
    181{
    182	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
    183		TRACE_FLAG_IRQS_OFF : 0;
    184	return tracing_gen_ctx_irq_test(irq_status);
    185}
    186static inline unsigned int tracing_gen_ctx(void)
    187{
    188	unsigned long irqflags;
    189
    190	local_save_flags(irqflags);
    191	return tracing_gen_ctx_flags(irqflags);
    192}
    193#else
    194
    195static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
    196{
    197	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
    198}
    199static inline unsigned int tracing_gen_ctx(void)
    200{
    201	return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
    202}
    203#endif
    204
    205static inline unsigned int tracing_gen_ctx_dec(void)
    206{
    207	unsigned int trace_ctx;
    208
    209	trace_ctx = tracing_gen_ctx();
    210	/*
    211	 * Subtract one from the preemption counter if preemption is enabled,
    212	 * see trace_event_buffer_reserve() for details.
    213	 */
    214	if (IS_ENABLED(CONFIG_PREEMPTION))
    215		trace_ctx--;
    216	return trace_ctx;
    217}
    218
    219struct trace_event_file;
    220
    221struct ring_buffer_event *
    222trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
    223				struct trace_event_file *trace_file,
    224				int type, unsigned long len,
    225				unsigned int trace_ctx);
    226
    227#define TRACE_RECORD_CMDLINE	BIT(0)
    228#define TRACE_RECORD_TGID	BIT(1)
    229
    230void tracing_record_taskinfo(struct task_struct *task, int flags);
    231void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
    232					  struct task_struct *next, int flags);
    233
    234void tracing_record_cmdline(struct task_struct *task);
    235void tracing_record_tgid(struct task_struct *task);
    236
    237int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
    238
    239struct event_filter;
    240
    241enum trace_reg {
    242	TRACE_REG_REGISTER,
    243	TRACE_REG_UNREGISTER,
    244#ifdef CONFIG_PERF_EVENTS
    245	TRACE_REG_PERF_REGISTER,
    246	TRACE_REG_PERF_UNREGISTER,
    247	TRACE_REG_PERF_OPEN,
    248	TRACE_REG_PERF_CLOSE,
    249	/*
    250	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
    251	 * custom action was taken and the default action is not to be
    252	 * performed.
    253	 */
    254	TRACE_REG_PERF_ADD,
    255	TRACE_REG_PERF_DEL,
    256#endif
    257};
    258
    259struct trace_event_call;
    260
    261#define TRACE_FUNCTION_TYPE ((const char *)~0UL)
    262
    263struct trace_event_fields {
    264	const char *type;
    265	union {
    266		struct {
    267			const char *name;
    268			const int  size;
    269			const int  align;
    270			const int  is_signed;
    271			const int  filter_type;
    272		};
    273		int (*define_fields)(struct trace_event_call *);
    274	};
    275};
    276
    277struct trace_event_class {
    278	const char		*system;
    279	void			*probe;
    280#ifdef CONFIG_PERF_EVENTS
    281	void			*perf_probe;
    282#endif
    283	int			(*reg)(struct trace_event_call *event,
    284				       enum trace_reg type, void *data);
    285	struct trace_event_fields *fields_array;
    286	struct list_head	*(*get_fields)(struct trace_event_call *);
    287	struct list_head	fields;
    288	int			(*raw_init)(struct trace_event_call *);
    289};
    290
    291extern int trace_event_reg(struct trace_event_call *event,
    292			    enum trace_reg type, void *data);
    293
    294struct trace_event_buffer {
    295	struct trace_buffer		*buffer;
    296	struct ring_buffer_event	*event;
    297	struct trace_event_file		*trace_file;
    298	void				*entry;
    299	unsigned int			trace_ctx;
    300	struct pt_regs			*regs;
    301};
    302
    303void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
    304				  struct trace_event_file *trace_file,
    305				  unsigned long len);
    306
    307void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
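
/*
 * Annotation (hedged sketch, not upstream code): the reserve/commit pair
 * above is the pattern the TRACE_EVENT()-generated probe functions follow.
 * The entry struct layout and the probe function below are hypothetical;
 * real generated events likewise embed struct trace_entry as the first
 * member of their record.
 */
struct my_event_entry_example {
	struct trace_entry	ent;	/* must come first */
	unsigned long		value;	/* hypothetical payload field */
};

static inline void my_event_probe_example(struct trace_event_file *trace_file,
					  unsigned long value)
{
	struct trace_event_buffer fbuffer;
	struct my_event_entry_example *entry;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry));
	if (!entry)
		return;		/* ring buffer full or event not recording */

	entry->value = value;	/* fill in the payload fields */

	trace_event_buffer_commit(&fbuffer);
}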
    308
    309enum {
    310	TRACE_EVENT_FL_FILTERED_BIT,
    311	TRACE_EVENT_FL_CAP_ANY_BIT,
    312	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
    313	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
    314	TRACE_EVENT_FL_TRACEPOINT_BIT,
    315	TRACE_EVENT_FL_DYNAMIC_BIT,
    316	TRACE_EVENT_FL_KPROBE_BIT,
    317	TRACE_EVENT_FL_UPROBE_BIT,
    318	TRACE_EVENT_FL_EPROBE_BIT,
    319	TRACE_EVENT_FL_CUSTOM_BIT,
    320};
    321
    322/*
    323 * Event flags:
    324 *  FILTERED	  - The event has a filter attached
    325 *  CAP_ANY	  - Any user can enable for perf
    326 *  NO_SET_FILTER - Set when filter has error and is to be ignored
    327 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
    328 *  TRACEPOINT    - Event is a tracepoint
    329 *  DYNAMIC       - Event is a dynamic event (created at run time)
    330 *  KPROBE        - Event is a kprobe
    331 *  UPROBE        - Event is a uprobe
    332 *  EPROBE        - Event is an event probe
    333 *  CUSTOM        - Event is a custom event (to be attached to an existing tracepoint)
    334 *                   This is set when the custom event has not been attached
    335 *                   to a tracepoint yet, then it is cleared when it is.
    336 */
    337enum {
    338	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
    339	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
    340	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
    341	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
    342	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
    343	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
    344	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
    345	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
    346	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
    347	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
    348};
    349
    350#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
    351
    352struct trace_event_call {
    353	struct list_head	list;
    354	struct trace_event_class *class;
    355	union {
    356		char			*name;
    357		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
    358		struct tracepoint	*tp;
    359	};
    360	struct trace_event	event;
    361	char			*print_fmt;
    362	struct event_filter	*filter;
    363	/*
    364	 * Static events can disappear with modules,
    365	 * whereas dynamic ones need their own ref count.
    366	 */
    367	union {
    368		void				*module;
    369		atomic_t			refcnt;
    370	};
    371	void			*data;
    372
    373	/* See the TRACE_EVENT_FL_* flags above */
    374	int			flags; /* static flags of different events */
    375
    376#ifdef CONFIG_PERF_EVENTS
    377	int				perf_refcount;
    378	struct hlist_head __percpu	*perf_events;
    379	struct bpf_prog_array __rcu	*prog_array;
    380
    381	int	(*perf_perm)(struct trace_event_call *,
    382			     struct perf_event *);
    383#endif
    384};
    385
    386#ifdef CONFIG_DYNAMIC_EVENTS
    387bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
    388void trace_event_dyn_put_ref(struct trace_event_call *call);
    389bool trace_event_dyn_busy(struct trace_event_call *call);
    390#else
    391static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
    392{
    393	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
    394	return false;
    395}
    396static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
    397{
    398}
    399static inline bool trace_event_dyn_busy(struct trace_event_call *call)
    400{
    401	/* Nothing should call this without DYNAMIC_EVENTS configured. */
    402	return true;
    403}
    404#endif
    405
    406static inline bool trace_event_try_get_ref(struct trace_event_call *call)
    407{
    408	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
    409		return trace_event_dyn_try_get_ref(call);
    410	else
    411		return try_module_get(call->module);
    412}
    413
    414static inline void trace_event_put_ref(struct trace_event_call *call)
    415{
    416	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
    417		trace_event_dyn_put_ref(call);
    418	else
    419		module_put(call->module);
    420}
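
/*
 * Annotation (hedged sketch): a hypothetical caller pinning an event before
 * using it. For static events this takes a module reference, for dynamic
 * events the embedded refcount, exactly as the two helpers above decide.
 */
static inline int trace_event_pin_example(struct trace_event_call *call)
{
	if (!trace_event_try_get_ref(call))
		return -ENODEV;	/* module going away or event being removed */

	/* ... safe to use 'call' here ... */

	trace_event_put_ref(call);
	return 0;
}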
    421
    422#ifdef CONFIG_PERF_EVENTS
    423static inline bool bpf_prog_array_valid(struct trace_event_call *call)
    424{
    425	/*
    426	 * This inline function checks whether call->prog_array
    427	 * is valid or not. The function is called in various places,
    428	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
    429	 *
    430	 * If this function returns true, and later call->prog_array
    431	 * becomes NULL inside the rcu_read_lock/unlock region,
    432	 * we bail out then. If this function returns false,
    433	 * there is a risk that we might miss a few events if the checking
    434	 * were delayed until inside rcu_read_lock/unlock region and
    435	 * call->prog_array happened to become non-NULL then.
    436	 *
    437	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
    438	 * rcu_access_pointer() requires the actual definition of
    439	 * "struct bpf_prog_array" while READ_ONCE() only needs
    440	 * a declaration of the same type.
    441	 */
    442	return !!READ_ONCE(call->prog_array);
    443}
    444#endif
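
#ifdef CONFIG_PERF_EVENTS
/*
 * Annotation (hedged sketch): callers use bpf_prog_array_valid() as the
 * cheap check outside of RCU described above; the attached programs are then
 * run under rcu_read_lock(), where the prog_array pointer is re-read and may
 * have become NULL in the meantime. The function name is illustrative only.
 */
static inline bool bpf_prog_array_fast_check_example(struct trace_event_call *call)
{
	if (!bpf_prog_array_valid(call))
		return false;	/* nothing attached, skip the slow path */
	/*
	 * Proceed to the RCU-protected path (e.g. trace_call_bpf()), which
	 * re-reads call->prog_array and bails out if it became NULL.
	 */
	return true;
}
#endif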
    445
    446static inline const char *
    447trace_event_name(struct trace_event_call *call)
    448{
    449	if (call->flags & TRACE_EVENT_FL_CUSTOM)
    450		return call->name;
    451	else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
    452		return call->tp ? call->tp->name : NULL;
    453	else
    454		return call->name;
    455}
    456
    457static inline struct list_head *
    458trace_get_fields(struct trace_event_call *event_call)
    459{
    460	if (!event_call->class->get_fields)
    461		return &event_call->class->fields;
    462	return event_call->class->get_fields(event_call);
    463}
    464
    465struct trace_subsystem_dir;
    466
    467enum {
    468	EVENT_FILE_FL_ENABLED_BIT,
    469	EVENT_FILE_FL_RECORDED_CMD_BIT,
    470	EVENT_FILE_FL_RECORDED_TGID_BIT,
    471	EVENT_FILE_FL_FILTERED_BIT,
    472	EVENT_FILE_FL_NO_SET_FILTER_BIT,
    473	EVENT_FILE_FL_SOFT_MODE_BIT,
    474	EVENT_FILE_FL_SOFT_DISABLED_BIT,
    475	EVENT_FILE_FL_TRIGGER_MODE_BIT,
    476	EVENT_FILE_FL_TRIGGER_COND_BIT,
    477	EVENT_FILE_FL_PID_FILTER_BIT,
    478	EVENT_FILE_FL_WAS_ENABLED_BIT,
    479};
    480
    481extern struct trace_event_file *trace_get_event_file(const char *instance,
    482						     const char *system,
    483						     const char *event);
    484extern void trace_put_event_file(struct trace_event_file *file);
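
/*
 * Annotation (hedged sketch, hypothetical module code): look up the
 * sched/sched_switch event file in the top-level instance (NULL instance
 * name), use it, and drop the reference again. trace_get_event_file()
 * returns an ERR_PTR() on failure; IS_ERR()/PTR_ERR() come from linux/err.h.
 */
static inline int trace_get_event_file_example(void)
{
	struct trace_event_file *file;

	file = trace_get_event_file(NULL, "sched", "sched_switch");
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... e.g. attach a trigger or enable the event here ... */

	trace_put_event_file(file);
	return 0;
}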
    485
    486#define MAX_DYNEVENT_CMD_LEN	(2048)
    487
    488enum dynevent_type {
    489	DYNEVENT_TYPE_SYNTH = 1,
    490	DYNEVENT_TYPE_KPROBE,
    491	DYNEVENT_TYPE_NONE,
    492};
    493
    494struct dynevent_cmd;
    495
    496typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
    497
    498struct dynevent_cmd {
    499	struct seq_buf		seq;
    500	const char		*event_name;
    501	unsigned int		n_fields;
    502	enum dynevent_type	type;
    503	dynevent_create_fn_t	run_command;
    504	void			*private_data;
    505};
    506
    507extern int dynevent_create(struct dynevent_cmd *cmd);
    508
    509extern int synth_event_delete(const char *name);
    510
    511extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
    512				 char *buf, int maxlen);
    513
    514extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
    515				       const char *name,
    516				       struct module *mod, ...);
    517
    518#define synth_event_gen_cmd_start(cmd, name, mod, ...)	\
    519	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
    520
    521struct synth_field_desc {
    522	const char *type;
    523	const char *name;
    524};
    525
    526extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
    527					   const char *name,
    528					   struct module *mod,
    529					   struct synth_field_desc *fields,
    530					   unsigned int n_fields);
    531extern int synth_event_create(const char *name,
    532			      struct synth_field_desc *fields,
    533			      unsigned int n_fields, struct module *mod);
    534
    535extern int synth_event_add_field(struct dynevent_cmd *cmd,
    536				 const char *type,
    537				 const char *name);
    538extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
    539				     const char *type_name);
    540extern int synth_event_add_fields(struct dynevent_cmd *cmd,
    541				  struct synth_field_desc *fields,
    542				  unsigned int n_fields);
    543
    544#define synth_event_gen_cmd_end(cmd)	\
    545	dynevent_create(cmd)
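
/*
 * Annotation (hedged sketch, adapted from the in-kernel dynamic event API
 * documentation): generate a synthetic event at runtime with the command
 * helpers above. The event name "schedtest" and its fields are hypothetical,
 * and the code assumes module context (THIS_MODULE, kzalloc/kfree from
 * linux/slab.h); error handling is abbreviated.
 */
static inline int synth_event_gen_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
					"pid_t", "next_pid_field",
					"u64", "ts_ns");
	if (!ret)
		ret = synth_event_add_field(&cmd, "unsigned int", "irq_field");
	if (!ret)
		ret = synth_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}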
    546
    547struct synth_event;
    548
    549struct synth_event_trace_state {
    550	struct trace_event_buffer fbuffer;
    551	struct synth_trace_event *entry;
    552	struct trace_buffer *buffer;
    553	struct synth_event *event;
    554	unsigned int cur_field;
    555	unsigned int n_u64;
    556	bool disabled;
    557	bool add_next;
    558	bool add_name;
    559};
    560
    561extern int synth_event_trace(struct trace_event_file *file,
    562			     unsigned int n_vals, ...);
    563extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
    564				   unsigned int n_vals);
    565extern int synth_event_trace_start(struct trace_event_file *file,
    566				   struct synth_event_trace_state *trace_state);
    567extern int synth_event_add_next_val(u64 val,
    568				    struct synth_event_trace_state *trace_state);
    569extern int synth_event_add_val(const char *field_name, u64 val,
    570			       struct synth_event_trace_state *trace_state);
    571extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
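
/*
 * Annotation (hedged sketch, modeled on the in-kernel synthetic event API
 * documentation): emit one instance of a previously created synthetic event
 * field by field. 'file' is assumed to be the synthetic event's
 * trace_event_file (e.g. from trace_get_event_file()); the field name and
 * values are hypothetical.
 */
static inline int synth_event_trace_example(struct trace_event_file *file)
{
	struct synth_event_trace_state trace_state;
	int ret;

	ret = synth_event_trace_start(file, &trace_state);
	if (ret)
		return ret;

	/* values can be added positionally ... */
	ret = synth_event_add_next_val(777, &trace_state);
	/* ... or by field name */
	if (!ret)
		ret = synth_event_add_val("irq_flags_field", 0x1, &trace_state);

	/* close out the event even if adding a value failed */
	synth_event_trace_end(&trace_state);
	return ret;
}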
    572
    573extern int kprobe_event_delete(const char *name);
    574
    575extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
    576				  char *buf, int maxlen);
    577
    578#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
    579	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
    580
    581#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
    582	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
    583
    584extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
    585					bool kretprobe,
    586					const char *name,
    587					const char *loc, ...);
    588
    589#define kprobe_event_add_fields(cmd, ...)	\
    590	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
    591
    592#define kprobe_event_add_field(cmd, field)	\
    593	__kprobe_event_add_fields(cmd, field, NULL)
    594
    595extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
    596
    597#define kprobe_event_gen_cmd_end(cmd)		\
    598	dynevent_create(cmd)
    599
    600#define kretprobe_event_gen_cmd_end(cmd)	\
    601	dynevent_create(cmd)
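
/*
 * Annotation (hedged sketch, adapted from the in-kernel kprobe event API
 * documentation): generate a kprobe event on do_sys_open() at runtime. The
 * event name, probed symbol and fetch args are illustrative only, and the
 * code assumes kzalloc/kfree from linux/slab.h are available.
 */
static inline int kprobe_event_gen_example(void)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
					 "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (!ret)
		ret = kprobe_event_add_fields(&cmd, "flags=%cx",
					      "mode=+4($stack)");
	if (!ret)
		ret = kprobe_event_gen_cmd_end(&cmd);

	kfree(buf);
	return ret;
}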
    602
    603/*
    604 * Event file flags:
    605 *  ENABLED	  - The event is enabled
    606 *  RECORDED_CMD  - The comms should be recorded at sched_switch
    607 *  RECORDED_TGID - The tgids should be recorded at sched_switch
    608 *  FILTERED	  - The event has a filter attached
    609 *  NO_SET_FILTER - Set when filter has error and is to be ignored
    610 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
    611 *  SOFT_DISABLED - When set, do not trace the event (even though its
    612 *                   tracepoint may be enabled)
    613 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
    614 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
    615 *  PID_FILTER    - When set, the event is filtered based on pid
    616 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
    617 */
    618enum {
    619	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
    620	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
    621	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
    622	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
    623	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
    624	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
    625	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
    626	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
    627	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
    628	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
    629	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
    630};
    631
    632struct trace_event_file {
    633	struct list_head		list;
    634	struct trace_event_call		*event_call;
    635	struct event_filter __rcu	*filter;
    636	struct dentry			*dir;
    637	struct trace_array		*tr;
    638	struct trace_subsystem_dir	*system;
    639	struct list_head		triggers;
    640
    641	/*
    642	 * 32 bit flags:
    643	 *   bit 0:		enabled
    644	 *   bit 1:		enabled cmd record
    645	 *   bit 2:		enable/disable with the soft disable bit
    646	 *   bit 3:		soft disabled
    647	 *   bit 4:		trigger enabled
    648	 *
    649	 * Note: The bits must be set atomically to prevent races
    650	 * from other writers. Reads of flags do not need to be in
    651	 * sync as they occur in critical sections. But the way flags
    652	 * is currently used, these changes do not affect the code
    653	 * except that when a change is made, it may have a slight
    654	 * delay in propagating the changes to other CPUs due to
    655	 * caching and such, which is mostly OK ;-)
    656	 */
    657	unsigned long		flags;
    658	atomic_t		sm_ref;	/* soft-mode reference counter */
    659	atomic_t		tm_ref;	/* trigger-mode reference counter */
    660};
    661
    662#define __TRACE_EVENT_FLAGS(name, value)				\
    663	static int __init trace_init_flags_##name(void)			\
    664	{								\
    665		event_##name.flags |= value;				\
    666		return 0;						\
    667	}								\
    668	early_initcall(trace_init_flags_##name);
    669
    670#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
    671	static int perf_perm_##name(struct trace_event_call *tp_event, \
    672				    struct perf_event *p_event)		\
    673	{								\
    674		return ({ expr; });					\
    675	}								\
    676	static int __init trace_init_perf_perm_##name(void)		\
    677	{								\
    678		event_##name.perf_perm = &perf_perm_##name;		\
    679		return 0;						\
    680	}								\
    681	early_initcall(trace_init_perf_perm_##name);
    682
    683#define PERF_MAX_TRACE_SIZE	8192
    684
    685#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */
    686
    687enum event_trigger_type {
    688	ETT_NONE		= (0),
    689	ETT_TRACE_ONOFF		= (1 << 0),
    690	ETT_SNAPSHOT		= (1 << 1),
    691	ETT_STACKTRACE		= (1 << 2),
    692	ETT_EVENT_ENABLE	= (1 << 3),
    693	ETT_EVENT_HIST		= (1 << 4),
    694	ETT_HIST_ENABLE		= (1 << 5),
    695	ETT_EVENT_EPROBE	= (1 << 6),
    696};
    697
    698extern int filter_match_preds(struct event_filter *filter, void *rec);
    699
    700extern enum event_trigger_type
    701event_triggers_call(struct trace_event_file *file,
    702		    struct trace_buffer *buffer, void *rec,
    703		    struct ring_buffer_event *event);
    704extern void
    705event_triggers_post_call(struct trace_event_file *file,
    706			 enum event_trigger_type tt);
    707
    708bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
    709
    710bool __trace_trigger_soft_disabled(struct trace_event_file *file);
    711
    712/**
    713 * trace_trigger_soft_disabled - do triggers and test if soft disabled
    714 * @file: The file pointer of the event to test
    715 *
    716 * If any triggers without filters are attached to this event, they
    717 * will be called here. If the event is soft disabled and has no
    718 * triggers that require testing the fields, it will return true,
    719 * otherwise false.
    720 */
    721static __always_inline bool
    722trace_trigger_soft_disabled(struct trace_event_file *file)
    723{
    724	unsigned long eflags = file->flags;
    725
    726	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
    727			       EVENT_FILE_FL_SOFT_DISABLED |
    728			       EVENT_FILE_FL_PID_FILTER))))
    729		return false;
    730
    731	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
    732		return false;
    733
    734	return __trace_trigger_soft_disabled(file);
    735}
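
/*
 * Annotation (hedged sketch): a generated probe function typically calls the
 * helper above first and returns early when the event is soft disabled and
 * no field-testing triggers need to run, before doing any buffer work. The
 * function name below is hypothetical.
 */
static inline void trace_probe_fast_path_example(struct trace_event_file *trace_file)
{
	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* ... reserve, fill in and commit the event as shown earlier ... */
}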
    736
    737#ifdef CONFIG_BPF_EVENTS
    738unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
    739int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
    740void perf_event_detach_bpf_prog(struct perf_event *event);
    741int perf_event_query_prog_array(struct perf_event *event, void __user *info);
    742int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
    743int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
    744struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
    745void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
    746int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
    747			    u32 *fd_type, const char **buf,
    748			    u64 *probe_offset, u64 *probe_addr);
    749int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
    750#else
    751static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
    752{
    753	return 1;
    754}
    755
    756static inline int
    757perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
    758{
    759	return -EOPNOTSUPP;
    760}
    761
    762static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
    763
    764static inline int
    765perf_event_query_prog_array(struct perf_event *event, void __user *info)
    766{
    767	return -EOPNOTSUPP;
    768}
    769static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
    770{
    771	return -EOPNOTSUPP;
    772}
    773static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
    774{
    775	return -EOPNOTSUPP;
    776}
    777static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
    778{
    779	return NULL;
    780}
    781static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
    782{
    783}
    784static inline int bpf_get_perf_event_info(const struct perf_event *event,
    785					  u32 *prog_id, u32 *fd_type,
    786					  const char **buf, u64 *probe_offset,
    787					  u64 *probe_addr)
    788{
    789	return -EOPNOTSUPP;
    790}
    791static inline int
    792bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
    793{
    794	return -EOPNOTSUPP;
    795}
    796#endif
    797
    798enum {
    799	FILTER_OTHER = 0,
    800	FILTER_STATIC_STRING,
    801	FILTER_DYN_STRING,
    802	FILTER_RDYN_STRING,
    803	FILTER_PTR_STRING,
    804	FILTER_TRACE_FN,
    805	FILTER_COMM,
    806	FILTER_CPU,
    807};
    808
    809extern int trace_event_raw_init(struct trace_event_call *call);
    810extern int trace_define_field(struct trace_event_call *call, const char *type,
    811			      const char *name, int offset, int size,
    812			      int is_signed, int filter_type);
    813extern int trace_add_event_call(struct trace_event_call *call);
    814extern int trace_remove_event_call(struct trace_event_call *call);
    815extern int trace_event_get_offsets(struct trace_event_call *call);
    816
    817#define is_signed_type(type)	(((type)(-1)) < (type)1)
    818
    819int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
    820int trace_set_clr_event(const char *system, const char *event, int set);
    821int trace_array_set_clr_event(struct trace_array *tr, const char *system,
    822		const char *event, bool enable);
    823/*
    824 * The double __builtin_constant_p is because gcc will give us an error
    825 * if we try to initialize the static variable with fmt when it is not
    826 * a constant, even with the outer if statement optimizing out.
    827 */
    828#define event_trace_printk(ip, fmt, args...)				\
    829do {									\
    830	__trace_printk_check_format(fmt, ##args);			\
    831	tracing_record_cmdline(current);				\
    832	if (__builtin_constant_p(fmt)) {				\
    833		static const char *trace_printk_fmt			\
    834		  __section("__trace_printk_fmt") =			\
    835			__builtin_constant_p(fmt) ? fmt : NULL;		\
    836									\
    837		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
    838	} else								\
    839		__trace_printk(ip, fmt, ##args);			\
    840} while (0)
    841
    842#ifdef CONFIG_PERF_EVENTS
    843struct perf_event;
    844
    845DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
    846DECLARE_PER_CPU(int, bpf_kprobe_override);
    847
    848extern int  perf_trace_init(struct perf_event *event);
    849extern void perf_trace_destroy(struct perf_event *event);
    850extern int  perf_trace_add(struct perf_event *event, int flags);
    851extern void perf_trace_del(struct perf_event *event, int flags);
    852#ifdef CONFIG_KPROBE_EVENTS
    853extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
    854extern void perf_kprobe_destroy(struct perf_event *event);
    855extern int bpf_get_kprobe_info(const struct perf_event *event,
    856			       u32 *fd_type, const char **symbol,
    857			       u64 *probe_offset, u64 *probe_addr,
    858			       bool perf_type_tracepoint);
    859#endif
    860#ifdef CONFIG_UPROBE_EVENTS
    861extern int  perf_uprobe_init(struct perf_event *event,
    862			     unsigned long ref_ctr_offset, bool is_retprobe);
    863extern void perf_uprobe_destroy(struct perf_event *event);
    864extern int bpf_get_uprobe_info(const struct perf_event *event,
    865			       u32 *fd_type, const char **filename,
    866			       u64 *probe_offset, bool perf_type_tracepoint);
    867#endif
    868extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
    869				     char *filter_str);
    870extern void ftrace_profile_free_filter(struct perf_event *event);
    871void perf_trace_buf_update(void *record, u16 type);
    872void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
    873
    874int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
    875void perf_event_free_bpf_prog(struct perf_event *event);
    876
    877void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
    878void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
    879void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
    880		    u64 arg3);
    881void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
    882		    u64 arg3, u64 arg4);
    883void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
    884		    u64 arg3, u64 arg4, u64 arg5);
    885void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
    886		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
    887void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
    888		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
    889void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
    890		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
    891		    u64 arg8);
    892void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
    893		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
    894		    u64 arg8, u64 arg9);
    895void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
    896		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
    897		     u64 arg8, u64 arg9, u64 arg10);
    898void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
    899		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
    900		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
    901void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
    902		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
    903		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
    904void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
    905			       struct trace_event_call *call, u64 count,
    906			       struct pt_regs *regs, struct hlist_head *head,
    907			       struct task_struct *task);
    908
    909static inline void
    910perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
    911		       u64 count, struct pt_regs *regs, void *head,
    912		       struct task_struct *task)
    913{
    914	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
    915}
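
/*
 * Annotation (hedged sketch): the perf-side probe functions generated for
 * trace events pair perf_trace_buf_alloc() with perf_trace_buf_submit()
 * roughly as below. 'type', 'head' and the payload layout are hypothetical;
 * real callers derive them from the event_call and its per-CPU hlist.
 */
static inline void perf_trace_buf_example(u16 type, struct hlist_head *head)
{
	struct pt_regs *regs;
	u64 *record;
	int rctx;

	record = perf_trace_buf_alloc(sizeof(u64), &regs, &rctx);
	if (!record)
		return;		/* recursion or allocation failure */

	perf_fetch_caller_regs(regs);
	record[0] = 0;		/* fill in the (hypothetical) payload */

	perf_trace_buf_submit(record, sizeof(u64), rctx, type,
			      1, regs, head, NULL);
}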
    916
    917#endif
    918
    919#endif /* _LINUX_TRACE_EVENT_H */
    920
    921/*
    922 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
    923 *  This is due to the way trace custom events work. If a file includes two
    924 *  trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
    925 *  will override the TRACE_CUSTOM_EVENT and break the second include.
    926 */
    927
    928#ifndef TRACE_CUSTOM_EVENT
    929
    930#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
    931#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
    932#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)
    933
    934#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */