cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

evsel.h (16051B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <sys/types.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/evsel.h>
#include <perf/evsel.h>
#include "symbol_conf.h"
#include <internal/cpumap.h>
#include <perf/cpumap.h>

struct bpf_object;
struct cgroup;
struct perf_counts;
struct perf_stat_evsel;
union perf_event;
struct bpf_counter_ops;
struct target;
struct hashmap;
struct bperf_leader_bpf;
struct bperf_follower_bpf;
struct perf_pmu;

typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);

enum perf_tool_event {
	PERF_TOOL_NONE		= 0,
	PERF_TOOL_DURATION_TIME = 1,
	PERF_TOOL_USER_TIME = 2,
	PERF_TOOL_SYSTEM_TIME = 3,

	PERF_TOOL_MAX,
};

const char *perf_tool_event__to_str(enum perf_tool_event ev);
enum perf_tool_event perf_tool_event__from_str(const char *str);

#define perf_tool_event__for_each_event(ev)		\
	for ((ev) = PERF_TOOL_DURATION_TIME; (ev) < PERF_TOOL_MAX; ev++)
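/*
 * Illustrative sketch (not part of the original header): walking all tool
 * events and printing their string names. Note the iteration starts at
 * PERF_TOOL_DURATION_TIME, so PERF_TOOL_NONE is skipped. Assumes a perf-tools
 * compilation unit where printf (or pr_debug from "util/debug.h") is usable.
 *
 *	enum perf_tool_event ev;
 *
 *	perf_tool_event__for_each_event(ev)
 *		printf("%s\n", perf_tool_event__to_str(ev));
 */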

/** struct evsel - event selector
 *
 * @evlist: evlist this evsel is in, if it is in one.
 * @core: libperf evsel object
 * @name: Can be set to retain the original event name passed by the user,
 *        so that when showing results in tools such as 'perf stat', we
 *        show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct perf_record_sample
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv:   this field, and what is in its containing unnamed union, are tool
 *          specific
 */
struct evsel {
	struct perf_evsel	core;
	struct evlist		*evlist;
	off_t			id_offset;
	int			id_pos;
	int			is_pos;
	unsigned int		sample_size;

	/*
	 * These fields can be set in the parse-events code or similar.
	 * Please check evsel__clone() to copy them properly so that
	 * they can be released properly.
	 */
	struct {
		char			*name;
		char			*group_name;
		const char		*pmu_name;
		struct tep_event	*tp_format;
		char			*filter;
		unsigned long		max_events;
		double			scale;
		const char		*unit;
		struct cgroup		*cgrp;
		const char		*metric_id;
		enum perf_tool_event	tool_event;
		/* parse modifier helper */
		int			exclude_GH;
		int			sample_read;
		bool			snapshot;
		bool			per_pkg;
		bool			percore;
		bool			precise_max;
		bool			use_uncore_alias;
		bool			is_libpfm_event;
		bool			auto_merge_stats;
		bool			collect_stat;
		bool			weak_group;
		bool			bpf_counter;
		bool			use_config_name;
		int			bpf_fd;
		struct bpf_object	*bpf_obj;
		struct list_head	config_terms;
	};

	/*
	 * Metric fields are similar, but need more care as they can have
	 * references to other metrics (evsels).
	 */
	const char *		metric_expr;
	const char *		metric_name;
	struct evsel		**metric_events;
	struct evsel		*metric_leader;

	void			*handler;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	unsigned long		nr_events_printed;
	struct perf_stat_evsel  *stats;
	void			*priv;
	u64			db_id;
	bool			uniquified_name;
	bool			supported;
	bool			needs_swap;
	bool			disabled;
	bool			no_aux_samples;
	bool			immediate;
	bool			tracking;
	bool			ignore_missing_thread;
	bool			forced_leader;
	bool			cmdline_group_boundary;
	bool			merged_stat;
	bool			reset_group;
	bool			errored;
	bool			needs_auxtrace_mmap;
	struct hashmap		*per_pkg_mask;
	int			err;
	struct {
		evsel__sb_cb_t	*cb;
		void		*data;
	} side_band;
	/*
	 * For reporting purposes, an evsel sample can have a callchain
	 * synthesized from AUX area data. Keep track of synthesized sample
	 * types here. Note, the recorded sample_type cannot be changed because
	 * it is needed to continue to parse events.
	 * See also evsel__has_callchain().
	 */
	__u64			synth_sample_type;

	/*
	 * bpf_counter_ops serves two use cases:
	 *   1. perf-stat -b          counting events used by BPF programs
	 *   2. perf-stat --use-bpf   use BPF programs to aggregate counts
	 */
	struct bpf_counter_ops	*bpf_counter_ops;

	/* for perf-stat -b */
	struct list_head	bpf_counter_list;

	/* for perf-stat --use-bpf */
	int			bperf_leader_prog_fd;
	int			bperf_leader_link_fd;
	union {
		struct bperf_leader_bpf *leader_skel;
		struct bperf_follower_bpf *follower_skel;
	};
	unsigned long		open_flags;
	int			precise_ip_original;

	/* for missing_features */
	struct perf_pmu		*pmu;
};

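/*
 * Illustrative sketch (not part of the original header): fields in the unnamed
 * term struct above (name, unit, scale, ...) are typically filled in by the
 * parse-events code and read back when printing results. A debug dump of an
 * already-parsed evsel might look roughly like this, assuming pr_debug from
 * "util/debug.h" is available:
 *
 *	pr_debug("%s: unit=%s scale=%f per_pkg=%d\n",
 *		 evsel__name(evsel), evsel->unit ?: "", evsel->scale,
 *		 evsel->per_pkg);
 */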
struct perf_missing_features {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
	bool lbr_flags;
	bool write_backward;
	bool group_read;
	bool ksymbol;
	bool bpf;
	bool aux_output;
	bool branch_hw_idx;
	bool cgroup;
	bool data_page_size;
	bool code_page_size;
	bool weight_struct;
};

extern struct perf_missing_features perf_missing_features;
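/*
 * Illustrative sketch (not part of the original header): these flags record
 * attr features the running kernel rejected, so later opens can avoid
 * re-requesting them. A config path might do roughly:
 *
 *	if (!perf_missing_features.mmap2)
 *		attr->mmap2 = 1;	// only ask for MMAP2 records if supported
 */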

struct perf_cpu_map;
struct thread_map;
struct record_opts;

static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
{
	return perf_evsel__cpus(&evsel->core);
}

static inline int evsel__nr_cpus(struct evsel *evsel)
{
	return perf_cpu_map__nr(evsel__cpus(evsel));
}
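/*
 * Illustrative sketch (not part of the original header): sizing per-CPU
 * storage from the evsel's own CPU map, e.g. before reading counters
 * (assumes <stdlib.h>; "vals" is a hypothetical buffer name):
 *
 *	int nr = evsel__nr_cpus(evsel);
 *	u64 *vals = calloc(nr, sizeof(*vals));	// one slot per mapped CPU
 */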

void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
			   struct perf_counts_values *count);

int evsel__object_config(size_t object_size,
			 int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel));

struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
bool evsel__is_aux_event(struct evsel *evsel);

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
	return evsel__new_idx(attr, 0);
}
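/*
 * Illustrative sketch (not part of the original header): allocating an evsel
 * for a hardware cycles event. The attr fields shown and the error handling
 * are just one plausible caller's choices.
 *
 *	struct perf_event_attr attr = {
 *		.type	  = PERF_TYPE_HARDWARE,
 *		.config	  = PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled = 1,
 *	};
 *	struct evsel *evsel = evsel__new(&attr);
 *
 *	if (evsel == NULL)
 *		return -ENOMEM;
 */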

struct evsel *evsel__clone(struct evsel *orig);
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);

int copy_config_terms(struct list_head *dst, struct list_head *src);
void free_config_terms(struct list_head *config_terms);

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
	return evsel__newtp_idx(sys, name, 0);
}
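/*
 * Illustrative sketch (not part of the original header): because the error is
 * encoded in the returned pointer, callers check it with the <linux/err.h>
 * helpers rather than against NULL, e.g. for a sched_switch tracepoint:
 *
 *	struct evsel *evsel = evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */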

struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);

struct tep_event *event_format__new(const char *sys, const char *name);

void evsel__init(struct evsel *evsel, struct perf_event_attr *attr, int idx);
void evsel__exit(struct evsel *evsel);
void evsel__delete(struct evsel *evsel);

struct callchain_param;

void evsel__config(struct evsel *evsel, struct record_opts *opts,
		   struct callchain_param *callchain);
void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *callchain);

int __evsel__sample_size(u64 sample_type);
void evsel__calc_id_pos(struct evsel *evsel);

bool evsel__is_cache_op_valid(u8 type, u8 op);

static inline bool evsel__is_bpf(struct evsel *evsel)
{
	return evsel->bpf_counter_ops != NULL;
}

#define EVSEL__MAX_ALIASES 8

extern const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
extern const char *const evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *const evsel__sw_names[PERF_COUNT_SW_MAX];
extern char *evsel__bpf_counter_events;
bool evsel__match_bpf_counter_events(const char *name);

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
const char *evsel__metric_id(const struct evsel *evsel);

static inline bool evsel__is_tool(const struct evsel *evsel)
{
	return evsel->tool_event != PERF_TOOL_NONE;
}

const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);

void __evsel__set_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);
void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_format bit);

#define evsel__set_sample_bit(evsel, bit) \
	__evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define evsel__reset_sample_bit(evsel, bit) \
	__evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
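/*
 * Illustrative sketch (not part of the original header): the macros paste the
 * short name onto the PERF_SAMPLE_ prefix, so
 *
 *	evsel__set_sample_bit(evsel, CPU);
 *	evsel__reset_sample_bit(evsel, TIME);
 *
 * expand to __evsel__set_sample_bit(evsel, PERF_SAMPLE_CPU) and
 * __evsel__reset_sample_bit(evsel, PERF_SAMPLE_TIME) respectively.
 */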

void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);

void arch_evsel__set_sample_weight(struct evsel *evsel);
void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr);

int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx);
int evsel__enable(struct evsel *evsel);
int evsel__disable(struct evsel *evsel);
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx);

int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx);
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads);
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads);
void evsel__close(struct evsel *evsel);
int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
		struct perf_thread_map *threads);
bool evsel__detect_missing_features(struct evsel *evsel);
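/*
 * Illustrative sketch (not part of the original header): a minimal open/count
 * lifecycle, assuming the libperf map constructors perf_cpu_map__new(NULL)
 * (all online CPUs) and perf_thread_map__new_dummy() are available as in the
 * rest of the perf tools:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	if (evsel__open(evsel, cpus, threads) < 0)
 *		goto out;		// see evsel__open_strerror() below
 *	evsel__enable(evsel);
 *	// ... run the measured workload ...
 *	evsel__disable(evsel);
 *	evsel__close(evsel);
 */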

enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };
bool evsel__increase_rlimit(enum rlimit_action *set_rlimit);
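/*
 * Illustrative sketch (not part of the original header): a plausible retry
 * loop around an open that ran out of file descriptors, tracking state in the
 * enum above. The "retry_open" label and "err" variable are hypothetical.
 *
 *	enum rlimit_action set_rlimit = NO_CHANGE;
 *
 * retry_open:
 *	err = evsel__open(evsel, cpus, threads);
 *	if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
 *		goto retry_open;
 */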

bool evsel__precise_ip_fallback(struct evsel *evsel);

struct perf_sample;

void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);

static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
	return evsel__rawptr(evsel, sample, name);
}

struct tep_format_field;

u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);

struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);
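/*
 * Illustrative sketch (not part of the original header): pulling typed fields
 * out of a tracepoint sample by name. The field names shown ("prev_comm",
 * "prev_pid") are the ones a sched:sched_switch event carries; substitute
 * whatever the traced event actually defines.
 *
 *	const char *comm = evsel__strval(evsel, sample, "prev_comm");
 *	u64 pid = evsel__intval(evsel, sample, "prev_pid");
 */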

#define evsel__match(evsel, t, c)		\
	(evsel->core.attr.type == PERF_TYPE_##t &&	\
	 evsel->core.attr.config == PERF_COUNT_##c)

static inline bool evsel__match2(struct evsel *e1, struct evsel *e2)
{
	return (e1->core.attr.type == e2->core.attr.type) &&
	       (e1->core.attr.config == e2->core.attr.config);
}

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread);

int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale);

/**
 * evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel: event selector to read the value from
 * @cpu_map_idx: CPU of interest
 * @thread: thread of interest
 */
static inline int evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread)
{
	return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, false);
}

/**
 * evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel: event selector to read the value from
 * @cpu_map_idx: CPU of interest
 * @thread: thread of interest
 */
static inline int evsel__read_on_cpu_scaled(struct evsel *evsel, int cpu_map_idx, int thread)
{
	return __evsel__read_on_cpu(evsel, cpu_map_idx, thread, true);
}
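/*
 * Illustrative sketch (not part of the original header): reading the first
 * CPU/thread of an opened evsel and then looking at the stored value. The
 * perf_counts() accessor is assumed to come from "util/counts.h", and PRIu64
 * from <inttypes.h>.
 *
 *	if (evsel__read_on_cpu(evsel, 0, 0) == 0) {
 *		struct perf_counts_values *v = perf_counts(evsel->counts, 0, 0);
 *
 *		pr_debug("val=%" PRIu64 "\n", v->val);
 *	}
 */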

int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample);

int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
				  u64 *timestamp);

static inline struct evsel *evsel__next(struct evsel *evsel)
{
	return list_entry(evsel->core.node.next, struct evsel, core.node);
}

static inline struct evsel *evsel__prev(struct evsel *evsel)
{
	return list_entry(evsel->core.node.prev, struct evsel, core.node);
}

/**
 * evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel: evsel to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool evsel__is_group_leader(const struct evsel *evsel)
{
	return evsel->core.leader == &evsel->core;
}

/**
 * evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel: evsel to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool evsel__is_group_event(struct evsel *evsel)
{
	if (!symbol_conf.event_group)
		return false;

	return evsel__is_group_leader(evsel) && evsel->core.nr_members > 1;
}

bool evsel__is_function_event(struct evsel *evsel);

static inline bool evsel__is_bpf_output(struct evsel *evsel)
{
	return evsel__match(evsel, SOFTWARE, SW_BPF_OUTPUT);
}

static inline bool evsel__is_clock(struct evsel *evsel)
{
	return evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	       evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK);
}

bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize);
int evsel__open_strerror(struct evsel *evsel, struct target *target,
			 int err, char *msg, size_t size);

static inline int evsel__group_idx(struct evsel *evsel)
{
	return evsel->core.idx - evsel->core.leader->idx;
}

/* Iterates group WITHOUT the leader. */
#define for_each_group_member(_evsel, _leader)					\
for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
     (_evsel) && (_evsel)->core.leader == (&_leader->core);			\
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))

/* Iterates group WITH the leader. */
#define for_each_group_evsel(_evsel, _leader)					\
for ((_evsel) = _leader;							\
     (_evsel) && (_evsel)->core.leader == (&_leader->core);			\
     (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
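/*
 * Illustrative sketch (not part of the original header): dumping the names of
 * every event in a group, leader included, assuming pr_debug is available and
 * "leader" points at the group leader:
 *
 *	struct evsel *pos;
 *
 *	for_each_group_evsel(pos, leader)
 *		pr_debug("group member: %s\n", evsel__name(pos));
 */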

static inline bool evsel__has_branch_callstack(const struct evsel *evsel)
{
	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

static inline bool evsel__has_branch_hw_idx(const struct evsel *evsel)
{
	return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

static inline bool evsel__has_callchain(const struct evsel *evsel)
{
	/*
	 * For reporting purposes, an evsel sample can have a recorded callchain
	 * or a callchain synthesized from AUX area data.
	 */
	return evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN ||
	       evsel->synth_sample_type & PERF_SAMPLE_CALLCHAIN;
}

static inline bool evsel__has_br_stack(const struct evsel *evsel)
{
	/*
	 * For reporting purposes, an evsel sample can have a recorded branch
	 * stack or a branch stack synthesized from AUX area data.
	 */
	return evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK ||
	       evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool evsel__is_dummy_event(struct evsel *evsel)
{
	return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
	       (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}

struct perf_env *evsel__env(struct evsel *evsel);

int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);

void evsel__zero_per_pkg(struct evsel *evsel);
bool evsel__is_hybrid(struct evsel *evsel);
struct evsel *evsel__leader(struct evsel *evsel);
bool evsel__has_leader(struct evsel *evsel, struct evsel *leader);
bool evsel__is_leader(struct evsel *evsel);
void evsel__set_leader(struct evsel *evsel, struct evsel *leader);
int evsel__source_count(const struct evsel *evsel);
void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader);

bool arch_evsel__must_be_in_group(const struct evsel *evsel);

/*
 * Macro to swap the bit-field position and size.
 * Used when:
 * - we don't need to swap the entire u64, and
 * - the u64 has variable bit-field sizes, and
 * - it is presented in a host endianness which is different
 *   from the source endianness of the perf.data file.
 */
#define bitfield_swap(src, pos, size)	\
	((((src) >> (pos)) & ((1ull << (size)) - 1)) << (63 - ((pos) + (size) - 1)))
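/*
 * Illustrative worked example (not part of the original header): with pos = 0
 * and size = 1, the macro evaluates to ((src >> 0) & 1) << 63, i.e. it moves
 * bit 0 of src to bit 63, the mirrored position in a byte-swapped view of the
 * 64-bit word:
 *
 *	u64 bit63 = bitfield_swap(flags, 0, 1);	// bit 0 -> bit 63
 */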

u64 evsel__bitfield_swap_branch_flags(u64 value);
#endif /* __PERF_EVSEL_H */