cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

evsel.h (2284B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef __LIBPERF_INTERNAL_EVSEL_H
      3#define __LIBPERF_INTERNAL_EVSEL_H
      4
      5#include <linux/types.h>
      6#include <linux/perf_event.h>
      7#include <stdbool.h>
      8#include <sys/types.h>
      9#include <internal/cpumap.h>
     10
     11struct perf_thread_map;
     12struct xyarray;
     13
     14/*
     15 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
     16 * more than one entry in the evlist.
     17 */
      18struct perf_sample_id {
      19	struct hlist_node	 node;	/* linkage in the evlist's ID hash, for the PERF_SAMPLE_ID -> evsel lookup described above */
      20	u64			 id;	/* the PERF_SAMPLE_ID value assigned by the kernel for one fd */
      21	struct perf_evsel	*evsel;	/* event this sample ID maps back to */
      22       /*
      23	* 'idx' will be used for AUX area sampling. A sample will have AUX area
      24	* data that will be queued for decoding, where there are separate
      25	* queues for each CPU (per-cpu tracing) or task (per-thread tracing).
      26	* The sample ID can be used to lookup 'idx' which is effectively the
      27	* queue number.
      28	*/
      29	int			 idx;
      30	struct perf_cpu		 cpu;	/* CPU this ID entry was allocated for; NOTE(review): assumed — confirm in evsel.c */
      31	pid_t			 tid;	/* thread this ID entry was allocated for; NOTE(review): assumed — confirm in evsel.c */
      32
      33	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
      34	u64			 period;
      35};
     36
      37struct perf_evsel {
      38	struct list_head	 node;	/* position in the owning evlist's list of events */
      39	struct perf_event_attr	 attr;	/* event configuration (the perf_event_attr for this event) */
      40	struct perf_cpu_map	*cpus;	/* CPUs the event runs on; may be rewritten by map propagation (see system_wide below) */
      41	struct perf_cpu_map	*own_cpus;	/* CPUs specific to this event itself, as opposed to the propagated 'cpus' */
      42	struct perf_thread_map	*threads;	/* threads the event is attached to */
      43	struct xyarray		*fd;	/* per-(cpu, thread) file descriptors; sized by perf_evsel__alloc_fd(ncpus, nthreads) */
      44	struct xyarray		*mmap;	/* per-(cpu, thread) mmap state; NOTE(review): assumed — confirm layout in evsel.c */
      45	struct xyarray		*sample_id;	/* per-(cpu, thread) struct perf_sample_id entries; see perf_evsel__alloc_id() */
      46	u64			*id;	/* array of kernel-assigned sample IDs for this event */
      47	u32			 ids;	/* number of valid entries in 'id' */
      48	struct perf_evsel	*leader;	/* group leader event; NOTE(review): presumably points to self when ungrouped — confirm */
      49
      50	/* parse modifier helper */
      51	int			 nr_members;
      52	/*
      53	 * system_wide is for events that need to be on every CPU, irrespective
      54	 * of user requested CPUs or threads. Map propagation will set cpus to
      55	 * this event's own_cpus, whereby they will contribute to evlist
      56	 * all_cpus.
      57	 */
      58	bool			 system_wide;
      59	/*
      60	 * Some events, for example uncore events, require a CPU.
      61	 * i.e. it cannot be the 'any CPU' value of -1.
      62	 */
      63	bool			 requires_cpu;
      64	int			 idx;	/* index of this event, set at init time via perf_evsel__init() */
      65};
     66
      67void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
      68		      int idx);	/* initialize 'evsel' with the given attr and its index */
      69int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);	/* allocate the ncpus x nthreads 'fd' xyarray */
      70void perf_evsel__close_fd(struct perf_evsel *evsel);	/* close all fds held in 'fd' */
      71void perf_evsel__free_fd(struct perf_evsel *evsel);	/* free the 'fd' xyarray */
      72int perf_evsel__read_size(struct perf_evsel *evsel);	/* byte size of one read() of this event; NOTE(review): presumably derived from attr.read_format — confirm in evsel.c */
      73int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);	/* apply 'filter' to the event; NOTE(review): mechanism lives in evsel.c — confirm */
      74
      75int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);	/* allocate 'sample_id' / 'id' storage for ncpus x nthreads */
      76void perf_evsel__free_id(struct perf_evsel *evsel);	/* free 'sample_id' / 'id' storage */
     77
     78#endif /* __LIBPERF_INTERNAL_EVSEL_H */