cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

perf_api_probe.c (3966B)


/* SPDX-License-Identifier: GPL-2.0 */

#include "perf-sys.h"
#include "util/cloexec.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/parse-events.h"
#include "util/perf_api_probe.h"
#include <perf/cpumap.h>
#include <errno.h>

typedef void (*setup_probe_fn_t)(struct evsel *evsel);

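/*
 * Open the event described by @str once unmodified, then again after @fn has
 * set the attribute bit under test.  Returns 0 if the kernel accepts the
 * modified attribute, -EINVAL if it rejects it, and -EAGAIN if the base event
 * itself could not be opened (so the caller may retry with another event).
 */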
static int perf_do_probe_api(setup_probe_fn_t fn, struct perf_cpu cpu, const char *str)
{
	struct evlist *evlist;
	struct evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
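				/*
				 * System-wide monitoring (pid == -1) was
				 * refused; fall back to counting the current
				 * process and try again.
				 */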
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	evlist__delete(evlist);
	return err;
}

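/*
 * Probe @fn on CPU 0, trying progressively more basic events until one can
 * be opened: hardware cycles, then instructions, then the cpu-clock software
 * event.
 */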
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct perf_cpu_map *cpus;
	struct perf_cpu cpu;
	int ret, i = 0;

	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = perf_cpu_map__cpu(cpus, 0);
	perf_cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

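/* setup_probe_fn_t callbacks: each sets the single attribute bit under test. */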
static void perf_probe_sample_identifier(struct evsel *evsel)
{
	evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct evsel *evsel)
{
	evsel->core.attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct evsel *evsel)
{
	evsel->core.attr.context_switch = 1;
}

static void perf_probe_text_poke(struct evsel *evsel)
{
	evsel->core.attr.text_poke = 1;
}

static void perf_probe_build_id(struct evsel *evsel)
{
	evsel->core.attr.build_id = 1;
}

static void perf_probe_cgroup(struct evsel *evsel)
{
	evsel->core.attr.cgroup = 1;
}

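/* Public predicates: report whether the running kernel supports each feature. */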
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

bool perf_can_record_text_poke_events(void)
{
	return perf_probe_api(perf_probe_text_poke);
}

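/*
 * Check whether a system-wide (pid == -1, single CPU) event can be opened at
 * all; unprivileged users are typically only allowed to do this when
 * perf_event_paranoid is set low enough.
 */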
bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct perf_cpu_map *cpus;
	struct perf_cpu cpu;
	int fd;

	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		return false;

	cpu = perf_cpu_map__cpu(cpus, 0);
	perf_cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

/*
 * Architectures are expected to know if AUX area sampling is supported by the
 * hardware. Here we check for kernel support.
 */
bool perf_can_aux_sample(void)
{
	struct perf_event_attr attr = {
		.size = sizeof(struct perf_event_attr),
		.exclude_kernel = 1,
		/*
		 * Non-zero value causes the kernel to calculate the effective
		 * attribute size up to that byte.
		 */
		.aux_sample_size = 1,
	};
	int fd;

	fd = sys_perf_event_open(&attr, -1, 0, -1, 0);
	/*
	 * If the kernel attribute is big enough to contain aux_sample_size
	 * then we assume that it is supported. We are relying on the kernel to
	 * validate the attribute size before anything else that could be wrong.
	 */
	if (fd < 0 && errno == E2BIG)
		return false;
	if (fd >= 0)
		close(fd);

	return true;
}

bool perf_can_record_build_id(void)
{
	return perf_probe_api(perf_probe_build_id);
}

bool perf_can_record_cgroup(void)
{
	return perf_probe_api(perf_probe_cgroup);
}