cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

env.c (11497B)


// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "strbuf.h"

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>

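/*
 * Insert a BPF program info node into the bpf_progs.infos rb-tree,
 * keyed by program id. A duplicate id is dropped with a debug message.
 * The tree is protected by the bpf_progs.lock rwsem.
 */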
void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node)
{
        __u32 prog_id = info_node->info_linear->info.id;
        struct bpf_prog_info_node *node;
        struct rb_node *parent = NULL;
        struct rb_node **p;

        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.infos.rb_node;

        while (*p != NULL) {
                parent = *p;
                node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id) {
                        p = &(*p)->rb_left;
                } else if (prog_id > node->info_linear->info.id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated bpf prog info %u\n", prog_id);
                        goto out;
                }
        }

        rb_link_node(&info_node->rb_node, parent, p);
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
        env->bpf_progs.infos_cnt++;
out:
        up_write(&env->bpf_progs.lock);
}

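/*
 * Look up a BPF program info node by program id. Returns NULL if the id
 * is not in the tree. Readers take bpf_progs.lock shared.
 */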
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id)
{
        struct bpf_prog_info_node *node = NULL;
        struct rb_node *n;

        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.infos.rb_node;

        while (n) {
                node = rb_entry(n, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id)
                        n = n->rb_left;
                else if (prog_id > node->info_linear->info.id)
                        n = n->rb_right;
                else
                        goto out;
        }
        node = NULL;

out:
        up_read(&env->bpf_progs.lock);
        return node;
}

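/*
 * Insert a BTF node keyed by BTF id into the bpf_progs.btfs rb-tree.
 * Returns true on insertion, false if a node with that id already exists.
 */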
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
        struct rb_node *parent = NULL;
        __u32 btf_id = btf_node->id;
        struct btf_node *node;
        struct rb_node **p;
        bool ret = true;

        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.btfs.rb_node;

        while (*p != NULL) {
                parent = *p;
                node = rb_entry(parent, struct btf_node, rb_node);
                if (btf_id < node->id) {
                        p = &(*p)->rb_left;
                } else if (btf_id > node->id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated btf %u\n", btf_id);
                        ret = false;
                        goto out;
                }
        }

        rb_link_node(&btf_node->rb_node, parent, p);
        rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
        env->bpf_progs.btfs_cnt++;
out:
        up_write(&env->bpf_progs.lock);
        return ret;
}

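/* Look up a BTF node by id; returns NULL if it is not in the tree. */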
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
        struct btf_node *node = NULL;
        struct rb_node *n;

        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.btfs.rb_node;

        while (n) {
                node = rb_entry(n, struct btf_node, rb_node);
                if (btf_id < node->id)
                        n = n->rb_left;
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
                        goto out;
        }
        node = NULL;

out:
        up_read(&env->bpf_progs.lock);
        return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
        struct rb_root *root;
        struct rb_node *next;

        down_write(&env->bpf_progs.lock);

        root = &env->bpf_progs.infos;
        next = rb_first(root);

        while (next) {
                struct bpf_prog_info_node *node;

                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node->info_linear);
                free(node);
        }

        env->bpf_progs.infos_cnt = 0;

        root = &env->bpf_progs.btfs;
        next = rb_first(root);

        while (next) {
                struct btf_node *node;

                node = rb_entry(next, struct btf_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node);
        }

        env->bpf_progs.btfs_cnt = 0;

        up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

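/* Release every dynamically allocated member of a perf_env. */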
void perf_env__exit(struct perf_env *env)
{
        int i;

        perf_env__purge_bpf(env);
        perf_env__purge_cgroups(env);
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
        zfree(&env->arch);
        zfree(&env->cpu_desc);
        zfree(&env->cpuid);
        zfree(&env->cmdline);
        zfree(&env->cmdline_argv);
        zfree(&env->sibling_dies);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);
        zfree(&env->cpu_pmu_caps);
        zfree(&env->numa_map);

        for (i = 0; i < env->nr_numa_nodes; i++)
                perf_cpu_map__put(env->numa_nodes[i].map);
        zfree(&env->numa_nodes);

        for (i = 0; i < env->caches_cnt; i++)
                cpu_cache_level__free(&env->caches[i]);
        zfree(&env->caches);

        for (i = 0; i < env->nr_memory_nodes; i++)
                zfree(&env->memory_nodes[i].set);
        zfree(&env->memory_nodes);

        for (i = 0; i < env->nr_hybrid_nodes; i++) {
                zfree(&env->hybrid_nodes[i].pmu_name);
                zfree(&env->hybrid_nodes[i].cpus);
        }
        zfree(&env->hybrid_nodes);

        for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
                zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
                zfree(&env->hybrid_cpc_nodes[i].pmu_name);
        }
        zfree(&env->hybrid_cpc_nodes);
}

void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
        env->bpf_progs.infos = RB_ROOT;
        env->bpf_progs.btfs = RB_ROOT;
        init_rwsem(&env->bpf_progs.lock);
#endif
        env->kernel_is_64_bit = -1;
}

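/*
 * Decide whether the kernel is 64-bit from the raw architecture string;
 * any arch outside the known 64-bit list below is treated as 32-bit.
 */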
static void perf_env__init_kernel_mode(struct perf_env *env)
{
        const char *arch = perf_env__raw_arch(env);

        if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
            !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
            !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
            !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
                env->kernel_is_64_bit = 1;
        else
                env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
        if (env->kernel_is_64_bit == -1)
                perf_env__init_kernel_mode(env);

        return env->kernel_is_64_bit;
}

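/*
 * Snapshot the command line. Each argv entry is duplicated because
 * option parsing permutes the original array.
 */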
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
        int i;

        /* do not include NULL termination */
        env->cmdline_argv = calloc(argc, sizeof(char *));
        if (env->cmdline_argv == NULL)
                goto out_enomem;

        /*
         * Must copy argv contents because it gets moved around during option
         * parsing:
         */
        for (i = 0; i < argc ; i++) {
                env->cmdline_argv[i] = strdup(argv[i]);
                if (env->cmdline_argv[i] == NULL)
                        goto out_free;
        }

        env->nr_cmdline = argc;

        return 0;
out_free:
        zfree(&env->cmdline_argv);
out_enomem:
        return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
        int idx, nr_cpus;

        if (env->cpu != NULL)
                return 0;

        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu().cpu;

        nr_cpus = env->nr_cpus_avail;
        if (nr_cpus == -1)
                return -EINVAL;

        env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
        if (env->cpu == NULL)
                return -ENOMEM;

        for (idx = 0; idx < nr_cpus; ++idx) {
                struct perf_cpu cpu = { .cpu = idx };

                env->cpu[idx].core_id   = cpu__get_core_id(cpu);
                env->cpu[idx].socket_id = cpu__get_socket_id(cpu);
                env->cpu[idx].die_id    = cpu__get_die_id(cpu);
        }

        env->nr_cpus_avail = nr_cpus;
        return 0;
}

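/*
 * Build the pmu_mappings string: one "<type>:<name>" entry per named PMU,
 * each entry terminated by a NUL character.
 */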
int perf_env__read_pmu_mappings(struct perf_env *env)
{
        struct perf_pmu *pmu = NULL;
        u32 pmu_num = 0;
        struct strbuf sb;

        while ((pmu = perf_pmu__scan(pmu))) {
                if (!pmu->name)
                        continue;
                pmu_num++;
        }
        if (!pmu_num) {
                pr_debug("pmu mappings not available\n");
                return -ENOENT;
        }
        env->nr_pmu_mappings = pmu_num;

        if (strbuf_init(&sb, 128 * pmu_num) < 0)
                return -ENOMEM;

        while ((pmu = perf_pmu__scan(pmu))) {
                if (!pmu->name)
                        continue;
                if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
                        goto error;
                /* include a NULL character at the end */
                if (strbuf_add(&sb, "", 1) < 0)
                        goto error;
        }

        env->pmu_mappings = strbuf_detach(&sb, NULL);

        return 0;

error:
        strbuf_release(&sb);
        return -1;
}

int perf_env__read_cpuid(struct perf_env *env)
{
        char cpuid[128];
        int err = get_cpuid(cpuid, sizeof(cpuid));

        if (err)
                return err;

        free(env->cpuid);
        env->cpuid = strdup(cpuid);
        if (env->cpuid == NULL)
                return ENOMEM;
        return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
        struct utsname uts;

        if (env->arch)
                return 0;

        if (!uname(&uts))
                env->arch = strdup(uts.machine);

        return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu().cpu;

        return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
        return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
        return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
        zfree(&cache->type);
        zfree(&cache->map);
        zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
        if (!strcmp(arch, "x86_64"))
                return "x86";
        if (arch[0] == 'i' && !strcmp(arch + 2, "86"))
                return "x86";
        if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
                return "sparc";
        if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
                return "arm64";
        if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
                return "arm";
        if (!strncmp(arch, "s390", 4))
                return "s390";
        if (!strncmp(arch, "parisc", 6))
                return "parisc";
        if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
                return "powerpc";
        if (!strncmp(arch, "mips", 4))
                return "mips";
        if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
                return "sh";

        return arch;
}

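/*
 * Normalized architecture name for the env; falls back to the local
 * machine's uname(2) result when no recorded arch is available.
 */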
const char *perf_env__arch(struct perf_env *env)
{
        char *arch_name;

        if (!env || !env->arch) { /* Assume local operation */
                static struct utsname uts = { .machine[0] = '\0', };
                if (uts.machine[0] == '\0' && uname(&uts) < 0)
                        return NULL;
                arch_name = uts.machine;
        } else
                arch_name = env->arch;

        return normalize_arch(arch_name);
}

const char *perf_env__cpuid(struct perf_env *env)
{
        int status;

        if (!env || !env->cpuid) { /* Assume local operation */
                status = perf_env__read_cpuid(env);
                if (status)
                        return NULL;
        }

        return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
        int status;

        if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
                status = perf_env__read_pmu_mappings(env);
                if (status)
                        return 0;
        }

        return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
        int status;

        if (!env || !env->pmu_mappings) { /* Assume local operation */
                status = perf_env__read_pmu_mappings(env);
                if (status)
                        return NULL;
        }

        return env->pmu_mappings;
}

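/*
 * Map a CPU to its NUMA node. The cpu -> node table is built lazily on
 * first use; CPUs absent from every node map resolve to -1.
 */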
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
        if (!env->nr_numa_map) {
                struct numa_node *nn;
                int i, nr = 0;

                for (i = 0; i < env->nr_numa_nodes; i++) {
                        nn = &env->numa_nodes[i];
                        nr = max(nr, perf_cpu_map__max(nn->map).cpu);
                }

                nr++;

                /*
                 * We initialize the numa_map array to prepare
                 * it for missing cpus, which return node -1
                 */
                env->numa_map = malloc(nr * sizeof(int));
                if (!env->numa_map)
                        return -1;

                for (i = 0; i < nr; i++)
                        env->numa_map[i] = -1;

                env->nr_numa_map = nr;

                for (i = 0; i < env->nr_numa_nodes; i++) {
                        struct perf_cpu tmp;
                        int j;

                        nn = &env->numa_nodes[i];
                        perf_cpu_map__for_each_cpu(tmp, j, nn->map)
                                env->numa_map[tmp.cpu] = i;
                }
        }

        return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}