cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

get_branch_snapshot.c (2872B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "get_branch_snapshot.skel.h"

static int *pfd_array;
static int cpu_cnt;

/* Return true if /proc/cpuinfo advertises the "hypervisor" CPU flag. */
static bool is_hypervisor(void)
{
	char *line = NULL;
	bool ret = false;
	size_t len = 0;
	FILE *fp;

	fp = fopen("/proc/cpuinfo", "r");
	if (!fp)
		return false;

	while (getline(&line, &len, fp) != -1) {
		if (!strncmp(line, "flags", 5)) {
			if (strstr(line, "hypervisor") != NULL)
				ret = true;
			break;
		}
	}

	free(line);
	fclose(fp);
	return ret;
}

static int create_perf_events(void)
{
	struct perf_event_attr attr = {0};
	int cpu;

	/* create a perf event that samples the branch stack (LBR) */
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1b00;
	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
		PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;

	cpu_cnt = libbpf_num_possible_cpus();
	pfd_array = malloc(sizeof(int) * cpu_cnt);
	if (!pfd_array) {
		cpu_cnt = 0;
		return 1;
	}

	/* open one event per possible CPU; report failure only if not
	 * even the first one could be opened
	 */
	for (cpu = 0; cpu < cpu_cnt; cpu++) {
		pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
					 -1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
		if (pfd_array[cpu] < 0)
			break;
	}

	return cpu == 0;
}

static void close_perf_events(void)
{
	int cpu, fd;

	for (cpu = 0; cpu < cpu_cnt; cpu++) {
		fd = pfd_array[cpu];
		if (fd < 0)
			break;
		close(fd);
	}
	free(pfd_array);
}

void serial_test_get_branch_snapshot(void)
{
	struct get_branch_snapshot *skel = NULL;
	int err;

	/* Skip the test until LBR snapshot is fixed for hypervisor guests. */
	if (is_hypervisor()) {
		test__skip();
		return;
	}

	if (create_perf_events()) {
		test__skip();  /* system doesn't support LBR */
		goto cleanup;
	}

	skel = get_branch_snapshot__open_and_load();
	if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
		goto cleanup;

	err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
	if (!ASSERT_OK(err, "kallsyms_find"))
		goto cleanup;

	/* Just a guess for the end of this function, as module functions
	 * in /proc/kallsyms could come in any order.
	 */
	skel->bss->address_high = skel->bss->address_low + 128;

	err = get_branch_snapshot__attach(skel);
	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
		goto cleanup;

	trigger_module_test_read(100);

	if (skel->bss->total_entries < 16) {
		/* too few entries for the hit/waste test */
		test__skip();
		goto cleanup;
	}

	ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");

	/* Given we stop LBR in software, we will waste a few entries.
	 * But we should try to waste as few entries as possible. We are at
	 * about 7 on x86_64 systems.
	 * Add a check for < 10 so that we get a heads-up when something
	 * changes and wastes too many entries.
	 */
	ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");

cleanup:
	get_branch_snapshot__destroy(skel);
	close_perf_events();
}
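
The counters read from skel->bss above (address_low, address_high, total_entries, test1_hits, wasted_entries) are populated by the companion BPF object behind get_branch_snapshot.skel.h, which is not shown on this page. As a rough illustration only, a BPF program filling those fields could look like the sketch below; the attach point, ENTRY_CNT and the way the branch entries are classified are assumptions, with bpf_get_branch_snapshot() returning the number of bytes copied into the buffer.

// Hypothetical sketch of the BPF side; only the global variable names are
// taken from the test above, everything else is assumed.
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* set by the userspace test before attaching */
__u64 address_low = 0;
__u64 address_high = 0;

/* read back by the userspace test after trigger_module_test_read() */
long total_entries = 0;
int test1_hits = 0;
int wasted_entries = 0;

#define ENTRY_CNT 32
struct perf_branch_entry entries[ENTRY_CNT] = {};

static bool in_range(__u64 val)
{
	return val >= address_low && val < address_high;
}

SEC("fexit/bpf_testmod_loop_test") /* assumed attach point */
int BPF_PROG(test1)
{
	long i;

	/* capture the LBR as early as possible so few entries are wasted */
	total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
	total_entries /= sizeof(struct perf_branch_entry);

	for (i = 0; i < ENTRY_CNT; i++) {
		if (i >= total_entries)
			break;
		if (in_range(entries[i].from) && in_range(entries[i].to))
			test1_hits++;     /* branch within the traced function */
		else if (!test1_hits)
			wasted_entries++; /* entry spent before the first hit */
	}

	return 0;
}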