cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

get_stack_raw_tp.c (4163B)


// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

#define MAX_CNT_RAWTP	10ull
#define MAX_STACK_RAWTP	100

/* referenced by the CHECK() macro from test_progs.h */
static int duration = 0;

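/* Per-event sample layout; must mirror the struct filled in and
 * submitted by the BPF side (test_get_stack_rawtp.o).
 */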
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

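/* perf_buffer sample callback: checks that the kernel and user stacks
 * reported for each event look sane.
 */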
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	/* perfbuf-submitted data is 4-byte aligned, but we need 8-byte
	 * alignment, so copy data into a local variable, for simplicity
	 */
	struct get_stack_trace_t e;
	int i, num_stack;
	struct ksym *ks;

	memset(&e, 0, sizeof(e));
	memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If the JIT is enabled, we have no good way to verify
		 * the sanity of the kernel stack, so we just assume it
		 * is good if the stack is non-empty. This could be
		 * improved in the future.
		 */
		if (env.jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e.kern_stack_size / sizeof(__u64);
		if (env.jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e.kern_stack[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0)
			good_user_stack = true;
	}

	/* Only invoke CHECK() on failure; an unconditional CHECK() would
	 * also print a PASS line for every sample.
	 */
	if (!good_kern_stack)
		CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
	if (!good_user_stack)
		CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
}

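/* Loads the BPF object, attaches it to the sys_enter raw tracepoint,
 * triggers MAX_CNT_RAWTP syscalls and drains the resulting samples
 * through get_stack_print_output() above.
 */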
void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	const char *file_err = "./test_get_stack_rawtp_err.o";
	const char *prog_name = "bpf_prog1";
	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
	struct perf_buffer *pb = NULL;
	struct bpf_link *link = NULL;
	struct timespec tv = {0, 10};
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_map *map;
	cpu_set_t cpu_set;

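	/* The _err object is expected to fail to load, so a successful
	 * load (err >= 0) is a test failure here.
	 */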
	err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_name(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto close_prog;

	map = bpf_object__find_map_by_name(obj, "perfmap");
	if (CHECK(!map, "bpf_find_map", "not found\n"))
		goto close_prog;

	/* ksym_search() in the sample callback needs the kallsyms table */
	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

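	/* Pin the thread to CPU 0 so the syscalls triggered below, and
	 * thus the perf buffer samples, all come from one known CPU.
	 */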
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
		goto close_prog;

	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
	if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
		goto close_prog;

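	/* 8 = size of each per-CPU ring buffer, in pages */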
	pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
			      NULL, NULL, NULL);
	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

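	/* perf_buffer__poll() returns the number of samples consumed, or
	 * a negative error; keep polling until all expected samples have
	 * been processed.
	 */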
	while (exp_cnt > 0) {
		err = perf_buffer__poll(pb, 100);
		if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
			goto close_prog;
		exp_cnt -= err;
	}

close_prog:
	bpf_link__destroy(link);
	perf_buffer__free(pb);
	bpf_object__close(obj);
}