cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

profiler.bpf.c (2998B)


// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* map of perf event fds, num_cpu * num_metric entries */
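/* the fd for (cpu, metric) is stored at index metric * num_cpu + cpu */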
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/* readings at fentry */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} fentry_readings SEC(".maps");

/* accumulated readings */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} accum_readings SEC(".maps");

/* sample counts, one per cpu */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
} counts SEC(".maps");

const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
#define MAX_NUM_MATRICS 4

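/* num_cpu and num_metric live in .rodata, so the loader can override them
 * between opening and loading the object; the XXX in the section names is
 * likewise a placeholder that the loader points at the real attach target
 * (e.g. with bpf_program__set_attach_target()). MAX_NUM_MATRICS caps the
 * on-stack arrays and loop bounds so the verifier sees a fixed limit. */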
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
	u32 key = bpf_get_smp_processor_id();
	u32 i;

	/* look up before reading, to reduce error */
	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
		u32 flag = i;

		ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
		if (!ptrs[i])
			return 0;
	}

	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
		struct bpf_perf_event_value reading;
		int err;

		err = bpf_perf_event_read_value(&events, key, &reading,
						sizeof(reading));
		if (err)
			return 0;
		*(ptrs[i]) = reading;
		key += num_cpu;
	}

	return 0;
}

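/* all maps are per-CPU, so the unsynchronized read-modify-writes below
 * only ever touch the current CPU's slot */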
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
{
	struct bpf_perf_event_value *before, diff;

	before = bpf_map_lookup_elem(&fentry_readings, &id);
	/* only account samples with a valid fentry_reading */
	if (before && before->counter) {
		struct bpf_perf_event_value *accum;

		diff.counter = after->counter - before->counter;
		diff.enabled = after->enabled - before->enabled;
		diff.running = after->running - before->running;

		accum = bpf_map_lookup_elem(&accum_readings, &id);
		if (accum) {
			accum->counter += diff.counter;
			accum->enabled += diff.enabled;
			accum->running += diff.running;
		}
	}
}

SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
	u32 cpu = bpf_get_smp_processor_id();
	u32 i, zero = 0;
	int err;
	u64 *count;

	/* read all events before updating the maps, to reduce error */
	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
						readings + i, sizeof(*readings));
		if (err)
			return 0;
	}
	count = bpf_map_lookup_elem(&counts, &zero);
	if (count) {
		*count += 1;
		for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
			fexit_update_maps(i, &readings[i]);
	}
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";
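
This is bpftool's profiler skeleton (tools/bpf/bpftool/skeleton/profiler.bpf.c in the mainline tree, used by `bpftool prog profile`): the fentry program snapshots a set of perf counters when the traced function is entered, and the fexit program reads them again on return and accumulates the deltas per metric. As a rough sketch of how user space might drive it, the hypothetical loader below assumes a skeleton generated with `bpftool gen skeleton profiler.bpf.o > profiler.skel.h`, counts a single metric (CPU cycles) on every CPU, and picks tcp_sendmsg as an arbitrary attach target; it needs root (or CAP_BPF plus CAP_PERFMON) and real error handling.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>
#include "profiler.skel.h"	/* hypothetical: bpftool gen skeleton profiler.bpf.o */

int main(void)
{
	__u32 nmetric = 1, m, cpu, ncpu, zero = 0;
	struct profiler_bpf *skel;
	int n = libbpf_num_possible_cpus();

	if (n < 1)
		return 1;
	ncpu = n;

	skel = profiler_bpf__open();
	if (!skel)
		return 1;

	/* .rodata constants and map sizes can only be set between open and load */
	skel->rodata->num_cpu = ncpu;
	skel->rodata->num_metric = nmetric;
	bpf_map__set_max_entries(skel->maps.events, ncpu * nmetric);
	bpf_map__set_max_entries(skel->maps.fentry_readings, nmetric);
	bpf_map__set_max_entries(skel->maps.accum_readings, nmetric);
	bpf_map__set_max_entries(skel->maps.counts, 1);

	/* resolve the XXX placeholders to a real kernel function */
	bpf_program__set_attach_target(skel->progs.fentry_XXX, 0, "tcp_sendmsg");
	bpf_program__set_attach_target(skel->progs.fexit_XXX, 0, "tcp_sendmsg");

	if (profiler_bpf__load(skel))
		goto out;

	/* one counter fd per (cpu, metric), stored at metric * num_cpu + cpu */
	for (m = 0; m < nmetric; m++) {
		for (cpu = 0; cpu < ncpu; cpu++) {
			struct perf_event_attr attr = {
				.type = PERF_TYPE_HARDWARE,
				.config = PERF_COUNT_HW_CPU_CYCLES,
				.size = sizeof(attr),
			};
			__u32 idx = m * ncpu + cpu;
			int fd = syscall(__NR_perf_event_open, &attr,
					 -1 /* any pid */, cpu, -1, 0);

			if (fd < 0)
				goto out;
			bpf_map__update_elem(skel->maps.events,
					     &idx, sizeof(idx),
					     &fd, sizeof(fd), BPF_ANY);
		}
	}

	if (profiler_bpf__attach(skel))
		goto out;

	sleep(5);	/* let the fentry/fexit pair collect samples */

	{
		struct bpf_perf_event_value vals[ncpu];
		__u64 sum = 0;

		/* per-CPU lookups return one value per possible CPU */
		if (!bpf_map__lookup_elem(skel->maps.accum_readings,
					  &zero, sizeof(zero),
					  vals, sizeof(vals), 0)) {
			for (cpu = 0; cpu < ncpu; cpu++)
				sum += vals[cpu].counter;
			printf("cycles in tcp_sendmsg: %llu\n",
			       (unsigned long long)sum);
		}
	}
out:
	profiler_bpf__destroy(skel);
	return 0;
}

The open/load split is the point of the setup code: .rodata values, map sizes, and attach targets are all frozen at load time, so the loader must fix them while the object is open but not yet loaded, and can only populate the events map with perf fds once the maps exist after load.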