cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

func_latency.bpf.c (2164B)


// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Google
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

// This should be in sync with "util/ftrace.h"
#define NUM_BUCKET  22

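// timestamp of the most recent function entry, keyed by the thread's pid_tgid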
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u64));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, 10000);
} functime SEC(".maps");

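// CPUs to trace: a key's presence means events on that CPU are accepted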
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} cpu_filter SEC(".maps");

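// threads to trace: a key's presence means events from that tid are accepted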
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u8));
	__uint(max_entries, 1);
} task_filter SEC(".maps");

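// per-CPU latency histogram, one slot per power-of-two bucket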
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
	__uint(max_entries, NUM_BUCKET);
} latency SEC(".maps");


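// control flags, expected to be set by the userspace loader before attaching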
int enabled = 0;
int has_cpu = 0;
int has_task = 0;
int use_nsec = 0;

     44SEC("kprobe/func")
     45int BPF_PROG(func_begin)
     46{
     47	__u64 key, now;
     48
     49	if (!enabled)
     50		return 0;
     51
     52	key = bpf_get_current_pid_tgid();
     53
     54	if (has_cpu) {
     55		__u32 cpu = bpf_get_smp_processor_id();
     56		__u8 *ok;
     57
     58		ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
     59		if (!ok)
     60			return 0;
     61	}
     62
     63	if (has_task) {
     64		__u32 pid = key & 0xffffffff;
     65		__u8 *ok;
     66
     67		ok = bpf_map_lookup_elem(&task_filter, &pid);
     68		if (!ok)
     69			return 0;
     70	}
     71
     72	now = bpf_ktime_get_ns();
     73
     74	// overwrite timestamp for nested functions
     75	bpf_map_update_elem(&functime, &key, &now, BPF_ANY);
     76	return 0;
     77}
     78
     79SEC("kretprobe/func")
     80int BPF_PROG(func_end)
     81{
     82	__u64 tid;
     83	__u64 *start;
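	// bucket boundaries grow as powers of two of 1 ns (use_nsec) or 1 us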
	__u64 cmp_base = use_nsec ? 1 : 1000;

	if (!enabled)
		return 0;

	tid = bpf_get_current_pid_tgid();

	start = bpf_map_lookup_elem(&functime, &tid);
	if (start) {
		__s64 delta = bpf_ktime_get_ns() - *start;
		__u32 key;
		__u64 *hist;

		bpf_map_delete_elem(&functime, &tid);

		if (delta < 0)
			return 0;

		// find the bucket: the first index with delta < (cmp_base << key)
		for (key = 0; key < (NUM_BUCKET - 1); key++) {
			if (delta < (cmp_base << key))
				break;
		}

		hist = bpf_map_lookup_elem(&latency, &key);
		if (!hist)
			return 0;

		// latency is a per-CPU map, so each CPU updates its own slot
		*hist += 1;
	}

	return 0;
}