cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

map_init.c (5199B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */

#include <test_progs.h>
#include "test_map_init.skel.h"

#define TEST_VALUE 0x1234
#define FILL_VALUE 0xdeadbeef

static int nr_cpus;
static int duration; /* referenced by the CHECK() macro from test_progs.h */

typedef unsigned long long map_key_t;
typedef unsigned long long map_value_t;
typedef struct {
	map_value_t v; /* padding */
} __bpf_percpu_val_align pcpu_map_value_t;

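/* Fill the map with `num` entries (keys 1..num), each holding FILL_VALUE
 * for every possible CPU, so that a later delete leaves non-zero data
 * behind in the freed slot.
 */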
static int map_populate(int map_fd, int num)
{
	pcpu_map_value_t value[nr_cpus];
	int i, err;
	map_key_t key;

	for (i = 0; i < nr_cpus; i++)
		bpf_percpu(value, i) = FILL_VALUE;

	for (key = 1; key <= num; key++) {
		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			return -1;
	}

	return 0;
}

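/* Open the test_map_init skeleton, set hashmap1 to the requested map type
 * and size, load the skeleton, and pre-populate `populate` entries.
 * Returns the loaded skeleton (map fd via *map_fd) or NULL on error.
 */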
static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
			    int *map_fd, int populate)
{
	struct test_map_init *skel;
	int err;

	skel = test_map_init__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_type(skel->maps.hashmap1, map_type);
	if (!ASSERT_OK(err, "bpf_map__set_type"))
		goto error;

	err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
		goto error;

	err = test_map_init__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto error;

	*map_fd = bpf_map__fd(skel->maps.hashmap1);
	if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
		goto error;

	err = map_populate(*map_fd, populate);
	if (!ASSERT_OK(err, "map_populate"))
		goto error_map;

	return skel;

error_map:
	close(*map_fd);
error:
	test_map_init__destroy(skel);
	return NULL;
}

/* executes bpf program that updates map with key, value */
static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
				map_value_t value)
{
	struct test_map_init__bss *bss;

	bss = skel->bss;

	bss->inKey = key;
	bss->inValue = value;
	bss->inPid = getpid();

	if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
		return -1;

	/* Let tracepoint trigger */
	syscall(__NR_getpgid);

	test_map_init__detach(skel);

	return 0;
}

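/* Verify that exactly one CPU's slot holds `expected` and that every other
 * CPU's slot is zero, i.e. the re-used per-CPU element was zero-initialized.
 */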
static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
{
	int i, nzCnt = 0;
	map_value_t val;

	for (i = 0; i < nr_cpus; i++) {
		val = bpf_percpu(value, i);
		if (val) {
			if (CHECK(val != expected, "map value",
				  "unexpected for cpu %d: 0x%llx\n", i, val))
				return -1;
			nzCnt++;
		}
	}

	if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
		  nzCnt))
		return -1;

	return 0;
}

/* Add key=1 elem with values set for all CPUs
 * Delete elem key=1
 * Run bpf prog that inserts new key=1 elem with value=0x1234
 *   (bpf prog can only set value for current CPU)
 * Lookup Key=1 and check value is as expected for all CPUs:
 *   value set by bpf prog for one CPU, 0 for all others
 */
static void test_pcpu_map_init(void)
{
	pcpu_map_value_t value[nr_cpus];
	struct test_map_init *skel;
	int map_fd, err;
	map_key_t key;

	/* max 1 elem in map so insertion is forced to reuse freed entry */
	skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
	if (!ASSERT_OK_PTR(skel, "prog_setup"))
		return;

	/* delete element so the entry can be re-used */
	key = 1;
	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "bpf_map_delete_elem"))
		goto cleanup;

	/* run bpf prog that inserts new elem, re-using the slot just freed */
	err = prog_run_insert_elem(skel, key, TEST_VALUE);
	if (!ASSERT_OK(err, "prog_run_insert_elem"))
		goto cleanup;

	/* check that key=1 was re-created by bpf prog */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
		goto cleanup;

	/* and has expected values */
	check_values_one_cpu(value, TEST_VALUE);

cleanup:
	test_map_init__destroy(skel);
}

/* Add key=1 and key=2 elems with values set for all CPUs
 * Run bpf prog that inserts new key=3 elem
 *   (only for current cpu; other cpus should have initial value = 0)
 * Lookup key=3 and check value is as expected for all CPUs
 */
static void test_pcpu_lru_map_init(void)
{
	pcpu_map_value_t value[nr_cpus];
	struct test_map_init *skel;
	int map_fd, err;
	map_key_t key;

	/* Set up LRU map with 2 elements, values filled for all CPUs.
	 * With these 2 elements, the LRU map is full.
	 */
	skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
	if (!ASSERT_OK_PTR(skel, "prog_setup"))
		return;

	/* run bpf prog that inserts new key=3 element, re-using LRU slot */
	key = 3;
	err = prog_run_insert_elem(skel, key, TEST_VALUE);
	if (!ASSERT_OK(err, "prog_run_insert_elem"))
		goto cleanup;

	/* check that key=3 replaced one of earlier elements */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
		goto cleanup;

	/* and has expected values */
	check_values_one_cpu(value, TEST_VALUE);

cleanup:
	test_map_init__destroy(skel);
}

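/* Test entry point: requires more than one possible CPU so that the
 * "value set only on the current CPU, zero elsewhere" property is observable.
 */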
void test_map_init(void)
{
	nr_cpus = bpf_num_possible_cpus();
	if (nr_cpus <= 1) {
		printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
		test__skip();
		return;
	}

	if (test__start_subtest("pcpu_map_init"))
		test_pcpu_map_init();
	if (test__start_subtest("pcpu_lru_map_init"))
		test_pcpu_lru_map_init();
}
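
This file is the upstream BPF selftest tools/testing/selftests/bpf/prog_tests/map_init.c as carried in this tree. Assuming the BPF selftests are built in the usual way, it should be runnable through the test_progs runner, e.g. ./test_progs -t map_init.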