cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

array_map_batch_ops.c (4035B)
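This appears to be the kernel's BPF selftest for batch operations on array maps (upstream path: tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c). It populates plain and per-CPU BPF_MAP_TYPE_ARRAY maps with bpf_map_update_batch() and reads the entries back with bpf_map_lookup_batch(), verifying the values after each round.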


// SPDX-License-Identifier: GPL-2.0

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include <test_maps.h>

static int nr_cpus;

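/* Fill keys[] with 0..max_entries-1 and values[] with key + 1 (plus the CPU
 * index for per-CPU maps), then push everything to the map with a single
 * bpf_map_update_batch() call.
 */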
static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
			     __s64 *values, bool is_pcpu)
{
	int i, j, err;
	int cpu_offset = 0;
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = 0,
		.flags = 0,
	);

	for (i = 0; i < max_entries; i++) {
		keys[i] = i;
		if (is_pcpu) {
			cpu_offset = i * nr_cpus;
			for (j = 0; j < nr_cpus; j++)
				(values + cpu_offset)[j] = i + 1 + j;
		} else {
			values[i] = i + 1;
		}
	}

	err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
	CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
}

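/* Verify that every looked-up value matches what map_batch_update() wrote
 * (key + 1, plus the CPU index for per-CPU maps) and that no index is
 * missing from the result.
 */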
static void map_batch_verify(int *visited, __u32 max_entries, int *keys,
			     __s64 *values, bool is_pcpu)
{
	int i, j;
	int cpu_offset = 0;

	memset(visited, 0, max_entries * sizeof(*visited));
	for (i = 0; i < max_entries; i++) {
		if (is_pcpu) {
			cpu_offset = i * nr_cpus;
			for (j = 0; j < nr_cpus; j++) {
				__s64 value = (values + cpu_offset)[j];
				CHECK(keys[i] + j + 1 != value,
				      "key/value checking",
				      "error: i %d j %d key %d value %lld\n", i,
				      j, keys[i], value);
			}
		} else {
			CHECK(keys[i] + 1 != values[i], "key/value checking",
			      "error: i %d key %d value %lld\n", i, keys[i],
			      values[i]);
		}
		visited[i] = 1;
	}
	for (i = 0; i < max_entries; i++) {
		CHECK(visited[i] != 1, "visited checking",
		      "error: keys array at index %d missing\n", i);
	}
}

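/* Create a plain or per-CPU array map, populate it with one batch update,
 * then read it back with bpf_map_lookup_batch() in chunks of 'step'
 * elements, resuming each call from the batch token of the previous one.
 */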
static void __test_map_lookup_and_update_batch(bool is_pcpu)
{
	int map_fd, *keys, *visited;
	__u32 count, total, total_success;
	const __u32 max_entries = 10;
	__u64 batch = 0;
	int err, step, value_size;
	void *values;
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.elem_flags = 0,
		.flags = 0,
	);

	map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : BPF_MAP_TYPE_ARRAY,
				"array_map", sizeof(int), sizeof(__s64), max_entries, NULL);
	CHECK(map_fd < 0,
	      "bpf_map_create()", "error:%s\n", strerror(errno));

	value_size = sizeof(__s64);
	if (is_pcpu)
		value_size *= nr_cpus;

	keys = calloc(max_entries, sizeof(*keys));
	values = calloc(max_entries, value_size);
	visited = calloc(max_entries, sizeof(*visited));
	CHECK(!keys || !values || !visited, "calloc()", "error:%s\n",
	      strerror(errno));

	/* test 1: lookup in a loop with various steps. */
	total_success = 0;
	for (step = 1; step < max_entries; step++) {
		map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
		map_batch_verify(visited, max_entries, keys, values, is_pcpu);
		memset(keys, 0, max_entries * sizeof(*keys));
		memset(values, 0, max_entries * value_size);
		batch = 0;
		total = 0;
		/* iteratively lookup elements in batches of 'step'
		 * elements each until the whole map has been read.
		 */
		count = step;
		while (true) {
			err = bpf_map_lookup_batch(map_fd,
						   total ? &batch : NULL,
						   &batch, keys + total,
						   values + total * value_size,
						   &count, &opts);

			CHECK((err && errno != ENOENT), "lookup with steps",
			      "error: %s\n", strerror(errno));

			total += count;
			if (err)
				break;

		}

		CHECK(total != max_entries, "lookup with steps",
		      "total = %u, max_entries = %u\n", total, max_entries);

		map_batch_verify(visited, max_entries, keys, values, is_pcpu);

		total_success++;
	}

	CHECK(total_success == 0, "check total_success",
	      "unexpected failure\n");

	free(keys);
	free(values);
	free(visited);
}

static void array_map_batch_ops(void)
{
	__test_map_lookup_and_update_batch(false);
	printf("test_%s:PASS\n", __func__);
}

static void array_percpu_map_batch_ops(void)
{
	__test_map_lookup_and_update_batch(true);
	printf("test_%s:PASS\n", __func__);
}

void test_array_map_batch_ops(void)
{
	nr_cpus = libbpf_num_possible_cpus();

	CHECK(nr_cpus < 0, "nr_cpus checking",
	      "error: get possible cpus failed\n");

	array_map_batch_ops();
	array_percpu_map_batch_ops();
}
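
For reference, below is a minimal, self-contained sketch of the same batch API outside the selftest harness: it creates a plain BPF_MAP_TYPE_ARRAY, writes all entries with one bpf_map_update_batch() call, and reads them back with one bpf_map_lookup_batch() call. This is not part of the repository; it assumes libbpf >= 0.7 (for bpf_map_create()), root or CAP_BPF privileges, and linking against libbpf (e.g. cc minimal_array_batch.c -lbpf). The file name and map name are made up for illustration.

/* minimal_array_batch.c - standalone sketch, not part of the kernel selftests */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	const __u32 max_entries = 10;
	__u32 count = max_entries;
	__u64 out_batch = 0;
	int keys[10];
	__s64 values[10];
	int map_fd, err;
	__u32 i;
	DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts);

	/* Plain array map with int keys and 64-bit values, as in the test. */
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_array",
				sizeof(int), sizeof(__s64), max_entries, NULL);
	if (map_fd < 0) {
		fprintf(stderr, "bpf_map_create: %s\n", strerror(errno));
		return 1;
	}

	for (i = 0; i < max_entries; i++) {
		keys[i] = i;
		values[i] = i + 1;
	}

	/* Write all entries with a single BPF_MAP_UPDATE_BATCH syscall. */
	err = bpf_map_update_batch(map_fd, keys, values, &count, &opts);
	if (err) {
		fprintf(stderr, "bpf_map_update_batch: %s\n", strerror(errno));
		return 1;
	}

	/* Read everything back in one batch starting from the beginning
	 * (in_batch == NULL); ENOENT only signals that the iteration
	 * reached the end of the map.
	 */
	memset(values, 0, sizeof(values));
	count = max_entries;
	err = bpf_map_lookup_batch(map_fd, NULL, &out_batch, keys, values,
				   &count, &opts);
	if (err && errno != ENOENT) {
		fprintf(stderr, "bpf_map_lookup_batch: %s\n", strerror(errno));
		return 1;
	}

	for (i = 0; i < count; i++)
		printf("key %d -> value %lld\n", keys[i], (long long)values[i]);

	close(map_fd);
	return 0;
}

Unlike the selftest above, this sketch requests the whole map in one call rather than looping with a step size, so the batch token in out_batch is only written, never fed back in.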