cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ringbuf.c (9309B)
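This is the BPF ring buffer selftest (upstream path: tools/testing/selftests/bpf/prog_tests/ringbuf.c). It exercises the ring buffer's mmap permission rules, sample delivery and bookkeeping, and the BPF_RB_NO_WAKEUP / BPF_RB_FORCE_WAKEUP notification modes.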


// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.lskel.h"

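/* Sentinel value: process_sample() returns -EDONE after seeing the second
 * expected sample, which makes ring_buffer__poll() stop and report it.
 */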
#define EDONE 7777

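/* referenced implicitly by the CHECK() macro from test_progs.h */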
static int duration = 0;

struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

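/* sample_cnt is bumped from the ring buffer callback, which may run on the
 * background poll thread, so it is accessed only via the atomic helpers below.
 */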
static int sample_cnt;

static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

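/* Callback invoked by libbpf once per submitted record; a negative return
 * value aborts the poll/consume loop and is propagated to the caller.
 */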
static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}

static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

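/* Trigger the BPF program twice via getpgid(); across the two calls it
 * submits the two expected samples (333 and 777) and discards one extra
 * record, as the bookkeeping checks below verify.
 */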
static void trigger_samples(void)
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

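/* Background poller: run ring_buffer__poll() with the given timeout and
 * pass its int result back through the thread's void * return value.
 */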
static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

void test_ringbuf(void)
{
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;

	skel = test_ringbuf_lskel__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	skel->maps.ringbuf.max_entries = page_size;

	err = test_ringbuf_lskel__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;
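	/* The ring buffer maps as: one consumer-position page (the only
	 * page user space may write), then the producer-position page and
	 * the data pages, which are read-only for user space. The mmap()/
	 * mprotect()/mremap() calls below check that the kernel enforces
	 * this layout.
	 */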
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writeable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writeable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf_lskel__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

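	/* rec_sz is the record header (BPF_RINGBUF_HDR_SZ) plus the sample
	 * payload; the kernel rounds records up to 8-byte multiples, which
	 * sizeof(struct sample) already satisfies, so prod_pos advances by
	 * exactly rec_sz per record.
	 */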
	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

	/* poll for samples */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications now */
	skel->bss->flags = BPF_RB_NO_WAKEUP;
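	/* with BPF_RB_NO_WAKEUP set, submissions skip the epoll
	 * notification, so the background poll must stay blocked even as
	 * new samples arrive
	 */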

	/* give the background thread a bit of time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but there is no better way to know
	 * that epoll_wait() **DID NOT** unblock in the background thread
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode */
	skel->bss->flags = 0;

	/* produce new samples, no notification should be triggered, because
	 * consumer is now behind
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
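	/* BPF_RB_FORCE_WAKEUP forces a notification on submit even though
	 * the consumer has fallen behind, so the next sample unblocks the
	 * background poll thread
	 */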
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld\n", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf_lskel__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf_lskel__destroy(skel);
}