cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bpf_mod_race.c (6734B)


// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include <stdatomic.h>
#include <test_progs.h>
#include <sys/syscall.h>
#include <linux/module.h>
#include <linux/userfaultfd.h>

#include "ksym_race.skel.h"
#include "bpf_mod_race.skel.h"
#include "kfunc_call_race.skel.h"

/* This test crafts a race between btf_try_get_module and do_init_module, and
 * checks whether btf_try_get_module handles the invocation for a well-formed
 * but uninitialized module correctly. Unless the module has completed its
 * initcalls, the verifier should fail the program load and return ENXIO.
 *
 * userfaultfd is used to trigger a fault in an fmod_ret program and make it
 * sleep; the BPF program under test is then loaded and the verifier's return
 * value is inspected. After this, the userfaultfd is closed so that the
 * module loading thread makes forward progress, and fmod_ret injects an
 * error so that the module load fails and the module is freed.
 *
 * If the verifier succeeded in loading the supplied program, it would end up
 * taking a reference to the freed module and trigger a crash when the
 * program fd is closed later. This is true for both kfuncs and ksyms. In
 * both cases, the crash is triggered inside bpf_prog_free_deferred, when the
 * module reference is finally released.
 */

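/* Each subtest supplies its own skeleton open/load and destroy callbacks, so
 * that a single test body can exercise both the ksym and the kfunc path.
 */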
struct test_config {
	const char *str_open;
	void *(*bpf_open_and_load)();
	void (*bpf_destroy)(void *);
};

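/* Progress of the module-loading thread, observed by the main test thread:
 * _TS_INVALID means the load has not completed yet, TS_MODULE_LOAD means
 * finit_module() unexpectedly succeeded, and TS_MODULE_LOAD_FAIL means it
 * failed as the test expects.
 */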
enum bpf_test_state {
	_TS_INVALID,
	TS_MODULE_LOAD,
	TS_MODULE_LOAD_FAIL,
};

static _Atomic enum bpf_test_state state = _TS_INVALID;

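/* Raw syscall wrappers: libc does not necessarily provide finit_module() and
 * delete_module() wrappers, so invoke the syscalls directly.
 */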
static int sys_finit_module(int fd, const char *param_values, int flags)
{
	return syscall(__NR_finit_module, fd, param_values, flags);
}

static int sys_delete_module(const char *name, unsigned int flags)
{
	return syscall(__NR_delete_module, name, flags);
}

static int load_module(const char *mod)
{
	int ret, fd;

	fd = open(mod, O_RDONLY);
	if (fd < 0)
		return fd;

	ret = sys_finit_module(fd, "", 0);
	close(fd);
	if (ret < 0)
		return ret;
	return 0;
}

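/* Runs in a separate thread: the module load is expected to fail because the
 * attached fmod_ret program injects an error into the module's init path.
 * The outcome is published through the atomic state variable.
 */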
static void *load_module_thread(void *p)
{
	if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail"))
		atomic_store(&state, TS_MODULE_LOAD);
	else
		atomic_store(&state, TS_MODULE_LOAD_FAIL);
	return p;
}

static int sys_userfaultfd(int flags)
{
	return syscall(__NR_userfaultfd, flags);
}

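/* Register the page at fault_addr with userfaultfd in MISSING mode, so that
 * the first access to the never-populated page blocks until the fault is
 * resolved or the descriptor is closed. Returns the uffd on success and a
 * negative value on error.
 */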
static int test_setup_uffd(void *fault_addr)
{
	struct uffdio_register uffd_register = {};
	struct uffdio_api uffd_api = {};
	int uffd;

	uffd = sys_userfaultfd(O_CLOEXEC);
	if (uffd < 0)
		return -errno;

	uffd_api.api = UFFD_API;
	uffd_api.features = 0;
	if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
		close(uffd);
		return -1;
	}

	uffd_register.range.start = (unsigned long)fault_addr;
	uffd_register.range.len = 4096;
	uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
		close(uffd);
		return -1;
	}
	return uffd;
}

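/* Core test body, parameterized by config:
 *   1. Unload bpf_testmod and reload it from a separate thread, with an
 *      fmod_ret program attached that blocks module init on fault_addr.
 *   2. While the module is stuck in MODULE_STATE_COMING, try to load the
 *      config-supplied BPF program, which references the module's ksym or
 *      kfunc, and expect the verifier to reject it with ENXIO.
 *   3. Unblock the module load by closing the userfaultfd; fmod_ret injects
 *      an error, so the module load fails and the module is freed.
 */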
static void test_bpf_mod_race_config(const struct test_config *config)
{
	void *fault_addr, *skel_fail;
	struct bpf_mod_race *skel;
	struct uffd_msg uffd_msg;
	pthread_t load_mod_thrd;
	_Atomic int *blockingp;
	int uffd, ret;

	fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
		return;

	if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod"))
		goto end_mmap;

	skel = bpf_mod_race__open();
	if (!ASSERT_OK_PTR(skel, "bpf_mod_race__open"))
		goto end_module;

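	/* Configuration consumed by the companion bpf_mod_race BPF object
	 * (see bpf_mod_race.skel.h): the tgid it should act on, the error
	 * value to inject into the module init path, and the user address
	 * whose access blocks on the userfaultfd-registered page, as
	 * described in the header comment above.
	 */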
	skel->rodata->bpf_mod_race_config.tgid = getpid();
	skel->rodata->bpf_mod_race_config.inject_error = -4242;
	skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
	if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod_race__load"))
		goto end_destroy;
	blockingp = (_Atomic int *)&skel->bss->bpf_blocking;

	if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_race__attach"))
		goto end_destroy;

	uffd = test_setup_uffd(fault_addr);
	if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
		goto end_destroy;

	if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
		       "load module thread"))
		goto end_uffd;

	/* Now we either fail loading the module or block in the bpf prog; spin to find out */
	while (!atomic_load(&state) && !atomic_load(blockingp))
		;
	if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
		goto end_join;
	if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
		pthread_kill(load_mod_thrd, SIGKILL);
		goto end_uffd;
	}

	/* We might have set bpf_blocking to 1 but may not have blocked in
	 * bpf_copy_from_user yet. Read from the userfaultfd descriptor to verify that.
	 */
	if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
		       "read uffd block event"))
		goto end_join;
	if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
		goto end_join;

	/* We know that load_mod_thrd is blocked in the fmod_ret program and the
	 * module state is still MODULE_STATE_COMING because mod->init hasn't
	 * returned. This is the time to try to load a program referencing the
	 * module and check whether we get ENXIO from the verifier.
	 */
	skel_fail = config->bpf_open_and_load();
	ret = errno;
	if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
		/* Close uffd to unblock load_mod_thrd */
		close(uffd);
		uffd = -1;
		while (atomic_load(blockingp) != 2)
			;
		ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
		config->bpf_destroy(skel_fail);
		goto end_join;
	}
	ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
	ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");

	close(uffd);
	uffd = -1;
end_join:
	pthread_join(load_mod_thrd, NULL);
	if (uffd < 0)
		ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
end_uffd:
	if (uffd >= 0)
		close(uffd);
end_destroy:
	bpf_mod_race__destroy(skel);
	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
end_module:
	sys_delete_module("bpf_testmod", 0);
	ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod");
end_mmap:
	munmap(fault_addr, 4096);
	atomic_store(&state, _TS_INVALID);
}

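/* The two flavours of the race: a program referencing a module ksym (which
 * would leave a dangling used_btfs entry) and a program calling a module
 * kfunc (dangling kfunc_btf_tab entry). The callbacks are cast through
 * void * so the common test body can drive either skeleton.
 */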
static const struct test_config ksym_config = {
	.str_open = "ksym_race__open_and_load",
	.bpf_open_and_load = (void *)ksym_race__open_and_load,
	.bpf_destroy = (void *)ksym_race__destroy,
};

static const struct test_config kfunc_config = {
	.str_open = "kfunc_call_race__open_and_load",
	.bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
	.bpf_destroy = (void *)kfunc_call_race__destroy,
};

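/* Entry point picked up by the test_progs runner; the serial_test_ prefix
 * keeps it from running in parallel with other tests, since it unloads and
 * reloads bpf_testmod. It can be run on its own with something like:
 *   ./test_progs -t bpf_mod_race
 */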
void serial_test_bpf_mod_race(void)
{
	if (test__start_subtest("ksym (used_btfs UAF)"))
		test_bpf_mod_race_config(&ksym_config);
	if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
		test_bpf_mod_race_config(&kfunc_config);
}