cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cgroup_link.c (7704B)


      1// SPDX-License-Identifier: GPL-2.0
      2
      3#include <test_progs.h>
      4#include "cgroup_helpers.h"
      5#include "testing_helpers.h"
      6#include "test_cgroup_link.skel.h"
      7
/* Not referenced directly in this file; presumably consumed by the CHECK()
 * macros from test_progs.h for failure-latency reporting — TODO confirm. */
static __u32 duration = 0;
/* One quiet loopback ping with a 1s deadline; output discarded. Each run
 * triggers the attached egress BPF program(s) exactly once per packet. */
#define PING_CMD	"ping -q -c1 -w1 127.0.0.1 > /dev/null"

/* Skeleton handle shared between serial_test_cgroup_link() and
 * ping_and_check(); set up by test_cgroup_link__open_and_load(). */
static struct test_cgroup_link *skel = NULL;
     13int ping_and_check(int exp_calls, int exp_alt_calls)
     14{
     15	skel->bss->calls = 0;
     16	skel->bss->alt_calls = 0;
     17	CHECK_FAIL(system(PING_CMD));
     18	if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
     19		  "exp %d, got %d\n", exp_calls, skel->bss->calls))
     20		return -EINVAL;
     21	if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
     22		  "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
     23		return -EINVAL;
     24	return 0;
     25}
     26
/*
 * End-to-end test of link-based cgroup BPF attachment (BPF_CGROUP_INET_EGRESS).
 *
 * Builds a 4-level cgroup hierarchy, attaches the same egress program at every
 * level via bpf_link, then exercises:
 *   - effective-program queries (count, attach flags, prog IDs),
 *   - mixing links with a legacy BPF_F_ALLOW_MULTI attachment,
 *   - mutual exclusion between a legacy exclusive attachment and links,
 *   - program replacement via bpf_link__update_program() and
 *     compare-exchange via bpf_link_update(BPF_F_REPLACE),
 *   - links surviving closure of the cgroup FDs,
 *   - explicit bpf_link__detach() and the resulting bpf_link_info,
 *   - auto-detach of all programs when the cgroup hierarchy is destroyed.
 *
 * The "serial_" prefix means this test must not run concurrently with others:
 * it joins cgroups and relies on loopback pings as a global side effect.
 */
void serial_test_cgroup_link(void)
{
	struct {
		const char *path;
		int fd;	/* zero-initialized; cleanup only closes fds > 0 */
	} cgs[] = {
		{ "/cg1" },
		{ "/cg1/cg2" },
		{ "/cg1/cg2/cg3" },
		{ "/cg1/cg2/cg3/cg4" },
	};
	int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
	struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
	__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id;
	struct bpf_link_info info;
	int i = 0, err, prog_fd;
	bool detach_legacy = false;

	skel = test_cgroup_link__open_and_load();
	if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
		return;
	/* raw prog fd is needed for the legacy (non-link) attach API calls */
	prog_fd = bpf_program__fd(skel->progs.egress);

	err = setup_cgroup_environment();
	if (CHECK(err, "cg_init", "failed: %d\n", err))
		goto cleanup;

	/* create the nested hierarchy cg1/cg2/cg3/cg4 and keep an fd per level */
	for (i = 0; i < cg_nr; i++) {
		cgs[i].fd = create_and_get_cgroup(cgs[i].path);
		if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
			goto cleanup;
	}

	/* put this process in the deepest cgroup so pings traverse all levels */
	err = join_cgroup(cgs[last_cg].path);
	if (CHECK(err, "cg_join", "fail: %d\n", err))
		goto cleanup;

	/* attach the same egress program at every level, link-based */
	for (i = 0; i < cg_nr; i++) {
		links[i] = bpf_program__attach_cgroup(skel->progs.egress,
						      cgs[i].fd);
		if (!ASSERT_OK_PTR(links[i], "cg_attach"))
			goto cleanup;
	}

	/* one attachment per level => cg_nr invocations per packet */
	ping_and_check(cg_nr, 0);

	/* query the number of effective progs and attach flags in root cg */
	err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
			     &prog_cnt);
	CHECK_FAIL(err);
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
		goto cleanup;

	/* query the number of effective progs in last cg */
	err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, NULL, NULL,
			     &prog_cnt);
	CHECK_FAIL(err);
	/* NOTE(review): attach_flags was NOT refreshed by the query above
	 * (NULL passed for the flags out-param); this re-checks the value
	 * left over from the previous query — confirm that is intentional. */
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
		  cg_nr, prog_cnt))
		goto cleanup;

	/* query the effective prog IDs in last cg */
	err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, &attach_flags,
			     prog_ids, &prog_cnt);
	CHECK_FAIL(err);
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
		  cg_nr, prog_cnt))
		goto cleanup;
	/* all levels hold the very same program, so every ID must match */
	for (i = 1; i < prog_cnt; i++) {
		CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
		      "idx %d, prev id %d, cur id %d\n",
		      i, prog_ids[i - 1], prog_ids[i]);
	}

	/* detach bottom program and ping again */
	bpf_link__destroy(links[last_cg]);
	links[last_cg] = NULL;

	ping_and_check(cg_nr - 1, 0);

	/* mix in with non link-based multi-attachments */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
	if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = true;

	/* a link can coexist with a legacy multi attachment on the same cg */
	links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
						    cgs[last_cg].fd);
	if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
		goto cleanup;

	/* cg_nr link attachments + 1 legacy attachment */
	ping_and_check(cg_nr + 1, 0);

	/* detach link */
	bpf_link__destroy(links[last_cg]);
	links[last_cg] = NULL;

	/* detach legacy */
	err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
	if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = false;

	/* attach legacy exclusive prog attachment */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, 0);
	if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = true;

	/* attempt to mix in with multi-attach bpf_link: must be rejected,
	 * since an exclusive legacy attachment already owns the cgroup */
	tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
					      cgs[last_cg].fd);
	if (!ASSERT_ERR_PTR(tmp_link, "cg_attach_fail")) {
		bpf_link__destroy(tmp_link);
		goto cleanup;
	}

	/* cg_nr - 1 links remain, plus the exclusive legacy attachment */
	ping_and_check(cg_nr, 0);

	/* detach */
	err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
	if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = false;

	ping_and_check(cg_nr - 1, 0);

	/* attach back link-based one */
	links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
						    cgs[last_cg].fd);
	if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
		goto cleanup;

	ping_and_check(cg_nr, 0);

	/* check legacy exclusive prog can't be attached while a link exists */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, 0);
	if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
		/* it unexpectedly attached; undo before bailing out */
		bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
		goto cleanup;
	}

	/* replace BPF programs inside their links for all but first link */
	for (i = 1; i < cg_nr; i++) {
		err = bpf_link__update_program(links[i], skel->progs.egress_alt);
		if (CHECK(err, "prog_upd", "link #%d\n", i))
			goto cleanup;
	}

	/* 1 level still runs egress, the other cg_nr - 1 run egress_alt */
	ping_and_check(1, cg_nr - 1);

	/* Attempt program update with wrong expected BPF program:
	 * links[0] still holds egress, so cmpxchg against egress_alt
	 * must fail with EPERM */
	link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
	link_upd_opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(links[0]),
			      bpf_program__fd(skel->progs.egress_alt),
			      &link_upd_opts);
	if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
		  "unexpectedly succeeded, err %d, errno %d\n", err, -errno))
		goto cleanup;

	/* Compare-exchange single link program from egress to egress_alt */
	link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
	link_upd_opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(links[0]),
			      bpf_program__fd(skel->progs.egress_alt),
			      &link_upd_opts);
	if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
		goto cleanup;

	/* ping: now every level runs egress_alt */
	ping_and_check(0, cg_nr);

	/* close cgroup FDs before detaching links */
	for (i = 0; i < cg_nr; i++) {
		if (cgs[i].fd > 0) {
			close(cgs[i].fd);
			cgs[i].fd = -1;
		}
	}

	/* BPF programs should still get called: links pin the attachments
	 * independently of the cgroup FDs */
	ping_and_check(0, cg_nr);

	/* while attached, link_info must report a non-zero cgroup ID */
	prog_id = link_info_prog_id(links[0], &info);
	CHECK(prog_id == 0, "link_info", "failed\n");
	CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

	err = bpf_link__detach(links[0]);
	if (CHECK(err, "link_detach", "failed %d\n", err))
		goto cleanup;

	/* cgroup_id should be zero in link_info after explicit detach */
	prog_id = link_info_prog_id(links[0], &info);
	CHECK(prog_id == 0, "link_info", "failed\n");
	CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

	/* First BPF program shouldn't be called anymore */
	ping_and_check(0, cg_nr - 1);

	/* leave cgroup and remove them, don't detach programs */
	cleanup_cgroup_environment();

	/* BPF programs should have been auto-detached */
	ping_and_check(0, 0);

cleanup:
	if (detach_legacy)
		bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
				 BPF_CGROUP_INET_EGRESS);

	/* bpf_link__destroy(NULL) is a no-op, so unattached slots are safe */
	for (i = 0; i < cg_nr; i++) {
		bpf_link__destroy(links[i]);
	}
	test_cgroup_link__destroy(skel);

	/* fds are 0 (never created) or -1 (already closed) unless still open */
	for (i = 0; i < cg_nr; i++) {
		if (cgs[i].fd > 0)
			close(cgs[i].fd);
	}
	cleanup_cgroup_environment();
}