bpf.c (9043B)
1// SPDX-License-Identifier: GPL-2.0 2#include <errno.h> 3#include <stdio.h> 4#include <stdlib.h> 5#include <sys/epoll.h> 6#include <sys/types.h> 7#include <sys/stat.h> 8#include <fcntl.h> 9#include <util/record.h> 10#include <util/util.h> 11#include <util/bpf-loader.h> 12#include <util/evlist.h> 13#include <linux/filter.h> 14#include <linux/kernel.h> 15#include <linux/string.h> 16#include <api/fs/fs.h> 17#include <perf/mmap.h> 18#include "tests.h" 19#include "llvm.h" 20#include "debug.h" 21#include "parse-events.h" 22#include "util/mmap.h" 23#define NR_ITERS 111 24#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test" 25 26#ifdef HAVE_LIBBPF_SUPPORT 27#include <linux/bpf.h> 28#include <bpf/bpf.h> 29 30static int epoll_pwait_loop(void) 31{ 32 int i; 33 34 /* Should fail NR_ITERS times */ 35 for (i = 0; i < NR_ITERS; i++) 36 epoll_pwait(-(i + 1), NULL, 0, 0, NULL); 37 return 0; 38} 39 40#ifdef HAVE_BPF_PROLOGUE 41 42static int llseek_loop(void) 43{ 44 int fds[2], i; 45 46 fds[0] = open("/dev/null", O_RDONLY); 47 fds[1] = open("/dev/null", O_RDWR); 48 49 if (fds[0] < 0 || fds[1] < 0) 50 return -1; 51 52 for (i = 0; i < NR_ITERS; i++) { 53 lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET); 54 lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET); 55 } 56 close(fds[0]); 57 close(fds[1]); 58 return 0; 59} 60 61#endif 62 63static struct { 64 enum test_llvm__testcase prog_id; 65 const char *name; 66 const char *msg_compile_fail; 67 const char *msg_load_fail; 68 int (*target_func)(void); 69 int expect_result; 70 bool pin; 71} bpf_testcase_table[] = { 72 { 73 .prog_id = LLVM_TESTCASE_BASE, 74 .name = "[basic_bpf_test]", 75 .msg_compile_fail = "fix 'perf test LLVM' first", 76 .msg_load_fail = "load bpf object failed", 77 .target_func = &epoll_pwait_loop, 78 .expect_result = (NR_ITERS + 1) / 2, 79 }, 80 { 81 .prog_id = LLVM_TESTCASE_BASE, 82 .name = "[bpf_pinning]", 83 .msg_compile_fail = "fix kbuild first", 84 .msg_load_fail = "check your vmlinux setting?", 85 .target_func = &epoll_pwait_loop, 86 .expect_result = (NR_ITERS + 1) / 2, 87 .pin = true, 88 }, 89#ifdef HAVE_BPF_PROLOGUE 90 { 91 .prog_id = LLVM_TESTCASE_BPF_PROLOGUE, 92 .name = "[bpf_prologue_test]", 93 .msg_compile_fail = "fix kbuild first", 94 .msg_load_fail = "check your vmlinux setting?", 95 .target_func = &llseek_loop, 96 .expect_result = (NR_ITERS + 1) / 4, 97 }, 98#endif 99}; 100 101static int do_test(struct bpf_object *obj, int (*func)(void), 102 int expect) 103{ 104 struct record_opts opts = { 105 .target = { 106 .uid = UINT_MAX, 107 .uses_mmap = true, 108 }, 109 .freq = 0, 110 .mmap_pages = 256, 111 .default_interval = 1, 112 }; 113 114 char pid[16]; 115 char sbuf[STRERR_BUFSIZE]; 116 struct evlist *evlist; 117 int i, ret = TEST_FAIL, err = 0, count = 0; 118 119 struct parse_events_state parse_state; 120 struct parse_events_error parse_error; 121 122 parse_events_error__init(&parse_error); 123 bzero(&parse_state, sizeof(parse_state)); 124 parse_state.error = &parse_error; 125 INIT_LIST_HEAD(&parse_state.list); 126 127 err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL); 128 parse_events_error__exit(&parse_error); 129 if (err || list_empty(&parse_state.list)) { 130 pr_debug("Failed to add events selected by BPF\n"); 131 return TEST_FAIL; 132 } 133 134 snprintf(pid, sizeof(pid), "%d", getpid()); 135 pid[sizeof(pid) - 1] = '\0'; 136 opts.target.tid = opts.target.pid = pid; 137 138 /* Instead of evlist__new_default, don't add default events */ 139 evlist = evlist__new(); 140 if (!evlist) { 141 pr_debug("Not enough memory to create evlist\n"); 142 return TEST_FAIL; 143 } 144 145 err = evlist__create_maps(evlist, &opts.target); 146 if (err < 0) { 147 pr_debug("Not enough memory to create thread/cpu maps\n"); 148 goto out_delete_evlist; 149 } 150 151 evlist__splice_list_tail(evlist, &parse_state.list); 152 evlist->core.nr_groups = parse_state.nr_groups; 153 154 evlist__config(evlist, &opts, NULL); 155 156 err = evlist__open(evlist); 157 if (err < 0) { 158 pr_debug("perf_evlist__open: %s\n", 159 str_error_r(errno, sbuf, sizeof(sbuf))); 160 goto out_delete_evlist; 161 } 162 163 err = evlist__mmap(evlist, opts.mmap_pages); 164 if (err < 0) { 165 pr_debug("evlist__mmap: %s\n", 166 str_error_r(errno, sbuf, sizeof(sbuf))); 167 goto out_delete_evlist; 168 } 169 170 evlist__enable(evlist); 171 (*func)(); 172 evlist__disable(evlist); 173 174 for (i = 0; i < evlist->core.nr_mmaps; i++) { 175 union perf_event *event; 176 struct mmap *md; 177 178 md = &evlist->mmap[i]; 179 if (perf_mmap__read_init(&md->core) < 0) 180 continue; 181 182 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 183 const u32 type = event->header.type; 184 185 if (type == PERF_RECORD_SAMPLE) 186 count ++; 187 } 188 perf_mmap__read_done(&md->core); 189 } 190 191 if (count != expect * evlist->core.nr_entries) { 192 pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect * evlist->core.nr_entries, count); 193 goto out_delete_evlist; 194 } 195 196 ret = TEST_OK; 197 198out_delete_evlist: 199 evlist__delete(evlist); 200 return ret; 201} 202 203static struct bpf_object * 204prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name) 205{ 206 struct bpf_object *obj; 207 208 obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name); 209 if (IS_ERR(obj)) { 210 pr_debug("Compile BPF program failed.\n"); 211 return NULL; 212 } 213 return obj; 214} 215 216static int __test__bpf(int idx) 217{ 218 int ret; 219 void *obj_buf; 220 size_t obj_buf_sz; 221 struct bpf_object *obj; 222 223 ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz, 224 bpf_testcase_table[idx].prog_id, 225 false, NULL); 226 if (ret != TEST_OK || !obj_buf || !obj_buf_sz) { 227 pr_debug("Unable to get BPF object, %s\n", 228 bpf_testcase_table[idx].msg_compile_fail); 229 if ((idx == 0) || (ret == TEST_SKIP)) 230 return TEST_SKIP; 231 else 232 return TEST_FAIL; 233 } 234 235 obj = prepare_bpf(obj_buf, obj_buf_sz, 236 bpf_testcase_table[idx].name); 237 if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) { 238 if (!obj) 239 pr_debug("Fail to load BPF object: %s\n", 240 bpf_testcase_table[idx].msg_load_fail); 241 else 242 pr_debug("Success unexpectedly: %s\n", 243 bpf_testcase_table[idx].msg_load_fail); 244 ret = TEST_FAIL; 245 goto out; 246 } 247 248 if (obj) { 249 ret = do_test(obj, 250 bpf_testcase_table[idx].target_func, 251 bpf_testcase_table[idx].expect_result); 252 if (ret != TEST_OK) 253 goto out; 254 if (bpf_testcase_table[idx].pin) { 255 int err; 256 257 if (!bpf_fs__mount()) { 258 pr_debug("BPF filesystem not mounted\n"); 259 ret = TEST_FAIL; 260 goto out; 261 } 262 err = mkdir(PERF_TEST_BPF_PATH, 0777); 263 if (err && errno != EEXIST) { 264 pr_debug("Failed to make perf_test dir: %s\n", 265 strerror(errno)); 266 ret = TEST_FAIL; 267 goto out; 268 } 269 if (bpf_object__pin(obj, PERF_TEST_BPF_PATH)) 270 ret = TEST_FAIL; 271 if (rm_rf(PERF_TEST_BPF_PATH)) 272 ret = TEST_FAIL; 273 } 274 } 275 276out: 277 free(obj_buf); 278 bpf__clear(); 279 return ret; 280} 281 282static int check_env(void) 283{ 284 LIBBPF_OPTS(bpf_prog_load_opts, opts); 285 int err; 286 char license[] = "GPL"; 287 288 struct bpf_insn insns[] = { 289 BPF_MOV64_IMM(BPF_REG_0, 1), 290 BPF_EXIT_INSN(), 291 }; 292 293 err = fetch_kernel_version(&opts.kern_version, NULL, 0); 294 if (err) { 295 pr_debug("Unable to get kernel version\n"); 296 return err; 297 } 298 err = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, license, insns, 299 ARRAY_SIZE(insns), &opts); 300 if (err < 0) { 301 pr_err("Missing basic BPF support, skip this test: %s\n", 302 strerror(errno)); 303 return err; 304 } 305 close(err); 306 307 return 0; 308} 309 310static int test__bpf(int i) 311{ 312 int err; 313 314 if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table)) 315 return TEST_FAIL; 316 317 if (geteuid() != 0) { 318 pr_debug("Only root can run BPF test\n"); 319 return TEST_SKIP; 320 } 321 322 if (check_env()) 323 return TEST_SKIP; 324 325 err = __test__bpf(i); 326 return err; 327} 328#endif 329 330static int test__basic_bpf_test(struct test_suite *test __maybe_unused, 331 int subtest __maybe_unused) 332{ 333#ifdef HAVE_LIBBPF_SUPPORT 334 return test__bpf(0); 335#else 336 pr_debug("Skip BPF test because BPF support is not compiled\n"); 337 return TEST_SKIP; 338#endif 339} 340 341static int test__bpf_pinning(struct test_suite *test __maybe_unused, 342 int subtest __maybe_unused) 343{ 344#ifdef HAVE_LIBBPF_SUPPORT 345 return test__bpf(1); 346#else 347 pr_debug("Skip BPF test because BPF support is not compiled\n"); 348 return TEST_SKIP; 349#endif 350} 351 352static int test__bpf_prologue_test(struct test_suite *test __maybe_unused, 353 int subtest __maybe_unused) 354{ 355#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_BPF_PROLOGUE) 356 return test__bpf(2); 357#else 358 pr_debug("Skip BPF test because BPF support is not compiled\n"); 359 return TEST_SKIP; 360#endif 361} 362 363 364static struct test_case bpf_tests[] = { 365#ifdef HAVE_LIBBPF_SUPPORT 366 TEST_CASE("Basic BPF filtering", basic_bpf_test), 367 TEST_CASE_REASON("BPF pinning", bpf_pinning, 368 "clang isn't installed or environment missing BPF support"), 369#ifdef HAVE_BPF_PROLOGUE 370 TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, 371 "clang isn't installed or environment missing BPF support"), 372#else 373 TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"), 374#endif 375#else 376 TEST_CASE_REASON("Basic BPF filtering", basic_bpf_test, "not compiled in"), 377 TEST_CASE_REASON("BPF pinning", bpf_pinning, "not compiled in"), 378 TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"), 379#endif 380 { .name = NULL, } 381}; 382 383struct test_suite suite__bpf = { 384 .desc = "BPF filter", 385 .test_cases = bpf_tests, 386};