get_stackid_cannot_attach.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"

void test_get_stackid_cannot_attach(void)
{
	struct perf_event_attr attr = {
		/* .type = PERF_TYPE_SOFTWARE, */
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.precise_ip = 1,
		.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
		.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
			PERF_SAMPLE_BRANCH_NO_FLAGS |
			PERF_SAMPLE_BRANCH_NO_CYCLES |
			PERF_SAMPLE_BRANCH_CALL_STACK,
		.sample_period = 5000,
		.size = sizeof(struct perf_event_attr),
	};
	struct test_stacktrace_build_id *skel;
	__u32 duration = 0;
	int pmu_fd, err;

	skel = test_stacktrace_build_id__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	/* override program type */
	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);

	err = test_stacktrace_build_id__load(skel);
	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
		goto cleanup;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
		printf("%s:SKIP:cannot open PERF_COUNT_HW_CPU_CYCLES with precise_ip > 0\n",
		       __func__);
		test__skip();
		goto cleanup;
	}
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto cleanup;

	/* no PERF_SAMPLE_CALLCHAIN in sample_type, attach should fail */
	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_no_callchain");
	close(pmu_fd);

	/* add PERF_SAMPLE_CALLCHAIN, attach should succeed */
	attr.sample_type |= PERF_SAMPLE_CALLCHAIN;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);

	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto cleanup;

	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
	close(pmu_fd);

	/* add exclude_callchain_kernel, attach should fail */
	attr.exclude_callchain_kernel = 1;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);

	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto cleanup;

	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	ASSERT_ERR_PTR(skel->links.oncpu, "attach_perf_event_exclude_callchain_kernel");
	close(pmu_fd);

cleanup:
	test_stacktrace_build_id__destroy(skel);
}