perf.h
/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __get_sockaddr
#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))

#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field)					\
		((void *)__entry +					\
		 offsetof(typeof(*__entry), __rel_loc_##field) +	\
		 sizeof(__entry->__rel_loc_##field) +			\
		 (__entry->__rel_loc_##field & 0xffff))

#undef __get_rel_dynamic_array_len
#define __get_rel_dynamic_array_len(field)	\
		((__entry->__rel_loc_##field >> 16) & 0xffff)

#undef __get_rel_str
#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))

#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)

#undef __get_rel_sockaddr
#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!bpf_prog_array_valid(event_call) &&			\
	    __builtin_constant_p(!__task) && !__task &&			\
	    hlist_empty(head))						\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
				  event_call, __count, __regs,		\
				  head, __task);			\
}

/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */
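
The accessor macros at the top of this header rely on how the tracing core packs dynamic-array fields: each __data_loc_<field> member is a 32-bit word holding the data's byte offset from the start of the entry in the low 16 bits and its length in the high 16 bits, while __rel_loc_<field> stores the offset relative to the end of the location word itself (hence the offsetof + sizeof terms in __get_rel_dynamic_array). The standalone sketch below packs and decodes a field the same way; it is ordinary userspace C, not kernel code, and the struct, field, and file names are invented for illustration.

/* data_loc_demo.c: hypothetical sketch (not part of perf.h) of the
 * __data_loc packing that __get_dynamic_array() and
 * __get_dynamic_array_len() decode: offset in bits 0-15, length in
 * bits 16-31, offset measured from the start of the entry.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_entry {
	uint32_t __data_loc_msg;	/* (length << 16) | offset */
	char buf[32];			/* variable-length data lives here */
};

int main(void)
{
	struct demo_entry e;
	const char *msg = "hello";
	uint16_t off = offsetof(struct demo_entry, buf);
	uint16_t len = strlen(msg) + 1;

	/* Pack the location word: length in the high half, offset in the low half. */
	e.__data_loc_msg = ((uint32_t)len << 16) | off;
	memcpy((char *)&e + off, msg, len);

	/* Decode it the way the perf accessor macros above do. */
	char *data = (char *)&e + (e.__data_loc_msg & 0xffff);
	uint16_t dlen = (e.__data_loc_msg >> 16) & 0xffff;

	printf("msg=\"%s\" len=%u\n", data, (unsigned int)dlen);
	return 0;
}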