bpf_helpers.h
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__

/*
 * Note that bpf programs need to include either
 * vmlinux.h (auto-generated from BTF) or linux/types.h
 * in advance since bpf_helper_defs.h uses such types
 * as __u64.
 */
#include "bpf_helper_defs.h"

#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]

/*
 * Helper macro to place programs, maps, license in
 * different sections in elf_bpf file. Section names
 * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
 * extern variables, etc).
 * To allow use of SEC() with externs (e.g., for extern .maps declarations),
 * make sure __attribute__((unused)) doesn't trigger compilation warning.
 */
#define SEC(name) \
	_Pragma("GCC diagnostic push")					    \
	_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"")	    \
	__attribute__((section(name), used))				    \
	_Pragma("GCC diagnostic pop")					    \

/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))

#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif

/*
 * Use __hidden attribute to mark a non-static BPF subprogram effectively
 * static for BPF verifier's verification algorithm purposes, allowing more
 * extensive and permissive BPF verification process, taking into account
 * subprogram's caller context.
 */
#define __hidden __attribute__((visibility("hidden")))
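/*
 * Illustrative sketch (not part of the original header): one common way the
 * macros above are combined in a BPF program. The map and program names are
 * hypothetical, and the snippet assumes vmlinux.h (or linux/types.h plus
 * linux/bpf.h) has already been included to provide __u32, __u64 and
 * BPF_MAP_TYPE_HASH:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counts SEC(".maps");
 *
 *	char LICENSE[] SEC("license") = "GPL";
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int handle_execve(void *ctx)
 *	{
 *		return 0;
 *	}
 */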
/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
 * any system-level headers (such as stddef.h, linux/version.h, etc), and
 * commonly-used macros like NULL and KERNEL_VERSION aren't available through
 * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
 * them on their own. So as a convenience, provide such definitions here.
 */
#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#endif

/*
 * Helper macros to manipulate data structures
 */
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
#endif
#ifndef container_of
#define container_of(ptr, type, member)				\
	({								\
		void *__mptr = (void *)(ptr);				\
		((type *)(__mptr - offsetof(type, member)));		\
	})
#endif

/*
 * Compiler (optimization) barrier.
 */
#ifndef barrier
#define barrier() asm volatile("" ::: "memory")
#endif

/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
 * compiler believe that there is some black box modification of a given
 * variable and thus prevents compiler from making extra assumption about its
 * value and potential simplifications and optimizations on this variable.
 *
 * E.g., compiler might often delay or even omit 32-bit to 64-bit casting of
 * a variable, making some code patterns unverifiable. Putting barrier_var()
 * in place will ensure that cast is performed before the barrier_var()
 * invocation, because compiler has to pessimistically assume that embedded
 * asm section might perform some extra operations on that variable.
 *
 * This is a variable-specific variant of more global barrier().
 */
#ifndef barrier_var
#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
#endif

/*
 * Helper macro to throw a compilation error if __bpf_unreachable() gets
 * built into the resulting code. This works given BPF back end does not
 * implement __builtin_trap(). This is useful to assert that certain paths
 * of the program code are never used and hence eliminated by the compiler.
 *
 * For example, consider a switch statement that covers known cases used by
 * the program. __bpf_unreachable() can then reside in the default case. If
 * the program gets extended such that a case is not covered in the switch
 * statement, then it will throw a build error due to the default case not
 * being compiled out.
 */
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif

/*
 * Helper function to perform a tail call with a constant/immediate map slot.
 */
#if __clang_major__ >= 8 && defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();

	/*
	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
	 * pointer) and r3 (constant map index) from _different paths_ ending
	 * up at the _same_ call insn as otherwise we won't be able to use the
	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
	 * tracking for prog array pokes") for details on verifier tracking.
	 *
	 * Note on clobber list: we need to stay in-line with BPF calling
	 * convention, so even if we don't end up using r0, r4, r5, we need
	 * to mark them as clobber so that LLVM doesn't end up using them
	 * before / after the call.
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
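/*
 * Illustrative sketch (not part of the original header): invoking
 * bpf_tail_call_static() with a compile-time-constant slot into a
 * BPF_MAP_TYPE_PROG_ARRAY map. Map and program names are hypothetical, and
 * BPF_MAP_TYPE_PROG_ARRAY, struct xdp_md and XDP_PASS are assumed to come
 * from vmlinux.h or the UAPI headers:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 2);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} progs SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatch(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call_static(ctx, &progs, 0);
 *		return XDP_PASS;
 *	}
 *
 * The return statement is reached only if slot 0 is empty or the tail call
 * fails, since a successful tail call does not return to the caller.
 */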
/*
 * Helper structure used by eBPF C program
 * to describe BPF map attributes to libbpf loader
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
} __attribute__((deprecated("use BTF-defined maps in .maps section")));

enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};

enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))

#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#endif
#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#endif
#ifndef ___bpf_narg
#define ___bpf_narg(...) \
	___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif

#define ___bpf_fill0(arr, p, x) do {} while (0)
#define ___bpf_fill1(arr, p, x) arr[p] = x
#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
#define ___bpf_fill(arr, args...) \
	___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)

/*
 * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
 * in a structure.
 */
#define BPF_SEQ_PRINTF(seq, fmt, args...)			\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_seq_printf(seq, ___fmt, sizeof(___fmt),		\
		       ___param, sizeof(___param));		\
})

/*
 * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
 * an array of u64.
 */
#define BPF_SNPRINTF(out, out_size, fmt, args...)		\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_snprintf(out, out_size, ___fmt,			\
		     ___param, sizeof(___param));		\
})

#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif

#define __bpf_printk(fmt, ...)				\
({							\
	BPF_PRINTK_FMT_MOD char ____fmt[] = fmt;	\
	bpf_trace_printk(____fmt, sizeof(____fmt),	\
			 ##__VA_ARGS__);		\
})

/*
 * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
 * instead of an array of u64.
 */
#define __bpf_vprintk(fmt, args...)				\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_trace_vprintk(___fmt, sizeof(___fmt),		\
			  ___param, sizeof(___param));		\
})

/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
 * Otherwise use __bpf_vprintk
 */
#define ___bpf_pick_printk(...) \
	___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk,	\
		   __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk,		\
		   __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
		   __bpf_printk /*1*/, __bpf_printk /*0*/)

/* Helper macro to print out debug messages */
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)

#endif
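/*
 * Illustrative sketch (not part of the original header): bpf_printk() picks
 * __bpf_printk (bpf_trace_printk) for up to 3 format arguments and
 * __bpf_vprintk (bpf_trace_vprintk) beyond that, so both calls below expand
 * to valid helper invocations; the 4-argument form relies on the kernel
 * providing bpf_trace_vprintk. Variable names are hypothetical:
 *
 *	char buf[64];
 *	long pid = 1234, uid = 1000, gid = 1000, tgid = 1234;
 *
 *	bpf_printk("pid=%ld uid=%ld", pid, uid);
 *	bpf_printk("pid=%ld uid=%ld gid=%ld tgid=%ld", pid, uid, gid, tgid);
 *	BPF_SNPRINTF(buf, sizeof(buf), "pid=%ld uid=%ld", pid, uid);
 */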