cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bpf_verifier.h (21004B)


/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
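
/* Editorial sketch, not part of the original header: a worked bound showing
 * why 1 << 29 is safe. With umax_value clamped to BPF_MAX_VAR_OFF and with
 * (int)off and (int)size each strictly below 1 << 31, the largest possible
 * sum is
 *
 *   (1 << 29) + (1 << 31) + (1 << 31) < 1 << 33
 *
 * which is nowhere near U64_MAX (~1 << 64), so the computed access offset
 * cannot wrap a u64.
 */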
/* size of type_str_buf in bpf_verifier_env. */
#define TYPE_STR_BUF_LEN 64

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
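
/* Editorial sketch, not part of the original header: how the marks behave on
 * a small fragment, assuming the usual flow in kernel/bpf/verifier.c:
 *
 * 1: r1 = 0;           // r1 gets REG_LIVE_WRITTEN in this state
 * 2: if (r2 > 7) ...   // r2 is read: REG_LIVE_READ64 propagates through
 *                      // parent states until a write mark screens it off
 * 3: r1 += 1;          // r1 is read, but the write mark from insn 1 stops
 *                      // the read mark from reaching this state's parent
 */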

struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from an outer map,
			 * map_uid is non-zero for registers pointing to
			 * inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
	 * "tp" ptr should be invalidated also.  In order to do that,
	 * the reg holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
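
/* Editorial sketch, not part of the original header: how the bounds and
 * var_off fields cooperate for a SCALAR_VALUE. After "r1 &= 0xff" on an
 * unknown scalar the verifier can record
 *
 *   var_off    = tnum(value = 0x0, mask = 0xff)  // only low byte unknown
 *   umin_value = 0, umax_value = 255
 *   smin_value = 0, smax_value = 255
 *
 * so a one-byte access at map_value + r1 is provably in bounds whenever the
 * map value is at least 256 bytes.
 */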

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS		(BPF_DYNPTR_SIZE / BPF_REG_SIZE)

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
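
/* Editorial sketch, not part of the original header: spilling a register
 * with "*(u64 *)(r10 - 8) = r1" fills one bpf_stack_state: each of the
 * BPF_REG_SIZE slot_type[] bytes becomes STACK_SPILL and spilled_ptr keeps
 * a copy of r1's bpf_reg_state. A dynptr occupies BPF_DYNPTR_SIZE == 16
 * bytes, i.e. BPF_DYNPTR_NR_SLOTS == 2 adjacent slots, with
 * dynptr.first_slot set only on the first.
 */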

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	bool in_callback_fn;
	bool in_async_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
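/* Editorial note, not part of the original header: with MAX_BPF_REG == 11
 * and MAX_BPF_STACK == 512 at this kernel version, BPF_ID_MAP_SIZE works
 * out to 11 + 512 / 8 == 75 id pairs per state comparison.
 */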
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 * were safely pruned
	 * 1 - at least one path is being explored.
	 * This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 * This state is an immediate parent of two children.
	 * One is fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalence, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
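
/* Editorial usage sketch, not part of the original header, mirroring how
 * verifier code typically walks spilled registers:
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, frame, reg) {
 *		if (!reg)
 *			continue;	// slot i holds no spilled register
 *		// inspect or invalidate *reg here
 *	}
 */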

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
	};
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}
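
/* Editorial example, not part of the original header: a loader passing
 * log_level = BPF_LOG_LEVEL1, log_size = 65536 and a non-NULL log_buf
 * satisfies bpf_verifier_log_attr_valid(); log_size < 128, log_size >
 * UINT_MAX >> 2, or level bits outside BPF_LOG_MASK are all rejected.
 */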

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_async_cb;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool allow_ptr_to_map_access;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u32 prev_log_len, prev_insn_print_len;
	/* buffer used in reg_type_str() to generate reg_type string */
	char type_str_buf[TYPE_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
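
/* Editorial usage sketch, not part of the original header: verifier code
 * typically fetches the register file of the innermost frame as
 *
 *	struct bpf_reg_state *regs = cur_regs(env);
 *	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
 *
 * where insn is the instruction currently being checked.
 */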

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type);
int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
			     u32 regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		   u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
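
/* Editorial round-trip example, not part of the original header: for a
 * tgt_prog with aux->id == 5 and btf_id == 42, bpf_trampoline_compute_key()
 * returns (5ULL << 32) | 42 == 0x50000002a; bpf_trampoline_unpack_key()
 * then recovers obj_id == 5 and btf_id == 42. In the !tgt_prog case bit 31
 * is set, so BTF-object-based keys cannot collide with prog-id-based ones,
 * and the unpack mask 0x7FFFFFFF strips that flag from btf_id.
 */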

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
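
/* Editorial example, not part of the original header: a register typed
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL (the composed form of the older
 * PTR_TO_MAP_VALUE_OR_NULL) splits as
 *
 *	base_type(type) == PTR_TO_MAP_VALUE
 *	type_flag(type) == PTR_MAYBE_NULL
 *
 * letting checks switch on the base type and handle flags separately.
 */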

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
		prog->aux->dst_prog->type : prog->type;
}

#endif /* _LINUX_BPF_VERIFIER_H */