cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

filter.h (9358B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __TOOLS_LINUX_FILTER_H
#define __TOOLS_LINUX_FILTER_H

#include <linux/bpf.h>

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - BPF_FUNC_unspec) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */

#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#define BPF_LD_IMM64_RAW_FULL(DST, SRC, OFF1, OFF2, IMM1, IMM2)	\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF1,					\
		.imm   = IMM1 }),				\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF2,					\
		.imm   = IMM2 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */

#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_FD, 0, 0,	\
			      MAP_FD, 0)

#define BPF_LD_MAP_VALUE(DST, MAP_FD, VALUE_OFF)		\
	BPF_LD_IMM64_RAW_FULL(DST, BPF_PSEUDO_MAP_VALUE, 0, 0,	\
			      MAP_FD, VALUE_OFF)

/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

#endif /* __TOOLS_LINUX_FILTER_H */
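
Usage note (not part of filter.h): each macro above expands to a struct bpf_insn initializer, so an eBPF program can be written as a plain C array and handed to the kernel with the bpf(2) syscall. The sketch below is illustrative only; it assumes a userspace build with the tree's tools/include directory on the include path, so that <linux/filter.h> resolves to this header (per its __TOOLS_LINUX_FILTER_H guard) rather than the classic-BPF uapi header of the same name, and that the kernel headers provide <linux/bpf.h> and __NR_bpf. It assembles a three-instruction program that always returns 2 and loads it as a socket filter.

/* Illustrative sketch only -- builds a trivial eBPF program from the
 * insn macros above and loads it via the raw bpf(2) syscall.
 */
#include <linux/bpf.h>
#include <linux/filter.h>	/* this header, via -I tools/include */
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* r0 = 1; r0 += 1; exit -- the program always returns 2. */
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	union bpf_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (uint64_t)(unsigned long)prog;	/* pointer passed as u64 */
	attr.insn_cnt  = sizeof(prog) / sizeof(prog[0]);
	attr.license   = (uint64_t)(unsigned long)"GPL";

	/* bpf(2) has no libc wrapper; invoke it through syscall(2). */
	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	return fd < 0 ? 1 : 0;
}

Error handling and the verifier log (log_buf, log_size and log_level in the same union bpf_attr) are omitted for brevity.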