cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic_fetch.c (6701B)
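Verifier test cases for BPF atomic read-modify-write instructions with BPF_FETCH: four hand-written cases that probe pointer-leak handling when a spilled map pointer is targeted by an atomic fetch, followed by a macro-generated matrix of functional fetch tests across register pairs and operations (ADD, AND, OR, XOR, XCHG).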


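/* The four tests below spill a map pointer to a stack slot and apply an
 * atomic AND with -1 (a no-op on the value) using BPF_FETCH, so the old
 * value -- the pointer -- comes back either through the stack slot or in
 * the source register. Storing it into map memory would leak a kernel
 * address, which the verifier must forbid for unprivileged programs.
 */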
{
	"atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "leaking pointer from stack off -8",
},
{
	"atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "leaking pointer from stack off -8",
},
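/* Same leak attempts, but with a 32-bit (BPF_W) atomic on the 64-bit
 * spill. The verifier treats the word-sized access to the slot holding a
 * spilled 64-bit pointer as an invalid-size register fill, so both
 * programs are rejected outright, even for privileged users.
 */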
{
	"atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = REJECT,
	.errstr = "invalid size of register fill",
},
{
	"atomic w/fetch and address leakage of (map ptr & -1) via returned value",
	.insns = {
		BPF_LD_IMM64(BPF_REG_1, -1),
		BPF_LD_MAP_FD(BPF_REG_8, 0),
		BPF_LD_MAP_FD(BPF_REG_9, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
		BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
		BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_array_48b = { 2, 4 },
	.result = REJECT,
	.errstr = "invalid size of register fill",
},
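/* Generate a functional test for one atomic fetch operation: seed a stack
 * slot with operand1, apply op with operand2, then verify both the fetched
 * old value (returned in src_reg) and the resulting memory contents
 * (expect). Instantiated below for ADD/AND/OR/XOR/XCHG across several
 * src/dst register pairs, including R0.
 */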
#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
	{								\
		"atomic fetch " #op ", src=" #src_reg " dst=" #dst_reg,	\
		.insns = {						\
			/* u64 val = operand1; */			\
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, operand1),	\
			/* u64 old = atomic_fetch_<op>(&val, operand2); */ \
			BPF_MOV64_REG(dst_reg, BPF_REG_10),		\
			BPF_MOV64_IMM(src_reg, operand2),		\
			BPF_ATOMIC_OP(BPF_DW, op,			\
				      dst_reg, src_reg, -8),		\
			/* if (old != operand1) exit(1); */		\
			BPF_JMP_IMM(BPF_JEQ, src_reg, operand1, 2),	\
			BPF_MOV64_IMM(BPF_REG_0, 1),			\
			BPF_EXIT_INSN(),				\
			/* if (val != expect) exit(2); */		\
			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),	\
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, expect, 2),	\
			BPF_MOV64_IMM(BPF_REG_0, 2),			\
			BPF_EXIT_INSN(),				\
			/* exit(0); */					\
			BPF_MOV64_IMM(BPF_REG_0, 0),			\
			BPF_EXIT_INSN(),				\
		},							\
		.result = ACCEPT,					\
	}
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XCHG, 0x011, 0x011),
#undef __ATOMIC_FETCH_OP_TEST
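
These fragments are built into the test table of tools/testing/selftests/bpf/test_verifier.c, which runs each program through the verifier in privileged mode and, where .result_unpriv is set, in unprivileged mode as well. For reference, the fetch semantics the macro asserts match the usual fetch-and-op builtins; a minimal userspace sketch (not part of this file, using the GCC/Clang __atomic builtins) of the BPF_ADD | BPF_FETCH case:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 1;	/* operand1, as in the BPF_REG_10[-8] slot */

	/* Fetch-add returns the pre-op value, like BPF_ADD | BPF_FETCH. */
	uint64_t old = __atomic_fetch_add(&val, 2, __ATOMIC_SEQ_CST);

	assert(old == 1);	/* the fetched value is the original */
	assert(val == 3);	/* memory now holds operand1 + operand2 */
	return 0;
}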