cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xadd.c (3060B)


{
	"xadd/w check unaligned stack",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "misaligned stack access off",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"xadd/w check unaligned map",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_1, 1),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = REJECT,
	.errstr = "misaligned value access off",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"xadd/w check unaligned pkt",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
	BPF_MOV64_IMM(BPF_REG_0, 99),
	BPF_JMP_IMM(BPF_JA, 0, 0, 6),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
	BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
	.prog_type = BPF_PROG_TYPE_XDP,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"xadd/w check whether src/dst got mangled, 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 3,
},
{
	"xadd/w check whether src/dst got mangled, 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 42),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.retval = 3,
},