cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

bpf_get_stack.c (2997B)


      1{
	/*
	 * Verifier test: after bpf_get_stack() the verifier must track R0 as a
	 * bounded scalar so a second call using the remaining buffer space is
	 * accepted.  Layout: a 48-byte map value is used as the stack buffer;
	 * the first call fills up to half of it, the second call gets the rest.
	 */
      2	"bpf_get_stack return R0 within range",
      3	.insns = {
	/* R6 = ctx (saved for the helper calls below) */
      4	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	/* zero key at fp-8, then R2 = &key */
      5	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
      6	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
      7	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	/* map fd patched in by .fixup_map_hash_48b below (insn index 4) */
      8	BPF_LD_MAP_FD(BPF_REG_1, 0),
      9	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* lookup failed -> jump to the final EXIT (offset 28 lands on insn 39) */
     10	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
	/* R7 = map value (buffer), R9 = bytes still free in the buffer */
     11	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
     12	BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
	/* first call: bpf_get_stack(ctx, buf, size/2, flags=256).
	 * NOTE(review): 256 is presumably BPF_F_USER_STACK (1 << 8) — confirm
	 * against uapi/linux/bpf.h. */
     13	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
     14	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
     15	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
     16	BPF_MOV64_IMM(BPF_REG_4, 256),
     17	BPF_EMIT_CALL(BPF_FUNC_get_stack),
     18	BPF_MOV64_IMM(BPF_REG_1, 0),
	/* R8 = (s32)R0: sign-extend the 32-bit return via shift pair */
     19	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
     20	BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
     21	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
	/* helper returned < 0 -> EXIT */
     22	BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16),
	/* R9 -= bytes written; R2 = buf + bytes written (next write position) */
     23	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
     24	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
     25	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
	/* R1 = (s32)R9 — sign-extended remaining size */
     26	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
     27	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
     28	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
	/* R3 = end of the region the second call would write */
     29	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
     30	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
	/* R1 = buf + size/2: if write end would reach past it, skip 2nd call */
     31	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
     32	BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
     33	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
     34	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
	/* second call: bpf_get_stack(ctx, buf+ret, remaining, 0) — the verifier
	 * must accept this thanks to the range checks above */
     35	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
     36	BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
     37	BPF_MOV64_IMM(BPF_REG_4, 0),
     38	BPF_EMIT_CALL(BPF_FUNC_get_stack),
     39	BPF_EXIT_INSN(),
     40	},
	/* patch a 48-byte hash map fd into the BPF_LD_MAP_FD at insn index 4 */
     41	.fixup_map_hash_48b = { 4 },
     42	.result = ACCEPT,
     43	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
     44},
     45{
	/*
	 * Verifier test: bpf_get_task_stack()'s positive return value must be
	 * refined so that passing it as the length to bpf_seq_write() (with
	 * the same 48-byte buffer) is accepted.  Load-only test (.runs = -1).
	 */
     46	"bpf_get_task_stack return R0 range is refined",
     47	.insns = {
     48	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
     49	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq
     50	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task
     51	BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b
	/* zero key at fp-8; R0 = map value used as the 48-byte stack buffer */
     52	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
     53	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
     54	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
     55	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	/* bail out with 0 if the lookup returned NULL */
     56	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
     57	BPF_MOV64_IMM(BPF_REG_0, 0),
     58	BPF_EXIT_INSN(),
	/* bail out with 0 if ctx->task is NULL */
     59	BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
     60	BPF_MOV64_IMM(BPF_REG_0, 0),
     61	BPF_EXIT_INSN(),
     62
	/* bpf_get_task_stack(task, buf, 48, 0); R9 keeps buf for seq_write */
     63	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
     64	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
     65	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write
     66	BPF_MOV64_IMM(BPF_REG_3, 48),
     67	BPF_MOV64_IMM(BPF_REG_4, 0),
     68	BPF_EMIT_CALL(BPF_FUNC_get_task_stack),
	/* exit with 0 unless the helper returned a positive byte count */
     69	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2),
     70	BPF_MOV64_IMM(BPF_REG_0, 0),
     71	BPF_EXIT_INSN(),
     72
	/* bpf_seq_write(seq, buf, R0): only accepted if the verifier refined
	 * R0 to fit within the 48-byte buffer */
     73	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
     74	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
     75	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
     76	BPF_EMIT_CALL(BPF_FUNC_seq_write),
     77
     78	BPF_MOV64_IMM(BPF_REG_0, 0),
     79	BPF_EXIT_INSN(),
     80	},
     81	.result = ACCEPT,
     82	.prog_type = BPF_PROG_TYPE_TRACING,
     83	.expected_attach_type = BPF_TRACE_ITER,
	/* attaches to the "task" BPF iterator */
     84	.kfunc = "task",
     85	.runs = -1, // Don't run, just load
	/* patch a 48-byte array map fd into the BPF_LD_MAP_FD at insn index 3 */
     86	.fixup_map_array_48b = { 3 },
     87},