cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

regalloc.c (9517B)
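/*
 * Verifier selftests for tracking scalar bounds across register copies
 * ("regalloc"): each program bounds a random scalar obtained from
 * bpf_get_prandom_u32(), adds it (directly or via copies, spills, or calls)
 * to a pointer into a 48-byte hash map value, and loads through the result.
 */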


{
	"regalloc basic",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 4),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
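/* As above, but with an upper bound of 24: r7 + r0 + r2 can reach offset 48,
 * so the 1-byte load falls outside the 48-byte value and must be rejected.
 */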
{
	"regalloc negative",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 24, 4),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=48 off=48 size=1",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
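/* The lower bound on r2 comes from a register-register compare in which r2 is
 * the source operand (JSGE r3, r2 with r3 = 0), so bounds must be recorded on
 * the src_reg as well.
 */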
{
	"regalloc src_reg mark",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 5),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
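/* src_reg variant with an upper bound of 22: the 8-byte load can start at
 * offset 44 and overrun the 48-byte value, so it must be rejected.
 */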
{
	"regalloc src_reg negative",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 22, 5),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=48 off=44 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
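/* The bounded scalar is spilled to the stack and filled back into r3; its
 * upper bound (<= 20) must survive the spill/fill so that, together with the
 * signed compare against 0, the load at r7 + r3 stays inside the map value.
 */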
{
	"regalloc and spill",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 7),
	/* r0 has upper bound that should propagate into r2 */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */
	BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2),
	/* r3 has lower and upper bounds */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
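/* Spill/fill variant with an upper bound of 48: the 8-byte load can start at
 * offset 48 and must be rejected.
 */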
{
	"regalloc and spill negative",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 48, 7),
	/* r0 has upper bound that should propagate into r2 */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */
	BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2),
	/* r3 has lower and upper bounds */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = REJECT,
	.errstr = "invalid access to map value, value_size=48 off=48 size=8",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
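/* r0, r2 and r4 all hold the same scalar, bounded to [0, 12]; the combined
 * offset of at most 36 keeps the 8-byte load within the 48-byte value.
 */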
{
	"regalloc three regs",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 12, 5),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_4),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
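/* r8 and r9 are copies of the same scalar taken before the call to the
 * trailing subprogram; the bounds established by the JSGT/JSLT checks after
 * the call must apply to both registers before they are added to the
 * map-value pointer.
 */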
{
	"regalloc after call",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 20, 4),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_9, 0, 3),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_8),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_9),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
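/* The scalar (r1, r2) and the map-value pointer (r3) are passed as arguments;
 * the bounds checks and the load happen inside the callee.
 */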
{
	"regalloc in callee",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 20, 5),
	BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
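/* The spilled r0 (map value or NULL) must not pick up bounds from the
 * unrelated scalar compared against 20; the inline comments below describe
 * the states the verifier walks.
 */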
{
	"regalloc, spill, JEQ",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* spill r0 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0),
	/* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */
	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 20, 0),
	/* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */
	BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 with map_value */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), /* skip ldx if map_value == NULL */
	/* Buggy verifier will think that r3 == 20 here */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), /* read from map_value */
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},