cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

unpriv.c (15248B)


/*
 * BPF verifier selftest fixtures.  Each struct literal below is one element
 * of a test-case array whose definition lives in the test_verifier harness,
 * outside this chunk.  Key fields: .insns is the BPF program under test,
 * .result/.errstr the expected verdict/error for privileged loads, and
 * .result_unpriv/.errstr_unpriv the expected verdict/error for unprivileged
 * loads.  (The leading "NNN" column is web-render line numbering, not code.)
 */
/* Returning R10 (the frame pointer) in R0: accepted privileged with the
 * pointer as retval, rejected unprivileged ("R0 leaks addr"). */
      1{
      2	"unpriv: return pointer",
      3	.insns = {
      4	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
      5	BPF_EXIT_INSN(),
      6	},
      7	.result = ACCEPT,
      8	.result_unpriv = REJECT,
      9	.errstr_unpriv = "R0 leaks addr",
     10	.retval = POINTER_VALUE,
     11},
/* Adding a constant to the ctx pointer in R1, result unused: accepted for
 * both privileged and unprivileged (no unpriv override present). */
     12{
     13	"unpriv: add const to pointer",
     14	.insns = {
     15	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
     16	BPF_MOV64_IMM(BPF_REG_0, 0),
     17	BPF_EXIT_INSN(),
     18	},
     19	.result = ACCEPT,
     20},
/* Pointer-plus-pointer (R1 += R10) is rejected even when privileged. */
     21{
     22	"unpriv: add pointer to pointer",
     23	.insns = {
     24	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
     25	BPF_MOV64_IMM(BPF_REG_0, 0),
     26	BPF_EXIT_INSN(),
     27	},
     28	.result = REJECT,
     29	.errstr = "R1 pointer += pointer",
     30},
/* NEG on a pointer register: rejected only for unprivileged as pointer
 * arithmetic. */
     31{
     32	"unpriv: neg pointer",
     33	.insns = {
     34	BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
     35	BPF_MOV64_IMM(BPF_REG_0, 0),
     36	BPF_EXIT_INSN(),
     37	},
     38	.result = ACCEPT,
     39	.result_unpriv = REJECT,
     40	.errstr_unpriv = "R1 pointer arithmetic",
     41},
/* Comparing a pointer (R1) against an immediate: unprivileged-only
 * rejection ("R1 pointer comparison"). */
     42{
     43	"unpriv: cmp pointer with const",
     44	.insns = {
     45	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
     46	BPF_MOV64_IMM(BPF_REG_0, 0),
     47	BPF_EXIT_INSN(),
     48	},
     49	.result = ACCEPT,
     50	.result_unpriv = REJECT,
     51	.errstr_unpriv = "R1 pointer comparison",
     52},
/* Comparing two pointers (R1 vs R10): unprivileged-only rejection; the
 * reported register here is R10. */
     53{
     54	"unpriv: cmp pointer with pointer",
     55	.insns = {
     56	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
     57	BPF_MOV64_IMM(BPF_REG_0, 0),
     58	BPF_EXIT_INSN(),
     59	},
     60	.result = ACCEPT,
     61	.result_unpriv = REJECT,
     62	.errstr_unpriv = "R10 pointer comparison",
     63},
/* bpf_trace_printk call on a TRACEPOINT program: accepted privileged,
 * rejected unprivileged with "unknown func bpf_trace_printk#6". */
     64{
     65	"unpriv: check that printk is disallowed",
     66	.insns = {
     67	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
     68	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
     69	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
     70	BPF_MOV64_IMM(BPF_REG_2, 8),
     71	BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
     72	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
     73	BPF_MOV64_IMM(BPF_REG_0, 0),
     74	BPF_EXIT_INSN(),
     75	},
     76	.errstr_unpriv = "unknown func bpf_trace_printk#6",
     77	.result_unpriv = REJECT,
     78	.result = ACCEPT,
     79	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
     80},
/* Stack pointers passed as map_update_elem key/value args: privileged
 * ACCEPT, unprivileged "R4 leaks addr".  .fixup_map_hash_8b = { 3 }
 * patches the map fd into the BPF_LD_MAP_FD at insn index 3. */
     81{
     82	"unpriv: pass pointer to helper function",
     83	.insns = {
     84	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
     85	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
     86	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
     87	BPF_LD_MAP_FD(BPF_REG_1, 0),
     88	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
     89	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
     90	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
     91	BPF_MOV64_IMM(BPF_REG_0, 0),
     92	BPF_EXIT_INSN(),
     93	},
     94	.fixup_map_hash_8b = { 3 },
     95	.errstr_unpriv = "R4 leaks addr",
     96	.result_unpriv = REJECT,
     97	.result = ACCEPT,
     98},
/* Spill R10 to the stack, then use that stack slot as the lookup key:
 * unprivileged gets "invalid indirect read from stack" on the spilled
 * pointer, privileged ACCEPT. */
     99{
    100	"unpriv: indirectly pass pointer on stack to helper function",
    101	.insns = {
    102	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
    103	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    104	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    105	BPF_LD_MAP_FD(BPF_REG_1, 0),
    106	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    107	BPF_MOV64_IMM(BPF_REG_0, 0),
    108	BPF_EXIT_INSN(),
    109	},
    110	.fixup_map_hash_8b = { 3 },
    111	.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
    112	.result_unpriv = REJECT,
    113	.result = ACCEPT,
    114},
/* Spill fp, then overwrite 4 bytes of the 8-byte spill slot: unprivileged
 * "attempt to corrupt spilled" pointer, privileged ACCEPT. */
    115{
    116	"unpriv: mangle pointer on stack 1",
    117	.insns = {
    118	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
    119	BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
    120	BPF_MOV64_IMM(BPF_REG_0, 0),
    121	BPF_EXIT_INSN(),
    122	},
    123	.errstr_unpriv = "attempt to corrupt spilled",
    124	.result_unpriv = REJECT,
    125	.result = ACCEPT,
    126},
/* Same as above with a single-byte store at offset -1 (top byte of the
 * spill slot). */
    127{
    128	"unpriv: mangle pointer on stack 2",
    129	.insns = {
    130	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
    131	BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
    132	BPF_MOV64_IMM(BPF_REG_0, 0),
    133	BPF_EXIT_INSN(),
    134	},
    135	.errstr_unpriv = "attempt to corrupt spilled",
    136	.result_unpriv = REJECT,
    137	.result = ACCEPT,
    138},
/* A 4-byte load from an 8-byte pointer spill is rejected for everyone
 * ("invalid size"). */
    139{
    140	"unpriv: read pointer from stack in small chunks",
    141	.insns = {
    142	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
    143	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
    144	BPF_MOV64_IMM(BPF_REG_0, 0),
    145	BPF_EXIT_INSN(),
    146	},
    147	.errstr = "invalid size",
    148	.result = REJECT,
    149},
/* Storing a pointer into the ctx: rejected for everyone, but with
 * different errors — "R1 leaks addr" unprivileged, "invalid bpf_context
 * access" privileged. */
    150{
    151	"unpriv: write pointer into ctx",
    152	.insns = {
    153	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
    154	BPF_MOV64_IMM(BPF_REG_0, 0),
    155	BPF_EXIT_INSN(),
    156	},
    157	.errstr_unpriv = "R1 leaks addr",
    158	.result_unpriv = REJECT,
    159	.errstr = "invalid bpf_context access",
    160	.result = REJECT,
    161},
/* A plain spill/fill round-trip of the ctx pointer through the stack is
 * accepted. */
    162{
    163	"unpriv: spill/fill of ctx",
    164	.insns = {
    165	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    166	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    167	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    168	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    169	BPF_MOV64_IMM(BPF_REG_0, 0),
    170	BPF_EXIT_INSN(),
    171	},
    172	.result = ACCEPT,
    173},
/* The filled-back ctx pointer is still usable as the ctx argument of a
 * helper call (get_hash_recalc on SCHED_CLS): accepted. */
    174{
    175	"unpriv: spill/fill of ctx 2",
    176	.insns = {
    177	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    178	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    179	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    180	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    181	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
    182	BPF_MOV64_IMM(BPF_REG_0, 0),
    183	BPF_EXIT_INSN(),
    184	},
    185	.result = ACCEPT,
    186	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    187},
/* The spill slot is overwritten with R10 before the fill, so the helper
 * sees a frame pointer where a ctx is required: "R1 type=fp expected=ctx". */
    188{
    189	"unpriv: spill/fill of ctx 3",
    190	.insns = {
    191	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    192	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    193	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    194	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
    195	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    196	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
    197	BPF_EXIT_INSN(),
    198	},
    199	.result = REJECT,
    200	.errstr = "R1 type=fp expected=ctx",
    201	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    202},
/* An atomic BPF_ADD on the spill slot at fp-8 invalidates the spilled ctx,
 * so the subsequent fill yields a scalar: "R1 type=scalar expected=ctx". */
    203{
    204	"unpriv: spill/fill of ctx 4",
    205	.insns = {
    206	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    207	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    208	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    209	BPF_MOV64_IMM(BPF_REG_0, 1),
    210	BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
    211		     BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
    212	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    213	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
    214	BPF_EXIT_INSN(),
    215	},
    216	.result = REJECT,
    217	.errstr = "R1 type=scalar expected=ctx",
    218	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    219},
/* Two branches spill different pointer types (a stack pointer vs the ctx)
 * into the same slot; the later mark store would then have to operate on
 * either type through one instruction — rejected with "same insn cannot be
 * used with different pointers". */
    220{
    221	"unpriv: spill/fill of different pointers stx",
    222	.insns = {
    223	BPF_MOV64_IMM(BPF_REG_3, 42),
    224	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    225	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    226	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
    227	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    228	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
    229	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
    230	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
    231	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    232	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    233	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
    234		    offsetof(struct __sk_buff, mark)),
    235	BPF_MOV64_IMM(BPF_REG_0, 0),
    236	BPF_EXIT_INSN(),
    237	},
    238	.result = REJECT,
    239	.errstr = "same insn cannot be used with different pointers",
    240	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    241},
/* Same-slot trick with the ctx and a ref-counted sock from sk_lookup_tcp;
 * the pseudo-C comments in .insns describe the flow.  Rejected with
 * "type=ctx expected=sock". */
    242{
    243	"unpriv: spill/fill of different pointers stx - ctx and sock",
    244	.insns = {
    245	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
    246	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
    247	BPF_SK_LOOKUP(sk_lookup_tcp),
    248	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    249	/* u64 foo; */
    250	/* void *target = &foo; */
    251	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    252	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    253	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
    254	/* if (skb == NULL) *target = sock; */
    255	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
    256		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
    257	/* else *target = skb; */
    258	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
    259		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    260	/* struct __sk_buff *skb = *target; */
    261	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    262	/* skb->mark = 42; */
    263	BPF_MOV64_IMM(BPF_REG_3, 42),
    264	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
    265		    offsetof(struct __sk_buff, mark)),
    266	/* if (sk) bpf_sk_release(sk) */
    267	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
    268		BPF_EMIT_CALL(BPF_FUNC_sk_release),
    269	BPF_MOV64_IMM(BPF_REG_0, 0),
    270	BPF_EXIT_INSN(),
    271	},
    272	.result = REJECT,
    273	.errstr = "type=ctx expected=sock",
    274	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    275},
/* As above but with no bpf_sk_release call at all; the verifier reports
 * the unreleased sock reference (the commented-out .errstr records the
 * alternative error this test could trip). */
    276{
    277	"unpriv: spill/fill of different pointers stx - leak sock",
    278	.insns = {
    279	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
    280	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
    281	BPF_SK_LOOKUP(sk_lookup_tcp),
    282	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    283	/* u64 foo; */
    284	/* void *target = &foo; */
    285	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    286	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    287	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
    288	/* if (skb == NULL) *target = sock; */
    289	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
    290		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
    291	/* else *target = skb; */
    292	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
    293		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    294	/* struct __sk_buff *skb = *target; */
    295	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    296	/* skb->mark = 42; */
    297	BPF_MOV64_IMM(BPF_REG_3, 42),
    298	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
    299		    offsetof(struct __sk_buff, mark)),
    300	BPF_EXIT_INSN(),
    301	},
    302	.result = REJECT,
    303	//.errstr = "same insn cannot be used with different pointers",
    304	.errstr = "Unreleased reference",
    305	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    306},
/* sock/ctx variant exercising the LOAD side: the sk->mark read insn would
 * see either a sock or the ctx depending on the branch — rejected with
 * "same insn cannot be used with different pointers". */
    307{
    308	"unpriv: spill/fill of different pointers stx - sock and ctx (read)",
    309	.insns = {
    310	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
    311	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
    312	BPF_SK_LOOKUP(sk_lookup_tcp),
    313	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    314	/* u64 foo; */
    315	/* void *target = &foo; */
    316	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    317	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    318	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
    319	/* if (skb) *target = skb */
    320	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
    321		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    322	/* else *target = sock */
    323	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
    324		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
    325	/* struct bpf_sock *sk = *target; */
    326	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    327	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */
    328	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
    329		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    330			    offsetof(struct bpf_sock, mark)),
    331		BPF_EMIT_CALL(BPF_FUNC_sk_release),
    332	BPF_MOV64_IMM(BPF_REG_0, 0),
    333	BPF_EXIT_INSN(),
    334	},
    335	.result = REJECT,
    336	.errstr = "same insn cannot be used with different pointers",
    337	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    338},
/* sock/ctx variant exercising the WRITE side (sk->mark = 42): rejected
 * with "cannot write into sock" (the commented-out .errstr records the
 * alternative mixed-pointer error). */
    339{
    340	"unpriv: spill/fill of different pointers stx - sock and ctx (write)",
    341	.insns = {
    342	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
    343	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
    344	BPF_SK_LOOKUP(sk_lookup_tcp),
    345	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    346	/* u64 foo; */
    347	/* void *target = &foo; */
    348	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    349	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    350	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
    351	/* if (skb) *target = skb */
    352	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
    353		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    354	/* else *target = sock */
    355	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
    356		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
    357	/* struct bpf_sock *sk = *target; */
    358	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    359	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */
    360	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
    361		BPF_MOV64_IMM(BPF_REG_3, 42),
    362		BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
    363			    offsetof(struct bpf_sock, mark)),
    364		BPF_EMIT_CALL(BPF_FUNC_sk_release),
    365	BPF_MOV64_IMM(BPF_REG_0, 0),
    366	BPF_EXIT_INSN(),
    367	},
    368	.result = REJECT,
    369	//.errstr = "same insn cannot be used with different pointers",
    370	.errstr = "cannot write into sock",
    371	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    372},
/* Same-slot trick on the load side in a PERF_EVENT program: the final
 * sample_period load would dereference either a stack pointer or the ctx —
 * rejected with "same insn cannot be used with different pointers". */
    373{
    374	"unpriv: spill/fill of different pointers ldx",
    375	.insns = {
    376	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    377	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    378	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
    379	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    380	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
    381		      -(__s32)offsetof(struct bpf_perf_event_data,
    382				       sample_period) - 8),
    383	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
    384	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
    385	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
    386	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
    387	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
    388		    offsetof(struct bpf_perf_event_data, sample_period)),
    389	BPF_MOV64_IMM(BPF_REG_0, 0),
    390	BPF_EXIT_INSN(),
    391	},
    392	.result = REJECT,
    393	.errstr = "same insn cannot be used with different pointers",
    394	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
    395},
/* Storing the map-value pointer returned by map_lookup_elem back into the
 * map value itself: privileged ACCEPT, unprivileged "R0 leaks addr". */
    396{
    397	"unpriv: write pointer into map elem value",
    398	.insns = {
    399	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    400	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    401	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    402	BPF_LD_MAP_FD(BPF_REG_1, 0),
    403	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    404	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
    405	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
    406	BPF_EXIT_INSN(),
    407	},
    408	.fixup_map_hash_8b = { 3 },
    409	.errstr_unpriv = "R0 leaks addr",
    410	.result_unpriv = REJECT,
    411	.result = ACCEPT,
    412},
/* 32-bit mov/and leaves R7 a scalar; the guarded load through R7 is
 * accepted privileged (retval 0) but rejected unprivileged with
 * "R7 invalid mem access 'scalar'". */
    413{
    414	"alu32: mov u32 const",
    415	.insns = {
    416	BPF_MOV32_IMM(BPF_REG_7, 0),
    417	BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
    418	BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
    419	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
    420	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
    421	BPF_EXIT_INSN(),
    422	},
    423	.errstr_unpriv = "R7 invalid mem access 'scalar'",
    424	.result_unpriv = REJECT,
    425	.result = ACCEPT,
    426	.retval = 0,
    427},
/* A 32-bit copy of R10 is a partial pointer copy: unprivileged-only
 * rejection ("R10 partial copy"). */
    428{
    429	"unpriv: partial copy of pointer",
    430	.insns = {
    431	BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
    432	BPF_MOV64_IMM(BPF_REG_0, 0),
    433	BPF_EXIT_INSN(),
    434	},
    435	.errstr_unpriv = "R10 partial copy",
    436	.result_unpriv = REJECT,
    437	.result = ACCEPT,
    438},
/* ctx pointer placed in R3 before tail_call: unprivileged "R3 leaks addr
 * into helper".  .fixup_prog1 patches the prog-array fd at insn 1. */
    439{
    440	"unpriv: pass pointer to tail_call",
    441	.insns = {
    442	BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
    443	BPF_LD_MAP_FD(BPF_REG_2, 0),
    444	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
    445	BPF_MOV64_IMM(BPF_REG_0, 0),
    446	BPF_EXIT_INSN(),
    447	},
    448	.fixup_prog1 = { 1 },
    449	.errstr_unpriv = "R3 leaks addr into helper",
    450	.result_unpriv = REJECT,
    451	.result = ACCEPT,
    452},
/* Even a NULL check on a map pointer counts as a pointer comparison for
 * unprivileged; privileged ACCEPT. */
    453{
    454	"unpriv: cmp map pointer with zero",
    455	.insns = {
    456	BPF_MOV64_IMM(BPF_REG_1, 0),
    457	BPF_LD_MAP_FD(BPF_REG_1, 0),
    458	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
    459	BPF_MOV64_IMM(BPF_REG_0, 0),
    460	BPF_EXIT_INSN(),
    461	},
    462	.fixup_map_hash_8b = { 1 },
    463	.errstr_unpriv = "R1 pointer comparison",
    464	.result_unpriv = REJECT,
    465	.result = ACCEPT,
    466},
/* Direct MOV into R10: "frame pointer is read only", rejected for all. */
    467{
    468	"unpriv: write into frame pointer",
    469	.insns = {
    470	BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
    471	BPF_MOV64_IMM(BPF_REG_0, 0),
    472	BPF_EXIT_INSN(),
    473	},
    474	.errstr = "frame pointer is read only",
    475	.result = REJECT,
    476},
/* Filling from the stack into R10 is also a write to the frame pointer:
 * rejected for all. */
    477{
    478	"unpriv: spill/fill frame pointer",
    479	.insns = {
    480	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
    481	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
    482	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
    483	BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
    484	BPF_MOV64_IMM(BPF_REG_0, 0),
    485	BPF_EXIT_INSN(),
    486	},
    487	.errstr = "frame pointer is read only",
    488	.result = REJECT,
    489},
/* Comparing R10 against an immediate: unprivileged-only pointer-comparison
 * rejection. */
    490{
    491	"unpriv: cmp of frame pointer",
    492	.insns = {
    493	BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
    494	BPF_MOV64_IMM(BPF_REG_0, 0),
    495	BPF_EXIT_INSN(),
    496	},
    497	.errstr_unpriv = "R10 pointer comparison",
    498	.result_unpriv = REJECT,
    499	.result = ACCEPT,
    500},
/* R1 = 0; R1 += R10 (register add of fp), then store at R1-8: privileged
 * ACCEPT, unprivileged "R1 stack pointer arithmetic goes out of range". */
    501{
    502	"unpriv: adding of fp, reg",
    503	.insns = {
    504	BPF_MOV64_IMM(BPF_REG_0, 0),
    505	BPF_MOV64_IMM(BPF_REG_1, 0),
    506	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
    507	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
    508	BPF_EXIT_INSN(),
    509	},
    510	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
    511	.result_unpriv = REJECT,
    512	.result = ACCEPT,
    513},
/* Same restriction via ADD imm 0 on a copy of fp. */
    514{
    515	"unpriv: adding of fp, imm",
    516	.insns = {
    517	BPF_MOV64_IMM(BPF_REG_0, 0),
    518	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    519	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
    520	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
    521	BPF_EXIT_INSN(),
    522	},
    523	.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
    524	.result_unpriv = REJECT,
    525	.result = ACCEPT,
    526},
/* Comparing a derived stack pointer (fp-8 in R2) against an immediate:
 * unprivileged-only "R2 pointer comparison". */
    527{
    528	"unpriv: cmp of stack pointer",
    529	.insns = {
    530	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    531	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    532	BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
    533	BPF_MOV64_IMM(BPF_REG_0, 0),
    534	BPF_EXIT_INSN(),
    535	},
    536	.errstr_unpriv = "R2 pointer comparison",
    537	.result_unpriv = REJECT,
    538	.result = ACCEPT,
    539},