cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

calls.c (68167B)


/* Verifier selftest table: each entry is a struct bpf_test initializer.
 * .insns holds the BPF program under test, .result/.errstr the expected
 * verifier verdict, and .fixup_kfunc_btf_id patches the imm field of
 * BPF_PSEUDO_KFUNC_CALL instructions with the named kfunc's BTF id at
 * the given instruction index.
 */
/* A kfunc call whose imm is left 0 (no fixup applied) must be rejected. */
      1{
      2	"calls: invalid kfunc call not eliminated",
      3	.insns = {
      4	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
      5	BPF_MOV64_IMM(BPF_REG_0, 1),
      6	BPF_EXIT_INSN(),
      7	},
      8	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
      9	.result  = REJECT,
     10	.errstr = "invalid kernel function call not eliminated in verifier pass",
     11},
/* The same invalid kfunc call is accepted when it is unreachable:
 * R0=1 makes the JGT always skip over it. */
     12{
     13	"calls: invalid kfunc call unreachable",
     14	.insns = {
     15	BPF_MOV64_IMM(BPF_REG_0, 1),
     16	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
     17	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
     18	BPF_MOV64_IMM(BPF_REG_0, 1),
     19	BPF_EXIT_INSN(),
     20	},
     21	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
     22	.result  = ACCEPT,
     23},
/* ptr-to-mem kfunc arg whose target struct contains a non-scalar member. */
     24{
     25	"calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
     26	.insns = {
     27	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
     28	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
     29	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
     30	BPF_EXIT_INSN(),
     31	},
     32	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
     33	.result = REJECT,
     34	.errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
     35	.fixup_kfunc_btf_id = {
     36		{ "bpf_kfunc_call_test_fail1", 2 },
     37	},
     38},
/* ptr-to-mem kfunc arg whose target struct nests deeper than the
 * verifier's limit of 4. */
     39{
     40	"calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
     41	.insns = {
     42	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
     43	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
     44	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
     45	BPF_EXIT_INSN(),
     46	},
     47	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
     48	.result = REJECT,
     49	.errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
     50	.fixup_kfunc_btf_id = {
     51		{ "bpf_kfunc_call_test_fail2", 2 },
     52	},
     53},
/* ptr-to-mem kfunc arg whose target struct ends in a flexible array
 * member (FAM). */
     54{
     55	"calls: invalid kfunc call: ptr_to_mem to struct with FAM",
     56	.insns = {
     57	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
     58	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
     59	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
     60	BPF_EXIT_INSN(),
     61	},
     62	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
     63	.result = REJECT,
     64	.errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
     65	.fixup_kfunc_btf_id = {
     66		{ "bpf_kfunc_call_test_fail3", 2 },
     67	},
     68},
/* kfunc expects a ctx pointer but receives a stack pointer in R1. */
     69{
     70	"calls: invalid kfunc call: reg->type != PTR_TO_CTX",
     71	.insns = {
     72	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
     73	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
     74	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
     75	BPF_EXIT_INSN(),
     76	},
     77	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
     78	.result = REJECT,
     79	.errstr = "arg#0 expected pointer to ctx, but got PTR",
     80	.fixup_kfunc_btf_id = {
     81		{ "bpf_kfunc_call_test_pass_ctx", 2 },
     82	},
     83},
/* void * kfunc parameter is only legal when paired with a size arg. */
     84{
     85	"calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
     86	.insns = {
     87	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
     88	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
     89	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
     90	BPF_EXIT_INSN(),
     91	},
     92	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
     93	.result = REJECT,
     94	.errstr = "arg#0 pointer type UNKNOWN  must point to scalar",
     95	.fixup_kfunc_btf_id = {
     96		{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
     97	},
     98},
/* Pass acquire's result straight to release without a NULL check to
 * exercise the reg2btf_ids[] bounds handling named in the test. */
     99{
    100	"calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
    101	.insns = {
    102	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    103	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    104	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
    105	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    106	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    107	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    108	BPF_EXIT_INSN(),
    109	},
    110	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    111	.result = REJECT,
    112	.errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
    113	.fixup_kfunc_btf_id = {
    114		{ "bpf_kfunc_call_test_acquire", 3 },
    115		{ "bpf_kfunc_call_test_release", 5 },
    116	},
    117},
/* The acquired pointer is advanced by +8 before being released;
 * release kfuncs require reg->off == 0. */
    118{
    119	"calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
    120	.insns = {
    121	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    122	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    123	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
    124	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    125	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    126	BPF_EXIT_INSN(),
    127	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    128	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    129	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    130	BPF_MOV64_IMM(BPF_REG_0, 0),
    131	BPF_EXIT_INSN(),
    132	},
    133	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    134	.result = REJECT,
    135	.errstr = "R1 must have zero offset when passed to release func",
    136	.fixup_kfunc_btf_id = {
    137		{ "bpf_kfunc_call_test_acquire", 3 },
    138		{ "bpf_kfunc_call_memb_release", 8 },
    139	},
    140},
/* Release a memb-acquired object through memb1_release, whose expected
 * first-member type does not match — must be rejected. */
    141{
    142	"calls: invalid kfunc call: don't match first member type when passed to release kfunc",
    143	.insns = {
    144	BPF_MOV64_IMM(BPF_REG_0, 0),
    145	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    146	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    147	BPF_EXIT_INSN(),
    148	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    149	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    150	BPF_MOV64_IMM(BPF_REG_0, 0),
    151	BPF_EXIT_INSN(),
    152	},
    153	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    154	.result = REJECT,
    155	.errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
    156	.fixup_kfunc_btf_id = {
    157		{ "bpf_kfunc_call_memb_acquire", 1 },
    158		{ "bpf_kfunc_call_memb1_release", 5 },
    159	},
    160},
/* PTR_TO_BTF_ID loaded from the object (offset 16) then moved to a
 * negative offset (-4) before release — rejected. */
    161{
    162	"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
    163	.insns = {
    164	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    165	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    166	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
    167	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    168	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    169	BPF_EXIT_INSN(),
    170	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    171	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
    172	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
    173	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    174	BPF_MOV64_IMM(BPF_REG_0, 0),
    175	BPF_EXIT_INSN(),
    176	},
    177	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    178	.fixup_kfunc_btf_id = {
    179		{ "bpf_kfunc_call_test_acquire", 3 },
    180		{ "bpf_kfunc_call_test_release", 9 },
    181	},
    182	.result_unpriv = REJECT,
    183	.result = REJECT,
    184	.errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
    185},
/* PTR_TO_BTF_ID plus a scalar R2 bounded to [0, 4] (var_off 0x7):
 * variable-offset access to a BTF pointer is rejected on all three
 * release call sites. */
    186{
    187	"calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
    188	.insns = {
    189	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    190	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    191	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
    192	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    193	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    194	BPF_EXIT_INSN(),
    195	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    196	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
    197	BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
    198	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    199	BPF_MOV64_IMM(BPF_REG_0, 0),
    200	BPF_EXIT_INSN(),
    201	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
    202	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    203	BPF_MOV64_IMM(BPF_REG_0, 0),
    204	BPF_EXIT_INSN(),
    205	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
    206	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
    207	BPF_MOV64_IMM(BPF_REG_0, 0),
    208	BPF_EXIT_INSN(),
    209	},
    210	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    211	.fixup_kfunc_btf_id = {
    212		{ "bpf_kfunc_call_test_acquire", 3 },
    213		{ "bpf_kfunc_call_test_release", 9 },
    214		{ "bpf_kfunc_call_test_release", 13 },
    215		{ "bpf_kfunc_call_test_release", 17 },
    216	},
    217	.result_unpriv = REJECT,
    218	.result = REJECT,
    219	.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
    220},
/* Minimal bpf-to-bpf call (src_reg 1 == BPF_PSEUDO_CALL): main calls
 * the subprog at pc+2; both paths set R0 and exit. */
    221{
    222	"calls: basic sanity",
    223	.insns = {
    224	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    225	BPF_MOV64_IMM(BPF_REG_0, 1),
    226	BPF_EXIT_INSN(),
    227	BPF_MOV64_IMM(BPF_REG_0, 2),
    228	BPF_EXIT_INSN(),
    229	},
    230	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    231	.result = ACCEPT,
    232},
/* Same program must be rejected for unprivileged users.  NOTE(review):
 * "unpriviledged" is the upstream test name's spelling — do not "fix"
 * the string, it is matched by the harness. */
    233{
    234	"calls: not on unpriviledged",
    235	.insns = {
    236	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    237	BPF_MOV64_IMM(BPF_REG_0, 1),
    238	BPF_EXIT_INSN(),
    239	BPF_MOV64_IMM(BPF_REG_0, 2),
    240	BPF_EXIT_INSN(),
    241	},
    242	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
    243	.result_unpriv = REJECT,
    244	.result = ACCEPT,
    245	.retval = 1,
    246},
/* 32-bit DIV by zero inside the subprog must not cause rejection;
 * program is accepted and returns 1. */
    247{
    248	"calls: div by 0 in subprog",
    249	.insns = {
    250	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    251	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
    252	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    253	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
    254		    offsetof(struct __sk_buff, data_end)),
    255	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    256	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
    257	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
    258	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
    259	BPF_MOV64_IMM(BPF_REG_0, 1),
    260	BPF_EXIT_INSN(),
    261	BPF_MOV32_IMM(BPF_REG_2, 0),
    262	BPF_MOV32_IMM(BPF_REG_3, 1),
    263	BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
    264	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    265		    offsetof(struct __sk_buff, data)),
    266	BPF_EXIT_INSN(),
    267	},
    268	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    269	.result = ACCEPT,
    270	.retval = 1,
    271},
/* Subprog returns either skb->data (pkt pointer) or scalar 42; the
 * caller's dereference of R0 is rejected as a scalar mem access. */
    272{
    273	"calls: multiple ret types in subprog 1",
    274	.insns = {
    275	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    276	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
    277	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    278	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
    279		    offsetof(struct __sk_buff, data_end)),
    280	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    281	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
    282	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
    283	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
    284	BPF_MOV64_IMM(BPF_REG_0, 1),
    285	BPF_EXIT_INSN(),
    286	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    287		    offsetof(struct __sk_buff, data)),
    288	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    289	BPF_MOV32_IMM(BPF_REG_0, 42),
    290	BPF_EXIT_INSN(),
    291	},
    292	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    293	.result = REJECT,
    294	.errstr = "R0 invalid mem access 'scalar'",
    295},
/* Subprog may return skb->data or a map-lookup-derived value; using the
 * result as a pkt pointer is rejected (R0 range unknown). */
    296{
    297	"calls: multiple ret types in subprog 2",
    298	.insns = {
    299	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    300	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
    301	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    302	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
    303		    offsetof(struct __sk_buff, data_end)),
    304	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
    305	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
    306	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
    307	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
    308	BPF_MOV64_IMM(BPF_REG_0, 1),
    309	BPF_EXIT_INSN(),
    310	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    311		    offsetof(struct __sk_buff, data)),
    312	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    313	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
    314	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    315	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    316	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    317	BPF_LD_MAP_FD(BPF_REG_1, 0),
    318	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    319	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    320	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
    321		    offsetof(struct __sk_buff, data)),
    322	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
    323	BPF_EXIT_INSN(),
    324	},
    325	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    326	.fixup_map_hash_8b = { 16 },
    327	.result = REJECT,
    328	.errstr = "R0 min value is outside of the allowed memory range",
    329},
/* Call offset 0 makes the callee start overlap the caller's remainder,
 * so the caller body has no terminating exit/jmp of its own. */
    330{
    331	"calls: overlapping caller/callee",
    332	.insns = {
    333	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
    334	BPF_MOV64_IMM(BPF_REG_0, 1),
    335	BPF_EXIT_INSN(),
    336	},
    337	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    338	.errstr = "last insn is not an exit or jmp",
    339	.result = REJECT,
    340},
/* Backward calls (off -2) target the middle of a function body. */
    341{
    342	"calls: wrong recursive calls",
    343	.insns = {
    344	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
    345	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
    346	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
    347	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
    348	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
    349	BPF_MOV64_IMM(BPF_REG_0, 1),
    350	BPF_EXIT_INSN(),
    351	},
    352	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    353	.errstr = "jump out of range",
    354	.result = REJECT,
    355},
/* src_reg 3 is not a valid pseudo-call encoding — reserved field. */
    356{
    357	"calls: wrong src reg",
    358	.insns = {
    359	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
    360	BPF_MOV64_IMM(BPF_REG_0, 1),
    361	BPF_EXIT_INSN(),
    362	},
    363	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    364	.errstr = "BPF_CALL uses reserved fields",
    365	.result = REJECT,
    366},
/* The off field of a BPF_CALL must be zero; -1 is rejected. */
    367{
    368	"calls: wrong off value",
    369	.insns = {
    370	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
    371	BPF_MOV64_IMM(BPF_REG_0, 1),
    372	BPF_EXIT_INSN(),
    373	BPF_MOV64_IMM(BPF_REG_0, 2),
    374	BPF_EXIT_INSN(),
    375	},
    376	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    377	.errstr = "BPF_CALL uses reserved fields",
    378	.result = REJECT,
    379},
/* Call with off -1 targets insn 0 — the call itself — forming a
 * self back-edge. */
    380{
    381	"calls: jump back loop",
    382	.insns = {
    383	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
    384	BPF_MOV64_IMM(BPF_REG_0, 1),
    385	BPF_EXIT_INSN(),
    386	},
    387	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    388	.errstr = "back-edge from insn 0 to 0",
    389	.result = REJECT,
    390},
/* The JEQ skips past the caller's exit into the callee body —
 * a jump across the function boundary is out of range. */
    391{
    392	"calls: conditional call",
    393	.insns = {
    394	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    395		    offsetof(struct __sk_buff, mark)),
    396	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
    397	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    398	BPF_MOV64_IMM(BPF_REG_0, 1),
    399	BPF_EXIT_INSN(),
    400	BPF_MOV64_IMM(BPF_REG_0, 2),
    401	BPF_EXIT_INSN(),
    402	},
    403	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    404	.errstr = "jump out of range",
    405	.result = REJECT,
    406},
/* Same shape but the conditional jump stays within the caller;
 * the call targets a separate subprog — accepted. */
    407{
    408	"calls: conditional call 2",
    409	.insns = {
    410	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    411		    offsetof(struct __sk_buff, mark)),
    412	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
    413	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    414	BPF_MOV64_IMM(BPF_REG_0, 1),
    415	BPF_EXIT_INSN(),
    416	BPF_MOV64_IMM(BPF_REG_0, 2),
    417	BPF_EXIT_INSN(),
    418	BPF_MOV64_IMM(BPF_REG_0, 3),
    419	BPF_EXIT_INSN(),
    420	},
    421	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    422	.result = ACCEPT,
    423},
/* Backward JA jumps form a bounded loop: rejected as a back-edge for
 * unprivileged, accepted for privileged loaders. */
    424{
    425	"calls: conditional call 3",
    426	.insns = {
    427	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    428		    offsetof(struct __sk_buff, mark)),
    429	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
    430	BPF_JMP_IMM(BPF_JA, 0, 0, 4),
    431	BPF_MOV64_IMM(BPF_REG_0, 1),
    432	BPF_EXIT_INSN(),
    433	BPF_MOV64_IMM(BPF_REG_0, 1),
    434	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
    435	BPF_MOV64_IMM(BPF_REG_0, 3),
    436	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
    437	},
    438	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
    439	.errstr_unpriv = "back-edge from insn",
    440	.result_unpriv = REJECT,
    441	.result = ACCEPT,
    442	.retval = 1,
    443},
/* Backward JA stays within the caller function — accepted. */
    444{
    445	"calls: conditional call 4",
    446	.insns = {
    447	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    448		    offsetof(struct __sk_buff, mark)),
    449	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
    450	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    451	BPF_MOV64_IMM(BPF_REG_0, 1),
    452	BPF_EXIT_INSN(),
    453	BPF_MOV64_IMM(BPF_REG_0, 1),
    454	BPF_JMP_IMM(BPF_JA, 0, 0, -5),
    455	BPF_MOV64_IMM(BPF_REG_0, 3),
    456	BPF_EXIT_INSN(),
    457	},
    458	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    459	.result = ACCEPT,
    460},
/* As above with a longer backward jump (-6); still in range — accepted. */
    461{
    462	"calls: conditional call 5",
    463	.insns = {
    464	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    465		    offsetof(struct __sk_buff, mark)),
    466	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
    467	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    468	BPF_MOV64_IMM(BPF_REG_0, 1),
    469	BPF_EXIT_INSN(),
    470	BPF_MOV64_IMM(BPF_REG_0, 1),
    471	BPF_JMP_IMM(BPF_JA, 0, 0, -6),
    472	BPF_MOV64_IMM(BPF_REG_0, 3),
    473	BPF_EXIT_INSN(),
    474	},
    475	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    476	.result = ACCEPT,
    477	.retval = 1,
    478},
/* JEQ loops back over the call when R0 == 0, with no state change in
 * between — verifier detects an infinite loop. */
    479{
    480	"calls: conditional call 6",
    481	.insns = {
    482	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    483	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    484	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    485	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
    486	BPF_EXIT_INSN(),
    487	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    488		    offsetof(struct __sk_buff, mark)),
    489	BPF_EXIT_INSN(),
    490	},
    491	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    492	.errstr = "infinite loop detected",
    493	.result = REJECT,
    494},
/* Caller exits with R0 as set by the callee — accepted. */
    495{
    496	"calls: using r0 returned by callee",
    497	.insns = {
    498	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    499	BPF_EXIT_INSN(),
    500	BPF_MOV64_IMM(BPF_REG_0, 2),
    501	BPF_EXIT_INSN(),
    502	},
    503	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    504	.result = ACCEPT,
    505},
/* Callee exits without writing R0; the caller's use of R0 fails the
 * read check. */
    506{
    507	"calls: using uninit r0 from callee",
    508	.insns = {
    509	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    510	BPF_EXIT_INSN(),
    511	BPF_EXIT_INSN(),
    512	},
    513	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    514	.errstr = "!read_ok",
    515	.result = REJECT,
    516},
/* Callee reads skb->len through the ctx passed in R1 — accepted,
 * returns the test packet length. */
    517{
    518	"calls: callee is using r1",
    519	.insns = {
    520	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    521	BPF_EXIT_INSN(),
    522	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    523		    offsetof(struct __sk_buff, len)),
    524	BPF_EXIT_INSN(),
    525	},
    526	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
    527	.result = ACCEPT,
    528	.retval = TEST_DATA_LEN,
    529},
/* Callee returns its R1 (the ctx pointer) as a value: rejected for
 * unprivileged (pointer leak), accepted for privileged. */
    530{
    531	"calls: callee using args1",
    532	.insns = {
    533	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    534	BPF_EXIT_INSN(),
    535	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
    536	BPF_EXIT_INSN(),
    537	},
    538	.errstr_unpriv = "allowed for",
    539	.result_unpriv = REJECT,
    540	.result = ACCEPT,
    541	.retval = POINTER_VALUE,
    542},
/* R2 is not an argument the caller initialized; callee's read of it
 * must fail. */
    543{
    544	"calls: callee using wrong args2",
    545	.insns = {
    546	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    547	BPF_EXIT_INSN(),
    548	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    549	BPF_EXIT_INSN(),
    550	},
    551	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    552	.errstr = "R2 !read_ok",
    553	.result = REJECT,
    554},
/* Caller loads skb->len into both R1 and R2; callee sums them. */
    555{
    556	"calls: callee using two args",
    557	.insns = {
    558	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    559	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
    560		    offsetof(struct __sk_buff, len)),
    561	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
    562		    offsetof(struct __sk_buff, len)),
    563	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    564	BPF_EXIT_INSN(),
    565	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
    566	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
    567	BPF_EXIT_INSN(),
    568	},
    569	.errstr_unpriv = "allowed for",
    570	.result_unpriv = REJECT,
    571	.result = ACCEPT,
    572	.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
    573},
/* Callee calls bpf_xdp_adjust_head, which invalidates pkt pointers in
 * every frame; the caller's use of R6 afterwards must be rejected. */
    574{
    575	"calls: callee changing pkt pointers",
    576	.insns = {
    577	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
    578	BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
    579		    offsetof(struct xdp_md, data_end)),
    580	BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
    581	BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
    582	BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
    583	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    584	/* clear_all_pkt_pointers() has to walk all frames
    585	 * to make sure that pkt pointers in the caller
    586	 * are cleared when callee is calling a helper that
    587	 * adjusts packet size
    588	 */
    589	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
    590	BPF_MOV32_IMM(BPF_REG_0, 0),
    591	BPF_EXIT_INSN(),
    592	BPF_MOV64_IMM(BPF_REG_2, 0),
    593	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
    594	BPF_EXIT_INSN(),
    595	},
    596	.result = REJECT,
    597	.errstr = "R6 invalid mem access 'scalar'",
    598	.prog_type = BPF_PROG_TYPE_XDP,
    599	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    600},
/* Map-lookup result is NULL-checked inside the subprog; the caller's
 * dereference is guarded by the returned flag — accepted. */
    601{
    602	"calls: ptr null check in subprog",
    603	.insns = {
    604	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    605	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    606	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    607	BPF_LD_MAP_FD(BPF_REG_1, 0),
    608	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
    609	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    610	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
    611	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    612	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
    613	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
    614	BPF_EXIT_INSN(),
    615	BPF_MOV64_IMM(BPF_REG_0, 0),
    616	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
    617	BPF_MOV64_IMM(BPF_REG_0, 1),
    618	BPF_EXIT_INSN(),
    619	},
    620	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
    621	.fixup_map_hash_48b = { 3 },
    622	.result_unpriv = REJECT,
    623	.result = ACCEPT,
    624	.retval = 0,
    625},
/* Nested bpf-to-bpf calls: subprog 1 calls subprog 2 twice and sums the
 * two skb->len reads — accepted, returns 2 * TEST_DATA_LEN. */
    626{
    627	"calls: two calls with args",
    628	.insns = {
    629	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    630	BPF_EXIT_INSN(),
    631	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    632	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
    633	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
    634	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    635	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    636	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
    637	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
    638	BPF_EXIT_INSN(),
    639	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    640		    offsetof(struct __sk_buff, len)),
    641	BPF_EXIT_INSN(),
    642	},
    643	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    644	.result = ACCEPT,
    645	.retval = TEST_DATA_LEN + TEST_DATA_LEN,
    646},
/* A stack pointer adjusted by -64 in each frame stays aligned and in
 * bounds; final store of 42 through it is accepted. */
    647{
    648	"calls: calls with stack arith",
    649	.insns = {
    650	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    651	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
    652	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    653	BPF_EXIT_INSN(),
    654	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
    655	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    656	BPF_EXIT_INSN(),
    657	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
    658	BPF_MOV64_IMM(BPF_REG_0, 42),
    659	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
    660	BPF_EXIT_INSN(),
    661	},
    662	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    663	.result = ACCEPT,
    664	.retval = 42,
    665},
/* Same shape with odd adjustments (-63/-61): under strict alignment
 * the 8-byte store is misaligned and rejected. */
    666{
    667	"calls: calls with misaligned stack access",
    668	.insns = {
    669	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    670	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
    671	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    672	BPF_EXIT_INSN(),
    673	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
    674	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    675	BPF_EXIT_INSN(),
    676	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
    677	BPF_MOV64_IMM(BPF_REG_0, 42),
    678	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
    679	BPF_EXIT_INSN(),
    680	},
    681	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    682	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
    683	.errstr = "misaligned stack access",
    684	.result = REJECT,
    685},
/* Plain JA jumps only — control flow passes through the R0=43 path
 * exactly once; accepted with retval 43. */
    686{
    687	"calls: calls control flow, jump test",
    688	.insns = {
    689	BPF_MOV64_IMM(BPF_REG_0, 42),
    690	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
    691	BPF_MOV64_IMM(BPF_REG_0, 43),
    692	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
    693	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
    694	BPF_EXIT_INSN(),
    695	},
    696	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    697	.result = ACCEPT,
    698	.retval = 43,
    699},
/* Replacing the last JA with a backward call (-3) makes the callee
 * start at insn 2 and the JA at insn 1 cross function bounds. */
    700{
    701	"calls: calls control flow, jump test 2",
    702	.insns = {
    703	BPF_MOV64_IMM(BPF_REG_0, 42),
    704	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
    705	BPF_MOV64_IMM(BPF_REG_0, 43),
    706	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
    707	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
    708	BPF_EXIT_INSN(),
    709	},
    710	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    711	.errstr = "jump out of range from insn 1 to 4",
    712	.result = REJECT,
    713},
/* The JEQ at insn 11 jumps backwards into another subprog's body. */
    714{
    715	"calls: two calls with bad jump",
    716	.insns = {
    717	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    718	BPF_EXIT_INSN(),
    719	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    720	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
    721	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
    722	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    723	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    724	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
    725	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
    726	BPF_EXIT_INSN(),
    727	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    728		    offsetof(struct __sk_buff, len)),
    729	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
    730	BPF_EXIT_INSN(),
    731	},
    732	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    733	.errstr = "jump out of range from insn 11 to 9",
    734	.result = REJECT,
    735},
/* Subprog calls itself (off -1) — direct recursion is a back-edge. */
    736{
    737	"calls: recursive call. test1",
    738	.insns = {
    739	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    740	BPF_EXIT_INSN(),
    741	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
    742	BPF_EXIT_INSN(),
    743	},
    744	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    745	.errstr = "back-edge",
    746	.result = REJECT,
    747},
/* Subprog calls back to the main prog entry (off -3) — mutual
 * recursion, also a back-edge. */
    748{
    749	"calls: recursive call. test2",
    750	.insns = {
    751	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    752	BPF_EXIT_INSN(),
    753	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
    754	BPF_EXIT_INSN(),
    755	},
    756	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    757	.errstr = "back-edge",
    758	.result = REJECT,
    759},
/* Insns 6-7 are never the target of any call or jump. */
    760{
    761	"calls: unreachable code",
    762	.insns = {
    763	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    764	BPF_EXIT_INSN(),
    765	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    766	BPF_EXIT_INSN(),
    767	BPF_MOV64_IMM(BPF_REG_0, 0),
    768	BPF_EXIT_INSN(),
    769	BPF_MOV64_IMM(BPF_REG_0, 0),
    770	BPF_EXIT_INSN(),
    771	},
    772	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    773	.errstr = "unreachable insn 6",
    774	.result = REJECT,
    775},
/* Call target before instruction 0 (off -4). */
    776{
    777	"calls: invalid call",
    778	.insns = {
    779	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    780	BPF_EXIT_INSN(),
    781	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
    782	BPF_EXIT_INSN(),
    783	},
    784	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    785	.errstr = "invalid destination",
    786	.result = REJECT,
    787},
/* Call target far past the end of the program (off 0x7fffffff). */
    788{
    789	"calls: invalid call 2",
    790	.insns = {
    791	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    792	BPF_EXIT_INSN(),
    793	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
    794	BPF_EXIT_INSN(),
    795	},
    796	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    797	.errstr = "invalid destination",
    798	.result = REJECT,
    799},
/* Callee's JEQ (off -3) targets the caller's body. */
    800{
    801	"calls: jumping across function bodies. test1",
    802	.insns = {
    803	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    804	BPF_MOV64_IMM(BPF_REG_0, 0),
    805	BPF_EXIT_INSN(),
    806	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
    807	BPF_EXIT_INSN(),
    808	},
    809	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    810	.errstr = "jump out of range",
    811	.result = REJECT,
    812},
/* Caller's JEQ skips forward past its own exit into the callee. */
    813{
    814	"calls: jumping across function bodies. test2",
    815	.insns = {
    816	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
    817	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    818	BPF_MOV64_IMM(BPF_REG_0, 0),
    819	BPF_EXIT_INSN(),
    820	BPF_EXIT_INSN(),
    821	},
    822	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    823	.errstr = "jump out of range",
    824	.result = REJECT,
    825},
/* The last subprog ends with a conditional jump, not an exit. */
    826{
    827	"calls: call without exit",
    828	.insns = {
    829	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    830	BPF_EXIT_INSN(),
    831	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    832	BPF_EXIT_INSN(),
    833	BPF_MOV64_IMM(BPF_REG_0, 0),
    834	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
    835	},
    836	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    837	.errstr = "not an exit",
    838	.result = REJECT,
    839},
/* Call lands on the second half of a 16-byte BPF_LD_IMM64. */
    840{
    841	"calls: call into middle of ld_imm64",
    842	.insns = {
    843	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    844	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    845	BPF_MOV64_IMM(BPF_REG_0, 0),
    846	BPF_EXIT_INSN(),
    847	BPF_LD_IMM64(BPF_REG_0, 0),
    848	BPF_EXIT_INSN(),
    849	},
    850	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    851	.errstr = "last insn",
    852	.result = REJECT,
    853},
/* Both calls target the interior of the other function's body. */
    854{
    855	"calls: call into middle of other call",
    856	.insns = {
    857	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    858	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    859	BPF_MOV64_IMM(BPF_REG_0, 0),
    860	BPF_EXIT_INSN(),
    861	BPF_MOV64_IMM(BPF_REG_0, 0),
    862	BPF_MOV64_IMM(BPF_REG_0, 0),
    863	BPF_EXIT_INSN(),
    864	},
    865	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    866	.errstr = "last insn",
    867	.result = REJECT,
    868},
/* LD_ABS in the main prog combined with a subprog that calls
 * bpf_skb_vlan_push (which changes the packet) — accepted; the main
 * prog restores its skb pointer from R7 before the later LD_ABS run. */
    869{
    870	"calls: subprog call with ld_abs in main prog",
    871	.insns = {
    872	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    873	BPF_LD_ABS(BPF_B, 0),
    874	BPF_LD_ABS(BPF_H, 0),
    875	BPF_LD_ABS(BPF_W, 0),
    876	BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
    877	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    878	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
    879	BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
    880	BPF_LD_ABS(BPF_B, 0),
    881	BPF_LD_ABS(BPF_H, 0),
    882	BPF_LD_ABS(BPF_W, 0),
    883	BPF_EXIT_INSN(),
    884	BPF_MOV64_IMM(BPF_REG_2, 1),
    885	BPF_MOV64_IMM(BPF_REG_3, 2),
    886	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
    887	BPF_EXIT_INSN(),
    888	},
    889	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    890	.result = ACCEPT,
    891},
/* Control falls off the end of the first subprog into the next one
 * instead of exiting. */
    892{
    893	"calls: two calls with bad fallthrough",
    894	.insns = {
    895	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    896	BPF_EXIT_INSN(),
    897	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    898	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
    899	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
    900	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    901	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    902	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
    903	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
    904	BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
    905	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
    906		    offsetof(struct __sk_buff, len)),
    907	BPF_EXIT_INSN(),
    908	},
    909	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
    910	.errstr = "not an exit",
    911	.result = REJECT,
    912},
/* Caller passes a pointer to its own (initialized) stack slot; the
 * innermost subprog reads through it — accepted. */
    913{
    914	"calls: two calls with stack read",
    915	.insns = {
    916	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    917	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    918	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    919	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    920	BPF_EXIT_INSN(),
    921	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    922	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
    923	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
    924	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    925	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    926	BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
    927	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
    928	BPF_EXIT_INSN(),
    929	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
    930	BPF_EXIT_INSN(),
    931	},
    932	.prog_type = BPF_PROG_TYPE_XDP,
    933	.result = ACCEPT,
    934},
    935{
	/* Subprog 1 writes its result through the fp-16 pointer into the
	 * main prog's frame; main reads the slot back afterwards. Writing
	 * into a caller's frame via a passed stack pointer is allowed.
	 * Expect: ACCEPT.
	 */
    936	"calls: two calls with stack write",
    937	.insns = {
    938	/* main prog */
    939	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    940	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
    941	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
    942	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    943	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
    944	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    945	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
    946	BPF_EXIT_INSN(),
    947
    948	/* subprog 1 */
    949	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    950	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
    951	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
    952	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
    953	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    954	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    955	BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
    956	BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
    957	/* write into stack frame of main prog */
    958	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
    959	BPF_EXIT_INSN(),
    960
    961	/* subprog 2 */
    962	/* read from stack frame of main prog */
    963	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
    964	BPF_EXIT_INSN(),
    965	},
    966	.prog_type = BPF_PROG_TYPE_XDP,
    967	.result = ACCEPT,
    968},
    969{
	/* Each of the two frames touches fp[-300]; 300 + 300 exceeds the
	 * combined stack limit. Caller's deep access happens before the
	 * call. Expect: REJECT ("combined stack size").
	 */
    970	"calls: stack overflow using two frames (pre-call access)",
    971	.insns = {
    972	/* prog 1 */
    973	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
    974	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
    975	BPF_EXIT_INSN(),
    976
    977	/* prog 2 */
    978	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
    979	BPF_MOV64_IMM(BPF_REG_0, 0),
    980	BPF_EXIT_INSN(),
    981	},
    982	.prog_type = BPF_PROG_TYPE_XDP,
    983	.errstr = "combined stack size",
    984	.result = REJECT,
    985},
    986{
	/* Same overflow, but the caller's fp[-300] access happens after the
	 * call returns; it must still count toward the combined limit.
	 * Expect: REJECT ("combined stack size").
	 */
    987	"calls: stack overflow using two frames (post-call access)",
    988	.insns = {
    989	/* prog 1 */
    990	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
    991	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
    992	BPF_EXIT_INSN(),
    993
    994	/* prog 2 */
    995	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
    996	BPF_MOV64_IMM(BPF_REG_0, 0),
    997	BPF_EXIT_INSN(),
    998	},
    999	.prog_type = BPF_PROG_TYPE_XDP,
   1000	.errstr = "combined stack size",
   1001	.result = REJECT,
   1002},
   1003{
	/* main(32 bytes) calls A(256) and B(64); B also calls A. The deepest
	 * chain stays under the 512-byte limit (see the stack-size note
	 * below). Expect: ACCEPT.
	 */
   1004	"calls: stack depth check using three frames. test1",
   1005	.insns = {
   1006	/* main */
   1007	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
   1008	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
   1009	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
   1010	BPF_MOV64_IMM(BPF_REG_0, 0),
   1011	BPF_EXIT_INSN(),
   1012	/* A */
   1013	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
   1014	BPF_EXIT_INSN(),
   1015	/* B */
   1016	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
   1017	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
   1018	BPF_EXIT_INSN(),
   1019	},
   1020	.prog_type = BPF_PROG_TYPE_XDP,
   1021	/* stack_main=32, stack_A=256, stack_B=64
   1022	 * and max(main+A, main+A+B) < 512
   1023	 */
   1024	.result = ACCEPT,
   1025},
   1026{
	/* Same call graph as test1 with the A/B stack sizes swapped; the
	 * deepest chain is still below 512 bytes. Expect: ACCEPT.
	 */
   1027	"calls: stack depth check using three frames. test2",
   1028	.insns = {
   1029	/* main */
   1030	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
   1031	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
   1032	BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
   1033	BPF_MOV64_IMM(BPF_REG_0, 0),
   1034	BPF_EXIT_INSN(),
   1035	/* A */
   1036	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
   1037	BPF_EXIT_INSN(),
   1038	/* B */
   1039	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
   1040	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
   1041	BPF_EXIT_INSN(),
   1042	},
   1043	.prog_type = BPF_PROG_TYPE_XDP,
   1044	/* stack_main=32, stack_A=64, stack_B=256
   1045	 * and max(main+A, main+A+B) < 512
   1046	 */
   1047	.result = ACCEPT,
   1048},
   1049{
	/* Larger frames (main=64, A=224, B=256) so the deepest chain
	 * main->B->A exceeds the 512-byte limit; conditional jumps make
	 * the verifier explore the paths. Expect: REJECT ("combined stack").
	 */
   1050	"calls: stack depth check using three frames. test3",
   1051	.insns = {
   1052	/* main */
   1053	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1054	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
   1055	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
   1056	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
   1057	BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
   1058	BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
   1059	BPF_MOV64_IMM(BPF_REG_0, 0),
   1060	BPF_EXIT_INSN(),
   1061	/* A */
   1062	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
   1063	BPF_EXIT_INSN(),
   1064	BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
   1065	BPF_JMP_IMM(BPF_JA, 0, 0, -3),
   1066	/* B */
   1067	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
   1068	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
   1069	BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
   1070	BPF_EXIT_INSN(),
   1071	},
   1072	.prog_type = BPF_PROG_TYPE_XDP,
   1073	/* stack_main=64, stack_A=224, stack_B=256
   1074	 * and max(main+A, main+A+B) > 512
   1075	 */
   1076	.errstr = "combined stack",
   1077	.result = REJECT,
   1078},
   1079{
	/* func1 either allocates 300 bytes itself or recurses into func2,
	 * which also allocates 300 bytes; the worst-case chain
	 * main->func1->func2 overflows. Expect: REJECT ("combined stack").
	 */
   1080	"calls: stack depth check using three frames. test4",
   1081	/* void main(void) {
   1082	 *   func1(0);
   1083	 *   func1(1);
   1084	 *   func2(1);
   1085	 * }
   1086	 * void func1(int alloc_or_recurse) {
   1087	 *   if (alloc_or_recurse) {
   1088	 *     frame_pointer[-300] = 1;
   1089	 *   } else {
   1090	 *     func2(alloc_or_recurse);
   1091	 *   }
   1092	 * }
   1093	 * void func2(int alloc_or_recurse) {
   1094	 *   if (alloc_or_recurse) {
   1095	 *     frame_pointer[-300] = 1;
   1096	 *   }
   1097	 * }
   1098	 */
   1099	.insns = {
   1100	/* main */
   1101	BPF_MOV64_IMM(BPF_REG_1, 0),
   1102	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
   1103	BPF_MOV64_IMM(BPF_REG_1, 1),
   1104	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
   1105	BPF_MOV64_IMM(BPF_REG_1, 1),
   1106	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
   1107	BPF_MOV64_IMM(BPF_REG_0, 0),
   1108	BPF_EXIT_INSN(),
   1109	/* A */
   1110	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
   1111	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
   1112	BPF_EXIT_INSN(),
   1113	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
   1114	BPF_EXIT_INSN(),
   1115	/* B */
   1116	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
   1117	BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
   1118	BPF_EXIT_INSN(),
   1119	},
   1120	.prog_type = BPF_PROG_TYPE_XDP,
   1121	.result = REJECT,
   1122	.errstr = "combined stack",
   1123},
   1124{
	/* Linear call chain main->A->B->...->H: the nesting of subprog
	 * calls exceeds the verifier's call-depth limit.
	 * Expect: REJECT ("call stack").
	 */
   1125	"calls: stack depth check using three frames. test5",
   1126	.insns = {
   1127	/* main */
   1128	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
   1129	BPF_EXIT_INSN(),
   1130	/* A */
   1131	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
   1132	BPF_EXIT_INSN(),
   1133	/* B */
   1134	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
   1135	BPF_EXIT_INSN(),
   1136	/* C */
   1137	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
   1138	BPF_EXIT_INSN(),
   1139	/* D */
   1140	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
   1141	BPF_EXIT_INSN(),
   1142	/* E */
   1143	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
   1144	BPF_EXIT_INSN(),
   1145	/* F */
   1146	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
   1147	BPF_EXIT_INSN(),
   1148	/* G */
   1149	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
   1150	BPF_EXIT_INSN(),
   1151	/* H */
   1152	BPF_MOV64_IMM(BPF_REG_0, 0),
   1153	BPF_EXIT_INSN(),
   1154	},
   1155	.prog_type = BPF_PROG_TYPE_XDP,
   1156	.errstr = "call stack",
   1157	.result = REJECT,
   1158},
   1159{
	/* As test5, but the over-deep chain sits behind a branch that main
	 * (R1 == 0) never takes; the depth limit is still enforced on it.
	 * Expect: REJECT ("call stack").
	 */
   1160	"calls: stack depth check in dead code",
   1161	.insns = {
   1162	/* main */
   1163	BPF_MOV64_IMM(BPF_REG_1, 0),
   1164	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
   1165	BPF_EXIT_INSN(),
   1166	/* A */
   1167	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
   1168	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
   1169	BPF_MOV64_IMM(BPF_REG_0, 0),
   1170	BPF_EXIT_INSN(),
   1171	/* B */
   1172	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
   1173	BPF_EXIT_INSN(),
   1174	/* C */
   1175	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
   1176	BPF_EXIT_INSN(),
   1177	/* D */
   1178	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
   1179	BPF_EXIT_INSN(),
   1180	/* E */
   1181	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
   1182	BPF_EXIT_INSN(),
   1183	/* F */
   1184	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
   1185	BPF_EXIT_INSN(),
   1186	/* G */
   1187	BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
   1188	BPF_EXIT_INSN(),
   1189	/* H */
   1190	BPF_MOV64_IMM(BPF_REG_0, 0),
   1191	BPF_EXIT_INSN(),
   1192	},
   1193	.prog_type = BPF_PROG_TYPE_XDP,
   1194	.errstr = "call stack",
   1195	.result = REJECT,
   1196},
   1197{
	/* The callee stores a stack pointer (R1) through the caller-stack
	 * pointer it received; spilling a pointer into the caller's frame
	 * this way is refused. Expect: REJECT ("cannot spill").
	 */
   1198	"calls: spill into caller stack frame",
   1199	.insns = {
   1200	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1201	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1202	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1203	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
   1204	BPF_EXIT_INSN(),
   1205	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
   1206	BPF_MOV64_IMM(BPF_REG_0, 0),
   1207	BPF_EXIT_INSN(),
   1208	},
   1209	.prog_type = BPF_PROG_TYPE_XDP,
   1210	.errstr = "cannot spill",
   1211	.result = REJECT,
   1212},
   1213{
	/* The callee stores the scalar 42 through the caller's fp-8 pointer;
	 * main reads it back via R6 and returns it.
	 * Expect: ACCEPT, retval 42.
	 */
   1214	"calls: write into caller stack frame",
   1215	.insns = {
   1216	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1217	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1218	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1219	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1220	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
   1221	BPF_EXIT_INSN(),
   1222	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
   1223	BPF_MOV64_IMM(BPF_REG_0, 0),
   1224	BPF_EXIT_INSN(),
   1225	},
   1226	.prog_type = BPF_PROG_TYPE_XDP,
   1227	.result = ACCEPT,
   1228	.retval = 42,
   1229},
   1230{
	/* The callee returns a pointer into its own (dead after return)
	 * stack frame, which the caller then writes through.
	 * Expect: REJECT ("cannot return stack pointer").
	 */
   1231	"calls: write into callee stack frame",
   1232	.insns = {
   1233	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1234	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
   1235	BPF_EXIT_INSN(),
   1236	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
   1237	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
   1238	BPF_EXIT_INSN(),
   1239	},
   1240	.prog_type = BPF_PROG_TYPE_XDP,
   1241	.errstr = "cannot return stack pointer",
   1242	.result = REJECT,
   1243},
   1244{
	/* Subprogs exit without setting R0 ("void" return); that is fine
	 * because the caller never reads R0 after the call — main reloads
	 * R0 from fp-16 instead. Expect: ACCEPT.
	 */
   1245	"calls: two calls with stack write and void return",
   1246	.insns = {
   1247	/* main prog */
   1248	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1249	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1250	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1251	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1252	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1253	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1254	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
   1255	BPF_EXIT_INSN(),
   1256
   1257	/* subprog 1 */
   1258	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1259	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1260	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
   1261	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
   1262	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
   1263	BPF_EXIT_INSN(),
   1264
   1265	/* subprog 2 */
   1266	/* write into stack frame of main prog */
   1267	BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
   1268	BPF_EXIT_INSN(), /* void return */
   1269	},
   1270	.prog_type = BPF_PROG_TYPE_XDP,
   1271	.result = ACCEPT,
   1272},
   1273{
	/* The subprog has a path (R1 != 0) that exits without writing R0,
	 * yet the caller copies R0 after the call, so R0's value is
	 * ambiguous. Expect: REJECT ("R0 !read_ok"); unprivileged loading
	 * fails earlier ("allowed for").
	 */
   1274	"calls: ambiguous return value",
   1275	.insns = {
   1276	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1277	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
   1278	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
   1279	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
   1280	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1281	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
   1282	BPF_EXIT_INSN(),
   1283	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
   1284	BPF_MOV64_IMM(BPF_REG_0, 0),
   1285	BPF_EXIT_INSN(),
   1286	},
   1287	.errstr_unpriv = "allowed for",
   1288	.result_unpriv = REJECT,
   1289	.errstr = "R0 !read_ok",
   1290	.result = REJECT,
   1291},
   1292{
	/* Subprog 2 stores a map_value_or_null pointer into main's stack
	 * slots (fp-8, fp-16); main NULL-checks each slot before writing
	 * through it. Expect: ACCEPT.
	 */
   1293	"calls: two calls that return map_value",
   1294	.insns = {
   1295	/* main prog */
   1296	/* pass fp-16, fp-8 into a function */
   1297	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1298	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1299	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1300	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1301	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
   1302
   1303	/* fetch map_value_ptr from the stack of this function */
   1304	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
   1305	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
   1306	/* write into map value */
   1307	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1308	/* fetch second map_value_ptr from the stack */
   1309	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
   1310	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
   1311	/* write into map value */
   1312	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1313	BPF_MOV64_IMM(BPF_REG_0, 0),
   1314	BPF_EXIT_INSN(),
   1315
   1316	/* subprog 1 */
   1317	/* call 3rd function twice */
   1318	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1319	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1320	/* first time with fp-8 */
   1321	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
   1322	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
   1323	/* second time with fp-16 */
   1324	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
   1325	BPF_EXIT_INSN(),
   1326
   1327	/* subprog 2 */
   1328	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1329	/* lookup from map */
   1330	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1331	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1332	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1333	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1334	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1335	/* write map_value_ptr into stack frame of main prog */
   1336	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1337	BPF_MOV64_IMM(BPF_REG_0, 0),
   1338	BPF_EXIT_INSN(), /* return 0 */
   1339	},
   1340	.prog_type = BPF_PROG_TYPE_XDP,
   1341	.fixup_map_hash_8b = { 23 },
   1342	.result = ACCEPT,
   1343},
   1344{
	/* Subprog 2 returns 1 only when it stored a non-NULL map value
	 * pointer; subprog 1 dereferences the stored pointer only when the
	 * return value is 1. Expect: ACCEPT.
	 */
   1345	"calls: two calls that return map_value with bool condition",
   1346	.insns = {
   1347	/* main prog */
   1348	/* pass fp-16, fp-8 into a function */
   1349	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1350	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1351	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1352	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1353	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1354	BPF_MOV64_IMM(BPF_REG_0, 0),
   1355	BPF_EXIT_INSN(),
   1356
   1357	/* subprog 1 */
   1358	/* call 3rd function twice */
   1359	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1360	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1361	/* first time with fp-8 */
   1362	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
   1363	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
   1364	/* fetch map_value_ptr from the stack of this function */
   1365	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
   1366	/* write into map value */
   1367	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1368	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
   1369	/* second time with fp-16 */
   1370	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
   1371	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
   1372	/* fetch second map_value_ptr from the stack */
   1373	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
   1374	/* write into map value */
   1375	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1376	BPF_EXIT_INSN(),
   1377
   1378	/* subprog 2 */
   1379	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1380	/* lookup from map */
   1381	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1382	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1383	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1384	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1385	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1386	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1387	BPF_MOV64_IMM(BPF_REG_0, 0),
   1388	BPF_EXIT_INSN(), /* return 0 */
   1389	/* write map_value_ptr into stack frame of main prog */
   1390	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1391	BPF_MOV64_IMM(BPF_REG_0, 1),
   1392	BPF_EXIT_INSN(), /* return 1 */
   1393	},
   1394	.prog_type = BPF_PROG_TYPE_XDP,
   1395	.fixup_map_hash_8b = { 23 },
   1396	.result = ACCEPT,
   1397},
   1398{
	/* Same as above, but the second check tests R0 != 0 instead of
	 * R0 != 1, so the fp-16 slot can be read on the return-0 path where
	 * it was never written.
	 * Expect: REJECT ("invalid read from stack R7 off=-16 size=8").
	 */
   1399	"calls: two calls that return map_value with incorrect bool check",
   1400	.insns = {
   1401	/* main prog */
   1402	/* pass fp-16, fp-8 into a function */
   1403	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1404	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1405	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1406	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1407	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1408	BPF_MOV64_IMM(BPF_REG_0, 0),
   1409	BPF_EXIT_INSN(),
   1410
   1411	/* subprog 1 */
   1412	/* call 3rd function twice */
   1413	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1414	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1415	/* first time with fp-8 */
   1416	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
   1417	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
   1418	/* fetch map_value_ptr from the stack of this function */
   1419	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
   1420	/* write into map value */
   1421	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1422	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
   1423	/* second time with fp-16 */
   1424	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
   1425	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1426	/* fetch second map_value_ptr from the stack */
   1427	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
   1428	/* write into map value */
   1429	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1430	BPF_EXIT_INSN(),
   1431
   1432	/* subprog 2 */
   1433	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1434	/* lookup from map */
   1435	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1436	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1437	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1438	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1439	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1440	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1441	BPF_MOV64_IMM(BPF_REG_0, 0),
   1442	BPF_EXIT_INSN(), /* return 0 */
   1443	/* write map_value_ptr into stack frame of main prog */
   1444	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1445	BPF_MOV64_IMM(BPF_REG_0, 1),
   1446	BPF_EXIT_INSN(), /* return 1 */
   1447	},
   1448	.prog_type = BPF_PROG_TYPE_XDP,
   1449	.fixup_map_hash_8b = { 23 },
   1450	.result = REJECT,
   1451	.errstr = "invalid read from stack R7 off=-16 size=8",
   1452},
   1453{
	/* Subprog 1 does two lookups, stores the pointers into main's
	 * fp-8/fp-16 slots and tracks validity in R8/R9 flags; subprog 2
	 * dereferences only flagged-valid slots, but its second store is at
	 * offset 2 into an 8-byte map value.
	 * Expect: REJECT ("invalid access to map value ... off=2 size=8").
	 */
   1454	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
   1455	.insns = {
   1456	/* main prog */
   1457	/* pass fp-16, fp-8 into a function */
   1458	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1459	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1460	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1461	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1462	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1463	BPF_MOV64_IMM(BPF_REG_0, 0),
   1464	BPF_EXIT_INSN(),
   1465
   1466	/* subprog 1 */
   1467	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1468	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1469	/* 1st lookup from map */
   1470	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1471	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1472	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1473	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1474	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1475	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1476	BPF_MOV64_IMM(BPF_REG_8, 0),
   1477	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
   1478	/* write map_value_ptr into stack frame of main prog at fp-8 */
   1479	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1480	BPF_MOV64_IMM(BPF_REG_8, 1),
   1481
   1482	/* 2nd lookup from map */
   1483	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
   1484	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1485	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1486	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
   1487		     BPF_FUNC_map_lookup_elem),
   1488	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1489	BPF_MOV64_IMM(BPF_REG_9, 0),
   1490	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
   1491	/* write map_value_ptr into stack frame of main prog at fp-16 */
   1492	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
   1493	BPF_MOV64_IMM(BPF_REG_9, 1),
   1494
   1495	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
   1496	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
   1497	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
   1498	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
   1499	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
   1500	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
   1501	BPF_EXIT_INSN(),
   1502
   1503	/* subprog 2 */
   1504	/* if arg2 == 1 do *arg1 = 0 */
   1505	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
   1506	/* fetch map_value_ptr from the stack of this function */
   1507	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
   1508	/* write into map value */
   1509	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1510
   1511	/* if arg4 == 1 do *arg3 = 0 */
   1512	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
   1513	/* fetch map_value_ptr from the stack of this function */
   1514	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
   1515	/* write into map value */
   1516	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
   1517	BPF_EXIT_INSN(),
   1518	},
   1519	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
   1520	.fixup_map_hash_8b = { 12, 22 },
   1521	.result = REJECT,
   1522	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
   1523	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
   1524},
   1525{
	/* Identical structure to test1, but the second store is at offset 0,
	 * fully inside the 8-byte map value. Expect: ACCEPT.
	 */
   1526	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
   1527	.insns = {
   1528	/* main prog */
   1529	/* pass fp-16, fp-8 into a function */
   1530	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1531	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1532	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1533	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1534	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1535	BPF_MOV64_IMM(BPF_REG_0, 0),
   1536	BPF_EXIT_INSN(),
   1537
   1538	/* subprog 1 */
   1539	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1540	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1541	/* 1st lookup from map */
   1542	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1543	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1544	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1545	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1546	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1547	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1548	BPF_MOV64_IMM(BPF_REG_8, 0),
   1549	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
   1550	/* write map_value_ptr into stack frame of main prog at fp-8 */
   1551	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1552	BPF_MOV64_IMM(BPF_REG_8, 1),
   1553
   1554	/* 2nd lookup from map */
   1555	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
   1556	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1557	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1558	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
   1559		     BPF_FUNC_map_lookup_elem),
   1560	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1561	BPF_MOV64_IMM(BPF_REG_9, 0),
   1562	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
   1563	/* write map_value_ptr into stack frame of main prog at fp-16 */
   1564	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
   1565	BPF_MOV64_IMM(BPF_REG_9, 1),
   1566
   1567	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
   1568	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
   1569	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
   1570	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
   1571	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
   1572	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
   1573	BPF_EXIT_INSN(),
   1574
   1575	/* subprog 2 */
   1576	/* if arg2 == 1 do *arg1 = 0 */
   1577	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
   1578	/* fetch map_value_ptr from the stack of this function */
   1579	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
   1580	/* write into map value */
   1581	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1582
   1583	/* if arg4 == 1 do *arg3 = 0 */
   1584	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
   1585	/* fetch map_value_ptr from the stack of this function */
   1586	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
   1587	/* write into map value */
   1588	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1589	BPF_EXIT_INSN(),
   1590	},
   1591	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
   1592	.fixup_map_hash_8b = { 12, 22 },
   1593	.result = REJECT,
   1594},
   1595{
	/* Same data flow as test1, but built from jumps within one function
	 * instead of subprog calls; the off=2 store into the 8-byte map
	 * value must still be caught.
	 * Expect: REJECT ("invalid access to map value ... off=2 size=8").
	 */
   1596	"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
   1597	.insns = {
   1598	/* main prog */
   1599	/* pass fp-16, fp-8 into a function */
   1600	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1601	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1602	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1603	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1604	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
   1605	BPF_MOV64_IMM(BPF_REG_0, 0),
   1606	BPF_EXIT_INSN(),
   1607
   1608	/* subprog 1 */
   1609	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1610	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1611	/* 1st lookup from map */
   1612	BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
   1613	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1614	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
   1615	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1616	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1617	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1618	BPF_MOV64_IMM(BPF_REG_8, 0),
   1619	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
   1620	/* write map_value_ptr into stack frame of main prog at fp-8 */
   1621	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1622	BPF_MOV64_IMM(BPF_REG_8, 1),
   1623
   1624	/* 2nd lookup from map */
   1625	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1626	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
   1627	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1628	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1629	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1630	BPF_MOV64_IMM(BPF_REG_9, 0),  // 26
   1631	BPF_JMP_IMM(BPF_JA, 0, 0, 2),
   1632	/* write map_value_ptr into stack frame of main prog at fp-16 */
   1633	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
   1634	BPF_MOV64_IMM(BPF_REG_9, 1),
   1635
   1636	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
   1637	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30
   1638	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
   1639	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
   1640	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
   1641	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34
   1642	BPF_JMP_IMM(BPF_JA, 0, 0, -30),
   1643
   1644	/* subprog 2 */
   1645	/* if arg2 == 1 do *arg1 = 0 */
   1646	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
   1647	/* fetch map_value_ptr from the stack of this function */
   1648	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
   1649	/* write into map value */
   1650	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1651
   1652	/* if arg4 == 1 do *arg3 = 0 */
   1653	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
   1654	/* fetch map_value_ptr from the stack of this function */
   1655	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
   1656	/* write into map value */
   1657	BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
   1658	BPF_JMP_IMM(BPF_JA, 0, 0, -8),
   1659	},
   1660	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
   1661	.fixup_map_hash_8b = { 12, 22 },
   1662	.result = REJECT,
   1663	.errstr = "invalid access to map value, value_size=8 off=2 size=8",
   1664	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
   1665},
   1666{
	/* The possibly-NULL lookup results are stored into main's frame and
	 * their NULL-ness is mirrored in R8/R9 flags; subprog 2 dereferences
	 * a slot only when its flag is 1 (non-NULL). Expect: ACCEPT.
	 */
   1667	"calls: two calls that receive map_value_ptr_or_null via arg. test1",
   1668	.insns = {
   1669	/* main prog */
   1670	/* pass fp-16, fp-8 into a function */
   1671	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1672	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1673	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1674	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1675	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1676	BPF_MOV64_IMM(BPF_REG_0, 0),
   1677	BPF_EXIT_INSN(),
   1678
   1679	/* subprog 1 */
   1680	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1681	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1682	/* 1st lookup from map */
   1683	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1684	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1685	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1686	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1687	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1688	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
   1689	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1690	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1691	BPF_MOV64_IMM(BPF_REG_8, 0),
   1692	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
   1693	BPF_MOV64_IMM(BPF_REG_8, 1),
   1694
   1695	/* 2nd lookup from map */
   1696	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1697	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1698	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1699	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1700	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
   1701	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
   1702	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1703	BPF_MOV64_IMM(BPF_REG_9, 0),
   1704	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
   1705	BPF_MOV64_IMM(BPF_REG_9, 1),
   1706
   1707	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
   1708	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
   1709	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
   1710	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
   1711	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
   1712	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
   1713	BPF_EXIT_INSN(),
   1714
   1715	/* subprog 2 */
   1716	/* if arg2 == 1 do *arg1 = 0 */
   1717	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
   1718	/* fetch map_value_ptr from the stack of this function */
   1719	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
   1720	/* write into map value */
   1721	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1722
   1723	/* if arg4 == 1 do *arg3 = 0 */
   1724	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
   1725	/* fetch map_value_ptr from the stack of this function */
   1726	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
   1727	/* write into map value */
   1728	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1729	BPF_EXIT_INSN(),
   1730	},
   1731	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
   1732	.fixup_map_hash_8b = { 12, 22 },
   1733	.result = ACCEPT,
   1734},
   1735{
	/* As test1, but the second guard is inverted: subprog 2 dereferences
	 * the fp-16 slot when arg4 == 0, i.e. exactly when the lookup
	 * returned NULL, so the loaded value is not a valid pointer.
	 * Expect: REJECT ("R0 invalid mem access 'scalar'").
	 */
   1736	"calls: two calls that receive map_value_ptr_or_null via arg. test2",
   1737	.insns = {
   1738	/* main prog */
   1739	/* pass fp-16, fp-8 into a function */
   1740	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
   1741	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
   1742	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1743	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
   1744	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
   1745	BPF_MOV64_IMM(BPF_REG_0, 0),
   1746	BPF_EXIT_INSN(),
   1747
   1748	/* subprog 1 */
   1749	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
   1750	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
   1751	/* 1st lookup from map */
   1752	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
   1753	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1754	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1755	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1756	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1757	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
   1758	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
   1759	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1760	BPF_MOV64_IMM(BPF_REG_8, 0),
   1761	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
   1762	BPF_MOV64_IMM(BPF_REG_8, 1),
   1763
   1764	/* 2nd lookup from map */
   1765	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
   1766	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
   1767	BPF_LD_MAP_FD(BPF_REG_1, 0),
   1768	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
   1769	/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
   1770	BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
   1771	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
   1772	BPF_MOV64_IMM(BPF_REG_9, 0),
   1773	BPF_JMP_IMM(BPF_JA, 0, 0, 1),
   1774	BPF_MOV64_IMM(BPF_REG_9, 1),
   1775
   1776	/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
   1777	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
   1778	BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
   1779	BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
   1780	BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
   1781	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
   1782	BPF_EXIT_INSN(),
   1783
   1784	/* subprog 2 */
   1785	/* if arg2 == 1 do *arg1 = 0 */
   1786	BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
   1787	/* fetch map_value_ptr from the stack of this function */
   1788	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
   1789	/* write into map value */
   1790	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1791
   1792	/* if arg4 == 0 do *arg3 = 0 */
   1793	BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
   1794	/* fetch map_value_ptr from the stack of this function */
   1795	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
   1796	/* write into map value */
   1797	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
   1798	BPF_EXIT_INSN(),
   1799	},
   1800	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
   1801	.fixup_map_hash_8b = { 12, 22 },
   1802	.result = REJECT,
   1803	.errstr = "R0 invalid mem access 'scalar'",
   1804},
    1805{
    1806	"calls: pkt_ptr spill into caller stack",
	/* ACCEPT: the subprog spills an unchecked pkt_ptr into the caller's
	 * stack slot, but only reads it back after the range check against
	 * data_end, so the dereference is safe.
	 */
    1807	.insns = {
    1808	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    1809	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    1810	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
    1811	BPF_EXIT_INSN(),
    1812
    1813	/* subprog 1 */
    1814	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1815		    offsetof(struct __sk_buff, data)),
    1816	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    1817		    offsetof(struct __sk_buff, data_end)),
    1818	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    1819	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    1820	/* spill unchecked pkt_ptr into stack of caller */
    1821	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1822	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
    1823	/* now the pkt range is verified, read pkt_ptr from stack */
    1824	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
    1825	/* write 4 bytes into packet */
    1826	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    1827	BPF_EXIT_INSN(),
    1828	},
    1829	.result = ACCEPT,
    1830	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    1831	.retval = POINTER_VALUE,
    1832	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    1833},
    1834{
    1835	"calls: pkt_ptr spill into caller stack 2",
	/* REJECT: the caller dereferences the spilled pkt_ptr unconditionally
	 * after the call, including the path where the subprog's range check
	 * failed — "invalid access to packet".
	 */
    1836	.insns = {
    1837	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    1838	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    1839	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    1840	/* Marking is still kept, but not in all cases safe. */
    1841	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    1842	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
    1843	BPF_EXIT_INSN(),
    1844
    1845	/* subprog 1 */
    1846	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1847		    offsetof(struct __sk_buff, data)),
    1848	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    1849		    offsetof(struct __sk_buff, data_end)),
    1850	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    1851	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    1852	/* spill unchecked pkt_ptr into stack of caller */
    1853	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1854	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
    1855	/* now the pkt range is verified, read pkt_ptr from stack */
    1856	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
    1857	/* write 4 bytes into packet */
    1858	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    1859	BPF_EXIT_INSN(),
    1860	},
    1861	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    1862	.errstr = "invalid access to packet",
    1863	.result = REJECT,
    1864	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    1865},
    1866{
    1867	"calls: pkt_ptr spill into caller stack 3",
	/* ACCEPT: the subprog returns r5 (1 only on the range-checked path);
	 * the caller dereferences the spilled pkt_ptr only when r0 != 0, so
	 * the unchecked path never reaches the packet write.
	 */
    1868	.insns = {
    1869	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    1870	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    1871	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    1872	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
    1873	/* Marking is still kept and safe here. */
    1874	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    1875	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
    1876	BPF_EXIT_INSN(),
    1877
    1878	/* subprog 1 */
    1879	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1880		    offsetof(struct __sk_buff, data)),
    1881	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    1882		    offsetof(struct __sk_buff, data_end)),
    1883	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    1884	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    1885	/* spill unchecked pkt_ptr into stack of caller */
    1886	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1887	BPF_MOV64_IMM(BPF_REG_5, 0),
    1888	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
    1889	BPF_MOV64_IMM(BPF_REG_5, 1),
    1890	/* now the pkt range is verified, read pkt_ptr from stack */
    1891	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
    1892	/* write 4 bytes into packet */
    1893	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    1894	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    1895	BPF_EXIT_INSN(),
    1896	},
    1897	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    1898	.result = ACCEPT,
    1899	.retval = 1,
    1900	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    1901},
    1902{
    1903	"calls: pkt_ptr spill into caller stack 4",
	/* ACCEPT: like test 3, but the subprog never reloads the pkt_ptr from
	 * the stack — it writes through r2 directly on the checked path; the
	 * caller still gates its own dereference on r0 != 0.
	 */
    1904	.insns = {
    1905	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    1906	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    1907	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    1908	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
    1909	/* Check marking propagated. */
    1910	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    1911	BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
    1912	BPF_EXIT_INSN(),
    1913
    1914	/* subprog 1 */
    1915	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1916		    offsetof(struct __sk_buff, data)),
    1917	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    1918		    offsetof(struct __sk_buff, data_end)),
    1919	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    1920	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    1921	/* spill unchecked pkt_ptr into stack of caller */
    1922	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1923	BPF_MOV64_IMM(BPF_REG_5, 0),
    1924	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
    1925	BPF_MOV64_IMM(BPF_REG_5, 1),
    1926	/* don't read back pkt_ptr from stack here */
    1927	/* write 4 bytes into packet */
    1928	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    1929	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    1930	BPF_EXIT_INSN(),
    1931	},
    1932	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    1933	.result = ACCEPT,
    1934	.retval = 1,
    1935	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    1936},
    1937{
    1938	"calls: pkt_ptr spill into caller stack 5",
	/* REJECT: the caller pre-loads the slot with the ctx pointer (r1),
	 * while the subprog conditionally overwrites it with a pkt_ptr — the
	 * same spill insn would carry two different pointer types, which the
	 * verifier forbids.
	 */
    1939	.insns = {
    1940	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    1941	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    1942	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
    1943	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    1944	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    1945	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
    1946	BPF_EXIT_INSN(),
    1947
    1948	/* subprog 1 */
    1949	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1950		    offsetof(struct __sk_buff, data)),
    1951	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    1952		    offsetof(struct __sk_buff, data_end)),
    1953	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    1954	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    1955	BPF_MOV64_IMM(BPF_REG_5, 0),
    1956	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
    1957	/* spill checked pkt_ptr into stack of caller */
    1958	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1959	BPF_MOV64_IMM(BPF_REG_5, 1),
    1960	/* don't read back pkt_ptr from stack here */
    1961	/* write 4 bytes into packet */
    1962	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    1963	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    1964	BPF_EXIT_INSN(),
    1965	},
    1966	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    1967	.errstr = "same insn cannot be used with different",
    1968	.result = REJECT,
    1969	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    1970},
    1971{
    1972	"calls: pkt_ptr spill into caller stack 6",
	/* REJECT: the slot holds data_end (pkt_end) when the subprog takes the
	 * unchecked path, so the caller's reload in r4 is not a dereferenceable
	 * pointer on all paths — "R4 invalid mem access".
	 */
    1973	.insns = {
    1974	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1975		    offsetof(struct __sk_buff, data_end)),
    1976	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    1977	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    1978	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1979	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    1980	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    1981	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
    1982	BPF_EXIT_INSN(),
    1983
    1984	/* subprog 1 */
    1985	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    1986		    offsetof(struct __sk_buff, data)),
    1987	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    1988		    offsetof(struct __sk_buff, data_end)),
    1989	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    1990	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    1991	BPF_MOV64_IMM(BPF_REG_5, 0),
    1992	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
    1993	/* spill checked pkt_ptr into stack of caller */
    1994	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    1995	BPF_MOV64_IMM(BPF_REG_5, 1),
    1996	/* don't read back pkt_ptr from stack here */
    1997	/* write 4 bytes into packet */
    1998	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    1999	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    2000	BPF_EXIT_INSN(),
    2001	},
    2002	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    2003	.errstr = "R4 invalid mem access",
    2004	.result = REJECT,
    2005	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    2006},
    2007{
    2008	"calls: pkt_ptr spill into caller stack 7",
	/* REJECT: like test 6, but the caller seeds the slot with scalar 0;
	 * on the subprog's unchecked path the reload yields a scalar, so the
	 * caller's load through r4 is "R4 invalid mem access".
	 */
    2009	.insns = {
    2010	BPF_MOV64_IMM(BPF_REG_2, 0),
    2011	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    2012	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    2013	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    2014	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    2015	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    2016	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
    2017	BPF_EXIT_INSN(),
    2018
    2019	/* subprog 1 */
    2020	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    2021		    offsetof(struct __sk_buff, data)),
    2022	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    2023		    offsetof(struct __sk_buff, data_end)),
    2024	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    2025	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    2026	BPF_MOV64_IMM(BPF_REG_5, 0),
    2027	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
    2028	/* spill checked pkt_ptr into stack of caller */
    2029	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    2030	BPF_MOV64_IMM(BPF_REG_5, 1),
    2031	/* don't read back pkt_ptr from stack here */
    2032	/* write 4 bytes into packet */
    2033	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    2034	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    2035	BPF_EXIT_INSN(),
    2036	},
    2037	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    2038	.errstr = "R4 invalid mem access",
    2039	.result = REJECT,
    2040	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    2041},
    2042{
    2043	"calls: pkt_ptr spill into caller stack 8",
	/* ACCEPT: the caller spills a range-checked pkt_ptr (it exits early if
	 * the JLE check fails), and the subprog only overwrites the slot with
	 * another checked pkt_ptr — the slot is a valid packet pointer on
	 * every path the caller reloads it.
	 */
    2044	.insns = {
    2045	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    2046		    offsetof(struct __sk_buff, data)),
    2047	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    2048		    offsetof(struct __sk_buff, data_end)),
    2049	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    2050	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    2051	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
    2052	BPF_EXIT_INSN(),
    2053	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    2054	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    2055	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    2056	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    2057	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    2058	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
    2059	BPF_EXIT_INSN(),
    2060
    2061	/* subprog 1 */
    2062	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    2063		    offsetof(struct __sk_buff, data)),
    2064	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    2065		    offsetof(struct __sk_buff, data_end)),
    2066	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    2067	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    2068	BPF_MOV64_IMM(BPF_REG_5, 0),
    2069	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
    2070	/* spill checked pkt_ptr into stack of caller */
    2071	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    2072	BPF_MOV64_IMM(BPF_REG_5, 1),
    2073	/* don't read back pkt_ptr from stack here */
    2074	/* write 4 bytes into packet */
    2075	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    2076	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    2077	BPF_EXIT_INSN(),
    2078	},
    2079	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    2080	.result = ACCEPT,
    2081	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    2082},
    2083{
    2084	"calls: pkt_ptr spill into caller stack 9",
	/* REJECT: unlike test 8, the subprog spills its pkt_ptr BEFORE the
	 * range check, so on the failed-check path the caller reloads an
	 * unchecked packet pointer — "invalid access to packet".
	 */
    2085	.insns = {
    2086	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    2087		    offsetof(struct __sk_buff, data)),
    2088	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    2089		    offsetof(struct __sk_buff, data_end)),
    2090	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    2091	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    2092	BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
    2093	BPF_EXIT_INSN(),
    2094	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
    2095	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
    2096	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    2097	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
    2098	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
    2099	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
    2100	BPF_EXIT_INSN(),
    2101
    2102	/* subprog 1 */
    2103	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
    2104		    offsetof(struct __sk_buff, data)),
    2105	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
    2106		    offsetof(struct __sk_buff, data_end)),
    2107	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
    2108	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
    2109	BPF_MOV64_IMM(BPF_REG_5, 0),
    2110	/* spill unchecked pkt_ptr into stack of caller */
    2111	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
    2112	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
    2113	BPF_MOV64_IMM(BPF_REG_5, 1),
    2114	/* don't read back pkt_ptr from stack here */
    2115	/* write 4 bytes into packet */
    2116	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
    2117	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
    2118	BPF_EXIT_INSN(),
    2119	},
    2120	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
    2121	.errstr = "invalid access to packet",
    2122	.result = REJECT,
    2123	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
    2124},
    2125{
    2126	"calls: caller stack init to zero or map_value_or_null",
	/* ACCEPT: fp-8 is either the caller's zero init (subprog returns early
	 * when ctx == 0) or a map_value_or_null written by the subprog; the
	 * caller's JEQ 0 guard covers both cases before dereferencing.
	 */
    2127	.insns = {
    2128	BPF_MOV64_IMM(BPF_REG_0, 0),
    2129	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
    2130	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    2131	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    2132	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    2133	/* fetch map_value_or_null or const_zero from stack */
    2134	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
    2135	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
    2136	/* store into map_value */
    2137	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
    2138	BPF_EXIT_INSN(),
    2139
    2140	/* subprog 1 */
    2141	/* if (ctx == 0) return; */
    2142	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
    2143	/* else bpf_map_lookup() and *(fp - 8) = r0 */
    2144	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
    2145	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    2146	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    2147	BPF_LD_MAP_FD(BPF_REG_1, 0),
    2148	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    2149	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    2150	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
    2151	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
    2152	BPF_EXIT_INSN(),
    2153	},
    2154	.fixup_map_hash_8b = { 13 },
    2155	.result = ACCEPT,
    2156	.prog_type = BPF_PROG_TYPE_XDP,
    2157},
    2158{
    2159	"calls: stack init to zero and pruning",
	/* REJECT: on the fall-through branch fp-8 is never initialized, so the
	 * helper's indirect read of the 8-byte key must fail; pruning must not
	 * let the zero-init branch's state mask this.
	 */
    2160	.insns = {
    2161	/* first make allocated_stack 16 byte */
    2162	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
    2163	/* now fork the execution such that the false branch
    2164	 * of JGT insn will be verified second and it skips zero
    2165	 * init of fp-8 stack slot. If stack liveness marking
    2166	 * is missing live_read marks from call map_lookup
    2167	 * processing then pruning will incorrectly assume
    2168	 * that fp-8 stack slot was unused in the fall-through
    2169	 * branch and will accept the program incorrectly
    2170	 */
    2171	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
    2172	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
    2173	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
    2174	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
    2175	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
    2176	BPF_LD_MAP_FD(BPF_REG_1, 0),
    2177	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    2178	BPF_EXIT_INSN(),
    2179	},
    2180	.fixup_map_hash_48b = { 6 },
    2181	.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
    2182	.result = REJECT,
    2183	.prog_type = BPF_PROG_TYPE_XDP,
    2184},
    2185{
    2186	"calls: ctx read at start of subprog",
	/* ACCEPT (priv): the subprog reads one byte from the ctx pointer it is
	 * handed as its first insn; unprivileged loads are rejected because
	 * SOCKET_FILTER programs may not use bpf-to-bpf calls without caps.
	 */
    2187	.insns = {
    2188	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
    2189	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
    2190	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
    2191	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
    2192	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
    2193	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    2194	BPF_EXIT_INSN(),
    2195	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
    2196	BPF_MOV64_IMM(BPF_REG_0, 0),
    2197	BPF_EXIT_INSN(),
    2198	},
    2199	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
    2200	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
    2201	.result_unpriv = REJECT,
    2202	.result = ACCEPT,
    2203},
    2204{
    2205	"calls: cross frame pruning",
	/* REJECT: r8 is 0 or 1 depending on prandom; the load through r9 is
	 * reachable on the r8 == 0 path where r9 was never written, so the
	 * verifier must report "!read_ok" rather than prune that path away
	 * after the call.
	 */
    2206	.insns = {
    2207	/* r8 = !!random();
    2208	 * call pruner()
    2209	 * if (r8)
    2210	 *     do something bad;
    2211	 */
    2212	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
    2213	BPF_MOV64_IMM(BPF_REG_8, 0),
    2214	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    2215	BPF_MOV64_IMM(BPF_REG_8, 1),
    2216	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
    2217	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    2218	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
    2219	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
    2220	BPF_MOV64_IMM(BPF_REG_0, 0),
    2221	BPF_EXIT_INSN(),
    2222	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
    2223	BPF_EXIT_INSN(),
    2224	},
    2225	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
    2226	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
    2227	.errstr = "!read_ok",
    2228	.result = REJECT,
    2229},
    2230{
    2231	"calls: cross frame pruning - liveness propagation",
	/* REJECT: the load reads through r2, which is never initialized on any
	 * path; liveness marks must propagate across the call so pruning does
	 * not hide the uninitialized read — expect "!read_ok".
	 */
    2232	.insns = {
    2233	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
    2234	BPF_MOV64_IMM(BPF_REG_8, 0),
    2235	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    2236	BPF_MOV64_IMM(BPF_REG_8, 1),
    2237	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
    2238	BPF_MOV64_IMM(BPF_REG_9, 0),
    2239	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
    2240	BPF_MOV64_IMM(BPF_REG_9, 1),
    2241	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
    2242	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
    2243	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
    2244	BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
    2245	BPF_MOV64_IMM(BPF_REG_0, 0),
    2246	BPF_EXIT_INSN(),
    2247	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
    2248	BPF_EXIT_INSN(),
    2249	},
    2250	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
    2251	.errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
    2252	.errstr = "!read_ok",
    2253	.result = REJECT,
    2254},