Back to home page

OSCL-LXR

 
 

    


/*
 * Verifier test cases for kernel-function (kfunc) calls, i.e.
 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0).
 * NOTE(review): .fixup_kfunc_btf_id pairs { "name", insn_idx } appear to
 * resolve the named kernel function into the call instruction at that
 * index — confirm against the test_verifier fixup logic.
 */
/* An unresolved kfunc call (imm left 0) on a reachable path must be
 * rejected, not silently dropped. */
0001 {
0002     "calls: invalid kfunc call not eliminated",
0003     .insns = {
0004     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0005     BPF_MOV64_IMM(BPF_REG_0, 1),
0006     BPF_EXIT_INSN(),
0007     },
0008     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0009     .result  = REJECT,
0010     .errstr = "invalid kernel function call not eliminated in verifier pass",
0011 },
/* R0 = 1, so the JGT always skips the unresolved kfunc call; a dead
 * invalid call is tolerated and the program is accepted. */
0012 {
0013     "calls: invalid kfunc call unreachable",
0014     .insns = {
0015     BPF_MOV64_IMM(BPF_REG_0, 1),
0016     BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
0017     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0018     BPF_MOV64_IMM(BPF_REG_0, 1),
0019     BPF_EXIT_INSN(),
0020     },
0021     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0022     .result = ACCEPT,
0023 },
/* Stack pointer (fp-8) as arg#0 to a kfunc whose struct parameter has
 * non-scalar members: must be rejected per the errstr. */
0024 {
0025     "calls: invalid kfunc call: ptr_to_mem to struct with non-scalar",
0026     .insns = {
0027     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0028     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0029     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0030     BPF_EXIT_INSN(),
0031     },
0032     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0033     .result = REJECT,
0034     .errstr = "arg#0 pointer type STRUCT prog_test_fail1 must point to scalar",
0035     .fixup_kfunc_btf_id = {
0036         { "bpf_kfunc_call_test_fail1", 2 },
0037     },
0038 },
/* Same shape, but the target struct nests deeper than the verifier's
 * limit (errstr: max struct nesting depth exceeded). */
0039 {
0040     "calls: invalid kfunc call: ptr_to_mem to struct with nesting depth > 4",
0041     .insns = {
0042     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0043     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0044     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0045     BPF_EXIT_INSN(),
0046     },
0047     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0048     .result = REJECT,
0049     .errstr = "max struct nesting depth exceeded\narg#0 pointer type STRUCT prog_test_fail2",
0050     .fixup_kfunc_btf_id = {
0051         { "bpf_kfunc_call_test_fail2", 2 },
0052     },
0053 },
/* Same shape with a struct containing a flexible array member (FAM):
 * also rejected as non-scalar. */
0054 {
0055     "calls: invalid kfunc call: ptr_to_mem to struct with FAM",
0056     .insns = {
0057     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0058     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0059     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0060     BPF_EXIT_INSN(),
0061     },
0062     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0063     .result = REJECT,
0064     .errstr = "arg#0 pointer type STRUCT prog_test_fail3 must point to scalar",
0065     .fixup_kfunc_btf_id = {
0066         { "bpf_kfunc_call_test_fail3", 2 },
0067     },
0068 },
/* A stack pointer is passed where the kfunc expects the program ctx
 * pointer: expected to fail the PTR_TO_CTX check. */
0069 {
0070     "calls: invalid kfunc call: reg->type != PTR_TO_CTX",
0071     .insns = {
0072     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0073     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0074     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0075     BPF_EXIT_INSN(),
0076     },
0077     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0078     .result = REJECT,
0079     .errstr = "arg#0 expected pointer to ctx, but got PTR",
0080     .fixup_kfunc_btf_id = {
0081         { "bpf_kfunc_call_test_pass_ctx", 2 },
0082     },
0083 },
/* kfunc prototype takes void * with no accompanying mem-size argument;
 * the verifier cannot type the region and must reject. */
0084 {
0085     "calls: invalid kfunc call: void * not allowed in func proto without mem size arg",
0086     .insns = {
0087     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0088     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0089     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0090     BPF_EXIT_INSN(),
0091     },
0092     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0093     .result = REJECT,
0094     .errstr = "arg#0 pointer type UNKNOWN  must point to scalar",
0095     .fixup_kfunc_btf_id = {
0096         { "bpf_kfunc_call_test_mem_len_fail1", 2 },
0097     },
0098 },
/* Acquire result is passed straight to release with no NULL check;
 * per the test name this exercises the reg2btf_ids[] lookup for a
 * reg->type outside the base type range. */
0099 {
0100     "calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
0101     .insns = {
0102     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0103     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0104     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
0105     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0106     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0107     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0108     BPF_EXIT_INSN(),
0109     },
0110     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0111     .result = REJECT,
0112     .errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
0113     .fixup_kfunc_btf_id = {
0114         { "bpf_kfunc_call_test_acquire", 3 },
0115         { "bpf_kfunc_call_test_release", 5 },
0116     },
0117 },
/* Acquired pointer is advanced by 8 before being handed to a release
 * kfunc: release args must carry zero register offset. */
0118 {
0119     "calls: invalid kfunc call: reg->off must be zero when passed to release kfunc",
0120     .insns = {
0121     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0122     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0123     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
0124     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0125     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0126     BPF_EXIT_INSN(),
0127     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
0128     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0129     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0130     BPF_MOV64_IMM(BPF_REG_0, 0),
0131     BPF_EXIT_INSN(),
0132     },
0133     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0134     .result = REJECT,
0135     .errstr = "R1 must have zero offset when passed to release func",
0136     .fixup_kfunc_btf_id = {
0137         { "bpf_kfunc_call_test_acquire", 3 },
0138         { "bpf_kfunc_call_memb_release", 8 },
0139     },
0140 },
/* memb_acquire's result is released via memb1_release, whose expected
 * argument type differs (errstr: args#0 expected pointer) — the
 * containing struct must not be accepted for the member's release. */
0141 {
0142     "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
0143     .insns = {
0144     BPF_MOV64_IMM(BPF_REG_0, 0),
0145     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0146     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0147     BPF_EXIT_INSN(),
0148     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0149     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0150     BPF_MOV64_IMM(BPF_REG_0, 0),
0151     BPF_EXIT_INSN(),
0152     },
0153     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0154     .result = REJECT,
0155     .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
0156     .fixup_kfunc_btf_id = {
0157         { "bpf_kfunc_call_memb_acquire", 1 },
0158         { "bpf_kfunc_call_memb1_release", 5 },
0159     },
0160 },
/* A PTR_TO_BTF_ID loaded from the acquired object is decremented by 4
 * before the kfunc call: negative offsets are disallowed. */
0161 {
0162     "calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
0163     .insns = {
0164     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0165     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0166     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
0167     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0168     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0169     BPF_EXIT_INSN(),
0170     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0171     BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16),
0172     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4),
0173     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0174     BPF_MOV64_IMM(BPF_REG_0, 0),
0175     BPF_EXIT_INSN(),
0176     },
0177     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0178     .fixup_kfunc_btf_id = {
0179         { "bpf_kfunc_call_test_acquire", 3 },
0180         { "bpf_kfunc_call_test_release", 9 },
0181     },
0182     .result_unpriv = REJECT,
0183     .result = REJECT,
0184     .errstr = "negative offset ptr_ ptr R1 off=-4 disallowed",
0185 },
/* A bounded-but-variable scalar (R2 in [0,4] on the final path) is added
 * to the PTR_TO_BTF_ID before release: variable offsets are disallowed.
 * The two side branches release the object so only var_off can fail. */
0186 {
0187     "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset",
0188     .insns = {
0189     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0190     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0191     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
0192     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0193     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0194     BPF_EXIT_INSN(),
0195     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0196     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
0197     BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 3),
0198     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0199     BPF_MOV64_IMM(BPF_REG_0, 0),
0200     BPF_EXIT_INSN(),
0201     BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 3),
0202     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0203     BPF_MOV64_IMM(BPF_REG_0, 0),
0204     BPF_EXIT_INSN(),
0205     BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
0206     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0207     BPF_MOV64_IMM(BPF_REG_0, 0),
0208     BPF_EXIT_INSN(),
0209     },
0210     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0211     .fixup_kfunc_btf_id = {
0212         { "bpf_kfunc_call_test_acquire", 3 },
0213         { "bpf_kfunc_call_test_release", 9 },
0214         { "bpf_kfunc_call_test_release", 13 },
0215         { "bpf_kfunc_call_test_release", 17 },
0216     },
0217     .result_unpriv = REJECT,
0218     .result = REJECT,
0219     .errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
0220 },
/* test_ref is first fed the acquired reference, then a pointer merely
 * loaded from memory (R6+16) — the latter is not a held reference and
 * must be rejected ("R1 must be referenced"). */
0221 {
0222     "calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
0223     .insns = {
0224     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0225     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0226     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
0227     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0228     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0229     BPF_EXIT_INSN(),
0230     BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
0231     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0232     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0233     BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
0234     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0235     BPF_MOV64_IMM(BPF_REG_0, 0),
0236     BPF_EXIT_INSN(),
0237     },
0238     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0239     .fixup_kfunc_btf_id = {
0240         { "bpf_kfunc_call_test_acquire", 3 },
0241         { "bpf_kfunc_call_test_ref", 8 },
0242         { "bpf_kfunc_call_test_ref", 10 },
0243     },
0244     .result_unpriv = REJECT,
0245     .result = REJECT,
0246     .errstr = "R1 must be referenced",
0247 },
/* Positive counterpart: the acquired reference itself is passed to
 * test_ref and then properly released — accepted (priv only). */
0248 {
0249     "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
0250     .insns = {
0251     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0252     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0253     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
0254     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0255     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0256     BPF_EXIT_INSN(),
0257     BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
0258     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0259     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0260     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0261     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
0262     BPF_MOV64_IMM(BPF_REG_0, 0),
0263     BPF_EXIT_INSN(),
0264     },
0265     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0266     .fixup_kfunc_btf_id = {
0267         { "bpf_kfunc_call_test_acquire", 3 },
0268         { "bpf_kfunc_call_test_ref", 8 },
0269         { "bpf_kfunc_call_test_release", 10 },
0270     },
0271     .result_unpriv = REJECT,
0272     .result = ACCEPT,
0273 },
/*
 * Basic bpf-to-bpf (BPF_PSEUDO_CALL) subprogram call tests.
 */
/* Smallest well-formed caller/callee pair: call skips +2 to a subprog
 * that returns 2. */
0274 {
0275     "calls: basic sanity",
0276     .insns = {
0277     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0278     BPF_MOV64_IMM(BPF_REG_0, 1),
0279     BPF_EXIT_INSN(),
0280     BPF_MOV64_IMM(BPF_REG_0, 2),
0281     BPF_EXIT_INSN(),
0282     },
0283     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0284     .result = ACCEPT,
0285 },
/* Same program, but run unprivileged: bpf-to-bpf calls are rejected for
 * unpriv loaders (test name typo "unpriviledged" is intentional/as-is). */
0286 {
0287     "calls: not on unpriviledged",
0288     .insns = {
0289     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0290     BPF_MOV64_IMM(BPF_REG_0, 1),
0291     BPF_EXIT_INSN(),
0292     BPF_MOV64_IMM(BPF_REG_0, 2),
0293     BPF_EXIT_INSN(),
0294     },
0295     .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
0296     .result_unpriv = REJECT,
0297     .result = ACCEPT,
0298     .retval = 1,
0299 },
/* Subprog performs a 32-bit divide with a zero divisor (w3 /= w2, w2=0)
 * then returns skb->data; the caller bounds-checks against data_end
 * before dereferencing. Must still be accepted, retval 1. */
0300 {
0301     "calls: div by 0 in subprog",
0302     .insns = {
0303     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0304     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
0305     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0306     BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
0307             offsetof(struct __sk_buff, data_end)),
0308     BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
0309     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
0310     BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
0311     BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
0312     BPF_MOV64_IMM(BPF_REG_0, 1),
0313     BPF_EXIT_INSN(),
0314     BPF_MOV32_IMM(BPF_REG_2, 0),
0315     BPF_MOV32_IMM(BPF_REG_3, 1),
0316     BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
0317     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0318             offsetof(struct __sk_buff, data)),
0319     BPF_EXIT_INSN(),
0320     },
0321     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0322     .result = ACCEPT,
0323     .retval = 1,
0324 },
/* Subprog returns either skb->data (pointer) or the scalar 42; the
 * caller dereferences R0, so the scalar possibility must be rejected. */
0325 {
0326     "calls: multiple ret types in subprog 1",
0327     .insns = {
0328     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0329     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
0330     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0331     BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
0332             offsetof(struct __sk_buff, data_end)),
0333     BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
0334     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
0335     BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
0336     BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
0337     BPF_MOV64_IMM(BPF_REG_0, 1),
0338     BPF_EXIT_INSN(),
0339     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0340             offsetof(struct __sk_buff, data)),
0341     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0342     BPF_MOV32_IMM(BPF_REG_0, 42),
0343     BPF_EXIT_INSN(),
0344     },
0345     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0346     .result = REJECT,
0347     .errstr = "R0 invalid mem access 'scalar'",
0348 },
/* Subprog may return a map-lookup value or skb->data + 64; the caller's
 * fixed-size access can then fall outside the value — rejected on the
 * min-value range check. fixup_map_hash_8b patches the map fd at insn 16. */
0349 {
0350     "calls: multiple ret types in subprog 2",
0351     .insns = {
0352     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0353     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
0354     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0355     BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
0356             offsetof(struct __sk_buff, data_end)),
0357     BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
0358     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
0359     BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
0360     BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
0361     BPF_MOV64_IMM(BPF_REG_0, 1),
0362     BPF_EXIT_INSN(),
0363     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0364             offsetof(struct __sk_buff, data)),
0365     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0366     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
0367     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
0368     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
0369     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
0370     BPF_LD_MAP_FD(BPF_REG_1, 0),
0371     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
0372     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
0373     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
0374             offsetof(struct __sk_buff, data)),
0375     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
0376     BPF_EXIT_INSN(),
0377     },
0378     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0379     .fixup_map_hash_8b = { 16 },
0380     .result = REJECT,
0381     .errstr = "R0 min value is outside of the allowed memory range",
0382 },
/*
 * Structural validity of call instructions: overlap, recursion shape,
 * reserved fields, back-edges, and jumps relative to function bodies.
 */
/* Call with offset 0 targets the very next insn, so caller and callee
 * bodies overlap; the "caller" then has no terminating exit/jmp. */
0383 {
0384     "calls: overlapping caller/callee",
0385     .insns = {
0386     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
0387     BPF_MOV64_IMM(BPF_REG_0, 1),
0388     BPF_EXIT_INSN(),
0389     },
0390     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0391     .errstr = "last insn is not an exit or jmp",
0392     .result = REJECT,
0393 },
/* Backward call offsets (-2) from inside the region land before the
 * subprog start — flagged as jump out of range. */
0394 {
0395     "calls: wrong recursive calls",
0396     .insns = {
0397     BPF_JMP_IMM(BPF_JA, 0, 0, 4),
0398     BPF_JMP_IMM(BPF_JA, 0, 0, 4),
0399     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
0400     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
0401     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
0402     BPF_MOV64_IMM(BPF_REG_0, 1),
0403     BPF_EXIT_INSN(),
0404     },
0405     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0406     .errstr = "jump out of range",
0407     .result = REJECT,
0408 },
/* src_reg = 3 is neither 0 (helper) nor BPF_PSEUDO_CALL — reserved. */
0409 {
0410     "calls: wrong src reg",
0411     .insns = {
0412     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 3, 0, 0),
0413     BPF_MOV64_IMM(BPF_REG_0, 1),
0414     BPF_EXIT_INSN(),
0415     },
0416     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0417     .errstr = "BPF_CALL uses reserved fields",
0418     .result = REJECT,
0419 },
/* Non-zero off field (-1) in a BPF_CALL is reserved. */
0420 {
0421     "calls: wrong off value",
0422     .insns = {
0423     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
0424     BPF_MOV64_IMM(BPF_REG_0, 1),
0425     BPF_EXIT_INSN(),
0426     },
0427     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0428     .errstr = "BPF_CALL uses reserved fields",
0429     .result = REJECT,
0430 },
/* Call with imm -1 targets insn 0 again: trivial self back-edge. */
0431 {
0432     "calls: jump back loop",
0433     .insns = {
0434     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
0435     BPF_MOV64_IMM(BPF_REG_0, 1),
0436     BPF_EXIT_INSN(),
0437     },
0438     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0439     .errstr = "back-edge from insn 0 to 0",
0440     .result = REJECT,
0441 },
/* The JEQ jumps from the caller into the callee's body — conditional
 * jumps may not cross function boundaries. */
0442 {
0443     "calls: conditional call",
0444     .insns = {
0445     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0446             offsetof(struct __sk_buff, mark)),
0447     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
0448     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0449     BPF_MOV64_IMM(BPF_REG_0, 1),
0450     BPF_EXIT_INSN(),
0451     BPF_MOV64_IMM(BPF_REG_0, 2),
0452     BPF_EXIT_INSN(),
0453     },
0454     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0455     .errstr = "jump out of range",
0456     .result = REJECT,
0457 },
/* Branch target and call target both stay within their own function
 * bodies — accepted. */
0458 {
0459     "calls: conditional call 2",
0460     .insns = {
0461     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0462             offsetof(struct __sk_buff, mark)),
0463     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
0464     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
0465     BPF_MOV64_IMM(BPF_REG_0, 1),
0466     BPF_EXIT_INSN(),
0467     BPF_MOV64_IMM(BPF_REG_0, 2),
0468     BPF_EXIT_INSN(),
0469     BPF_MOV64_IMM(BPF_REG_0, 3),
0470     BPF_EXIT_INSN(),
0471     },
0472     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0473     .result = ACCEPT,
0474 },
/* Backward JA creates a loop with no call involved: rejected as a
 * back-edge for unpriv, accepted for priv; retval 1. */
0475 {
0476     "calls: conditional call 3",
0477     .insns = {
0478     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0479             offsetof(struct __sk_buff, mark)),
0480     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
0481     BPF_JMP_IMM(BPF_JA, 0, 0, 4),
0482     BPF_MOV64_IMM(BPF_REG_0, 1),
0483     BPF_EXIT_INSN(),
0484     BPF_MOV64_IMM(BPF_REG_0, 1),
0485     BPF_JMP_IMM(BPF_JA, 0, 0, -6),
0486     BPF_MOV64_IMM(BPF_REG_0, 3),
0487     BPF_JMP_IMM(BPF_JA, 0, 0, -6),
0488     },
0489     .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
0490     .errstr_unpriv = "back-edge from insn",
0491     .result_unpriv = REJECT,
0492     .result = ACCEPT,
0493     .retval = 1,
0494 },
/* Backward JA (-5) stays inside the same function body — accepted. */
0495 {
0496     "calls: conditional call 4",
0497     .insns = {
0498     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0499             offsetof(struct __sk_buff, mark)),
0500     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
0501     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
0502     BPF_MOV64_IMM(BPF_REG_0, 1),
0503     BPF_EXIT_INSN(),
0504     BPF_MOV64_IMM(BPF_REG_0, 1),
0505     BPF_JMP_IMM(BPF_JA, 0, 0, -5),
0506     BPF_MOV64_IMM(BPF_REG_0, 3),
0507     BPF_EXIT_INSN(),
0508     },
0509     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0510     .result = ACCEPT,
0511 },
/* Like 4 but with JA -6 on SCHED_CLS (priv-only prog type): accepted,
 * retval 1. */
0512 {
0513     "calls: conditional call 5",
0514     .insns = {
0515     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0516             offsetof(struct __sk_buff, mark)),
0517     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
0518     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
0519     BPF_MOV64_IMM(BPF_REG_0, 1),
0520     BPF_EXIT_INSN(),
0521     BPF_MOV64_IMM(BPF_REG_0, 1),
0522     BPF_JMP_IMM(BPF_JA, 0, 0, -6),
0523     BPF_MOV64_IMM(BPF_REG_0, 3),
0524     BPF_EXIT_INSN(),
0525     },
0526     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0527     .result = ACCEPT,
0528     .retval = 1,
0529 },
/* JEQ loops back over the call instruction, re-invoking the subprog
 * forever when R0 == 0 — rejected as an infinite loop. */
0530 {
0531     "calls: conditional call 6",
0532     .insns = {
0533     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0534     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0535     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0536     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
0537     BPF_EXIT_INSN(),
0538     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0539             offsetof(struct __sk_buff, mark)),
0540     BPF_EXIT_INSN(),
0541     },
0542     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0543     .errstr = "infinite loop detected",
0544     .result = REJECT,
0545 },
/*
 * Register/argument flow across bpf-to-bpf calls: R0 return propagation,
 * R1-R5 argument passing, and pointer invalidation in the caller.
 */
/* Caller returns the R0 (=2) set by the callee. */
0548 {
0549     "calls: using r0 returned by callee",
0550     .insns = {
0551     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0552     BPF_EXIT_INSN(),
0553     BPF_MOV64_IMM(BPF_REG_0, 2),
0554     BPF_EXIT_INSN(),
0555     },
0556     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0557     .result = ACCEPT,
0558 },
/* Callee exits without ever writing R0: caller's use of R0 fails the
 * read_ok check. */
0559 {
0560     "calls: using uninit r0 from callee",
0561     .insns = {
0562     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0563     BPF_EXIT_INSN(),
0564     BPF_EXIT_INSN(),
0565     },
0566     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0567     .errstr = "!read_ok",
0568     .result = REJECT,
0569 },
/* Callee reads skb->len through the ctx pointer passed in R1. */
0570 {
0571     "calls: callee is using r1",
0572     .insns = {
0573     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0574     BPF_EXIT_INSN(),
0575     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0576             offsetof(struct __sk_buff, len)),
0577     BPF_EXIT_INSN(),
0578     },
0579     .prog_type = BPF_PROG_TYPE_SCHED_ACT,
0580     .result = ACCEPT,
0581     .retval = TEST_DATA_LEN,
0582 },
/* Callee returns R1 (the ctx pointer) as the result — accepted priv,
 * rejected unpriv (calls not allowed); retval is the pointer value. */
0583 {
0584     "calls: callee using args1",
0585     .insns = {
0586     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0587     BPF_EXIT_INSN(),
0588     BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
0589     BPF_EXIT_INSN(),
0590     },
0591     .errstr_unpriv = "allowed for",
0592     .result_unpriv = REJECT,
0593     .result = ACCEPT,
0594     .retval = POINTER_VALUE,
0595 },
/* Caller never initializes R2; callee reads it — rejected. */
0596 {
0597     "calls: callee using wrong args2",
0598     .insns = {
0599     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0600     BPF_EXIT_INSN(),
0601     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
0602     BPF_EXIT_INSN(),
0603     },
0604     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0605     .errstr = "R2 !read_ok",
0606     .result = REJECT,
0607 },
/* skb->len is passed in both R1 and R2; callee sums them. */
0608 {
0609     "calls: callee using two args",
0610     .insns = {
0611     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0612     BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
0613             offsetof(struct __sk_buff, len)),
0614     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
0615             offsetof(struct __sk_buff, len)),
0616     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0617     BPF_EXIT_INSN(),
0618     BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
0619     BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
0620     BPF_EXIT_INSN(),
0621     },
0622     .errstr_unpriv = "allowed for",
0623     .result_unpriv = REJECT,
0624     .result = ACCEPT,
0625     .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
0626 },
/* Callee invokes bpf_xdp_adjust_head, so the caller's saved packet
 * pointer (R6) must be invalidated; dereferencing it is rejected. */
0627 {
0628     "calls: callee changing pkt pointers",
0629     .insns = {
0630     BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)),
0631     BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
0632             offsetof(struct xdp_md, data_end)),
0633     BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
0634     BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
0635     BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
0636     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0637     /* clear_all_pkt_pointers() has to walk all frames
0638      * to make sure that pkt pointers in the caller
0639      * are cleared when callee is calling a helper that
0640      * adjusts packet size
0641      */
0642     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
0643     BPF_MOV32_IMM(BPF_REG_0, 0),
0644     BPF_EXIT_INSN(),
0645     BPF_MOV64_IMM(BPF_REG_2, 0),
0646     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head),
0647     BPF_EXIT_INSN(),
0648     },
0649     .result = REJECT,
0650     .errstr = "R6 invalid mem access 'scalar'",
0651     .prog_type = BPF_PROG_TYPE_XDP,
0652     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
0653 },
/* Callee NULL-checks the map-lookup result (R1) and returns 1 iff it is
 * non-NULL; the caller only dereferences R6 when the callee said so.
 * Accepted, retval 0. */
0654 {
0655     "calls: ptr null check in subprog",
0656     .insns = {
0657     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
0658     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
0659     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
0660     BPF_LD_MAP_FD(BPF_REG_1, 0),
0661     BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
0662     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
0663     BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
0664     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0665     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
0666     BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
0667     BPF_EXIT_INSN(),
0668     BPF_MOV64_IMM(BPF_REG_0, 0),
0669     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
0670     BPF_MOV64_IMM(BPF_REG_0, 1),
0671     BPF_EXIT_INSN(),
0672     },
0673     .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
0674     .fixup_map_hash_48b = { 3 },
0675     .result_unpriv = REJECT,
0676     .result = ACCEPT,
0677     .retval = 0,
0678 },
/*
 * Stack pointer arithmetic across call frames, stack alignment, and
 * jump-range validation around call boundaries.
 */
/* Two subprog calls each return skb->len; results are summed. */
0679 {
0680     "calls: two calls with args",
0681     .insns = {
0682     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0683     BPF_EXIT_INSN(),
0684     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0685     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
0686     BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
0687     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0688     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0689     BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
0690     BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
0691     BPF_EXIT_INSN(),
0692     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0693             offsetof(struct __sk_buff, len)),
0694     BPF_EXIT_INSN(),
0695     },
0696     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0697     .result = ACCEPT,
0698     .retval = TEST_DATA_LEN + TEST_DATA_LEN,
0699 },
/* A frame-pointer-derived pointer in R2 is decremented by 64 at each
 * nesting level; the final aligned store of 42 is in bounds — accepted. */
0700 {
0701     "calls: calls with stack arith",
0702     .insns = {
0703     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
0704     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
0705     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0706     BPF_EXIT_INSN(),
0707     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
0708     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0709     BPF_EXIT_INSN(),
0710     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
0711     BPF_MOV64_IMM(BPF_REG_0, 42),
0712     BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
0713     BPF_EXIT_INSN(),
0714     },
0715     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0716     .result = ACCEPT,
0717     .retval = 42,
0718 },
/* Same shape with odd decrements (-63/-61/-63): the 8-byte store lands
 * misaligned and strict-alignment loading rejects it. */
0719 {
0720     "calls: calls with misaligned stack access",
0721     .insns = {
0722     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
0723     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
0724     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0725     BPF_EXIT_INSN(),
0726     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
0727     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0728     BPF_EXIT_INSN(),
0729     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
0730     BPF_MOV64_IMM(BPF_REG_0, 42),
0731     BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
0732     BPF_EXIT_INSN(),
0733     },
0734     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0735     .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
0736     .errstr = "misaligned stack access",
0737     .result = REJECT,
0738 },
/* Pure jump control flow, no calls: ends with R0 = 43. */
0739 {
0740     "calls: calls control flow, jump test",
0741     .insns = {
0742     BPF_MOV64_IMM(BPF_REG_0, 42),
0743     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
0744     BPF_MOV64_IMM(BPF_REG_0, 43),
0745     BPF_JMP_IMM(BPF_JA, 0, 0, 1),
0746     BPF_JMP_IMM(BPF_JA, 0, 0, -3),
0747     BPF_EXIT_INSN(),
0748     },
0749     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0750     .result = ACCEPT,
0751     .retval = 43,
0752 },
/* The trailing call (imm -3) makes insn 2 a subprog start; the JA at
 * insn 1 then jumps across the new function boundary. */
0753 {
0754     "calls: calls control flow, jump test 2",
0755     .insns = {
0756     BPF_MOV64_IMM(BPF_REG_0, 42),
0757     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
0758     BPF_MOV64_IMM(BPF_REG_0, 43),
0759     BPF_JMP_IMM(BPF_JA, 0, 0, 1),
0760     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
0761     BPF_EXIT_INSN(),
0762     },
0763     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0764     .errstr = "jump out of range from insn 1 to 4",
0765     .result = REJECT,
0766 },
/* The callee's JEQ (insn 11, offset -3) jumps back into the middle of
 * another function body — out of range. */
0767 {
0768     "calls: two calls with bad jump",
0769     .insns = {
0770     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0771     BPF_EXIT_INSN(),
0772     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0773     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
0774     BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
0775     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0776     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0777     BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
0778     BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
0779     BPF_EXIT_INSN(),
0780     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0781             offsetof(struct __sk_buff, len)),
0782     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
0783     BPF_EXIT_INSN(),
0784     },
0785     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0786     .errstr = "jump out of range from insn 11 to 9",
0787     .result = REJECT,
0788 },
/*
 * Recursion detection, dead-code detection, and call-destination
 * validation (out-of-bounds targets, targets splitting instructions).
 */
/* Subprog calls itself (imm -1 targets its own first insn). */
0789 {
0790     "calls: recursive call. test1",
0791     .insns = {
0792     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0793     BPF_EXIT_INSN(),
0794     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
0795     BPF_EXIT_INSN(),
0796     },
0797     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0798     .errstr = "back-edge",
0799     .result = REJECT,
0800 },
/* Subprog calls back to the main program's start (imm -3). */
0801 {
0802     "calls: recursive call. test2",
0803     .insns = {
0804     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0805     BPF_EXIT_INSN(),
0806     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
0807     BPF_EXIT_INSN(),
0808     },
0809     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0810     .errstr = "back-edge",
0811     .result = REJECT,
0812 },
/* The second MOV/EXIT pair (insns 6-7... insn 6 onwards) is never
 * reached from any function — unreachable code is rejected. */
0813 {
0814     "calls: unreachable code",
0815     .insns = {
0816     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0817     BPF_EXIT_INSN(),
0818     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0819     BPF_EXIT_INSN(),
0820     BPF_MOV64_IMM(BPF_REG_0, 0),
0821     BPF_EXIT_INSN(),
0822     BPF_MOV64_IMM(BPF_REG_0, 0),
0823     BPF_EXIT_INSN(),
0824     },
0825     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0826     .errstr = "unreachable insn 6",
0827     .result = REJECT,
0828 },
/* Call target (imm -4) lies before instruction 0. */
0829 {
0830     "calls: invalid call",
0831     .insns = {
0832     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0833     BPF_EXIT_INSN(),
0834     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
0835     BPF_EXIT_INSN(),
0836     },
0837     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0838     .errstr = "invalid destination",
0839     .result = REJECT,
0840 },
/* Call target far beyond the end of the program (imm 0x7fffffff). */
0841 {
0842     "calls: invalid call 2",
0843     .insns = {
0844     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0845     BPF_EXIT_INSN(),
0846     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
0847     BPF_EXIT_INSN(),
0848     },
0849     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0850     .errstr = "invalid destination",
0851     .result = REJECT,
0852 },
/* Callee's JEQ (offset -3) jumps back into the caller's body. */
0853 {
0854     "calls: jumping across function bodies. test1",
0855     .insns = {
0856     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0857     BPF_MOV64_IMM(BPF_REG_0, 0),
0858     BPF_EXIT_INSN(),
0859     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
0860     BPF_EXIT_INSN(),
0861     },
0862     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0863     .errstr = "jump out of range",
0864     .result = REJECT,
0865 },
/* Caller's JEQ (offset 3) jumps forward into the callee's body. */
0866 {
0867     "calls: jumping across function bodies. test2",
0868     .insns = {
0869     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
0870     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0871     BPF_MOV64_IMM(BPF_REG_0, 0),
0872     BPF_EXIT_INSN(),
0873     BPF_EXIT_INSN(),
0874     },
0875     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0876     .errstr = "jump out of range",
0877     .result = REJECT,
0878 },
/* A function whose last instruction is a conditional jump (not exit or
 * unconditional jmp) is malformed. */
0879 {
0880     "calls: call without exit",
0881     .insns = {
0882     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0883     BPF_EXIT_INSN(),
0884     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0885     BPF_EXIT_INSN(),
0886     BPF_MOV64_IMM(BPF_REG_0, 0),
0887     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
0888     },
0889     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0890     .errstr = "not an exit",
0891     .result = REJECT,
0892 },
/* BPF_LD_IMM64 occupies two insn slots; the second call lands on the
 * second slot, splitting the instruction — rejected via "last insn". */
0893 {
0894     "calls: call into middle of ld_imm64",
0895     .insns = {
0896     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0897     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0898     BPF_MOV64_IMM(BPF_REG_0, 0),
0899     BPF_EXIT_INSN(),
0900     BPF_LD_IMM64(BPF_REG_0, 0),
0901     BPF_EXIT_INSN(),
0902     },
0903     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0904     .errstr = "last insn",
0905     .result = REJECT,
0906 },
/* Two calls land on adjacent insns 4 and 5, carving out a one-insn
 * "function" that has no valid terminator — rejected via "last insn". */
0907 {
0908     "calls: call into middle of other call",
0909     .insns = {
0910     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0911     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0912     BPF_MOV64_IMM(BPF_REG_0, 0),
0913     BPF_EXIT_INSN(),
0914     BPF_MOV64_IMM(BPF_REG_0, 0),
0915     BPF_MOV64_IMM(BPF_REG_0, 0),
0916     BPF_EXIT_INSN(),
0917     },
0918     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0919     .errstr = "last insn",
0920     .result = REJECT,
0921 },
0922 {
0923     "calls: subprog call with ld_abs in main prog",
    /* LD_ABS uses R6 as the implicit skb pointer; the main prog saves
     * the ctx across the subprog call (R7) and restores it before the
     * second batch of LD_ABS insns.  Subprog invokes the
     * bpf_skb_vlan_push() helper.  Must be accepted.
     */
0924     .insns = {
0925     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0926     BPF_LD_ABS(BPF_B, 0),
0927     BPF_LD_ABS(BPF_H, 0),
0928     BPF_LD_ABS(BPF_W, 0),
0929     BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
0930     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0931     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
0932     BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
0933     BPF_LD_ABS(BPF_B, 0),
0934     BPF_LD_ABS(BPF_H, 0),
0935     BPF_LD_ABS(BPF_W, 0),
0936     BPF_EXIT_INSN(),
0937     BPF_MOV64_IMM(BPF_REG_2, 1),
0938     BPF_MOV64_IMM(BPF_REG_3, 2),
0939     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push),
0940     BPF_EXIT_INSN(),
0941     },
0942     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
0943     .result = ACCEPT,
0944 },
0945 {
0946     "calls: two calls with bad fallthrough",
    /* Execution can fall off the end of a function body into the next
     * one (the last subprog ends with LDX, not BPF_EXIT); rejected
     * with "not an exit".
     */
0947     .insns = {
0948     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0949     BPF_EXIT_INSN(),
0950     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0951     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
0952     BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
0953     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0954     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0955     BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
0956     BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
0957     BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
0958     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
0959             offsetof(struct __sk_buff, len)),
0960     BPF_EXIT_INSN(),
0961     },
0962     .prog_type = BPF_PROG_TYPE_TRACEPOINT,
0963     .errstr = "not an exit",
0964     .result = REJECT,
0965 },
0966 {
0967     "calls: two calls with stack read",
    /* Caller passes a pointer into its own stack (fp-8) down through
     * two call levels; the innermost subprog reads through it.  This
     * cross-frame stack read is legal, so the program is accepted.
     */
0968     .insns = {
0969     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
0970     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0971     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0972     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
0973     BPF_EXIT_INSN(),
0974     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
0975     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
0976     BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
0977     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
0978     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
0979     BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
0980     BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
0981     BPF_EXIT_INSN(),
0982     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
0983     BPF_EXIT_INSN(),
0984     },
0985     .prog_type = BPF_PROG_TYPE_XDP,
0986     .result = ACCEPT,
0987 },
0988 {
0989     "calls: two calls that return map_value" /* sic: stack write */,
0989     "calls: two calls with stack write",
    /* Caller passes fp-8 and fp-16 pointers into subprog 1, which
     * forwards them to subprog 2 (read) and then writes the combined
     * result back through the caller's fp-16 pointer.  Cross-frame
     * stack writes through passed pointers are legal: ACCEPT.
     */
0990     .insns = {
0991     /* main prog */
0992     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
0993     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
0994     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
0995     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
0996     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
0997     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
0998     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
0999     BPF_EXIT_INSN(),
1000 
1001     /* subprog 1 */
1002     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1003     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1004     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
1005     BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
1006     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1007     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1008     BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
1009     BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
1010     /* write into stack frame of main prog */
1011     BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1012     BPF_EXIT_INSN(),
1013 
1014     /* subprog 2 */
1015     /* read from stack frame of main prog */
1016     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
1017     BPF_EXIT_INSN(),
1018     },
1019     .prog_type = BPF_PROG_TYPE_XDP,
1020     .result = ACCEPT,
1021 },
1022 {
1023     "calls: stack overflow using two frames (pre-call access)",
    /* Each frame touches fp-300; two frames together exceed the 512
     * byte combined stack limit.  Here the caller's deep access happens
     * before the call.  REJECT with "combined stack size".
     */
1024     .insns = {
1025     /* prog 1 */
1026     BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1027     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
1028     BPF_EXIT_INSN(),
1029 
1030     /* prog 2 */
1031     BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1032     BPF_MOV64_IMM(BPF_REG_0, 0),
1033     BPF_EXIT_INSN(),
1034     },
1035     .prog_type = BPF_PROG_TYPE_XDP,
1036     .errstr = "combined stack size",
1037     .result = REJECT,
1038 },
1039 {
1040     "calls: stack overflow using two frames (post-call access)",
    /* Same overflow, but the caller's deep stack access happens after
     * the call returns; the combined-depth check must still trigger.
     */
1041     .insns = {
1042     /* prog 1 */
1043     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
1044     BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1045     BPF_EXIT_INSN(),
1046 
1047     /* prog 2 */
1048     BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1049     BPF_MOV64_IMM(BPF_REG_0, 0),
1050     BPF_EXIT_INSN(),
1051     },
1052     .prog_type = BPF_PROG_TYPE_XDP,
1053     .errstr = "combined stack size",
1054     .result = REJECT,
1055 },
1056 {
1057     "calls: stack depth check using three frames. test1",
    /* Call graph: main -> A, main -> B -> A.  The deepest chain
     * (main + B + A = 32 + 64 + 256) stays under the 512 limit: ACCEPT.
     */
1058     .insns = {
1059     /* main */
1060     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
1061     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
1062     BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
1063     BPF_MOV64_IMM(BPF_REG_0, 0),
1064     BPF_EXIT_INSN(),
1065     /* A */
1066     BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
1067     BPF_EXIT_INSN(),
1068     /* B */
1069     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
1070     BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
1071     BPF_EXIT_INSN(),
1072     },
1073     .prog_type = BPF_PROG_TYPE_XDP,
1074     /* stack_main=32, stack_A=256, stack_B=64
1075      * and max(main+A, main+A+B) < 512
1076      */
1077     .result = ACCEPT,
1078 },
1079 {
1080     "calls: stack depth check using three frames. test2",
    /* Same call graph with A/B stack sizes swapped; the deepest chain
     * is still below 512 bytes: ACCEPT.
     */
1081     .insns = {
1082     /* main */
1083     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
1084     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
1085     BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
1086     BPF_MOV64_IMM(BPF_REG_0, 0),
1087     BPF_EXIT_INSN(),
1088     /* A */
1089     BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
1090     BPF_EXIT_INSN(),
1091     /* B */
1092     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
1093     BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
1094     BPF_EXIT_INSN(),
1095     },
1096     .prog_type = BPF_PROG_TYPE_XDP,
1097     /* stack_main=32, stack_A=64, stack_B=256
1098      * and max(main+A, main+A+B) < 512
1099      */
1100     .result = ACCEPT,
1101 },
1102 {
1103     "calls: stack depth check using three frames. test3",
    /* Larger frames (64 + 224 + 256) make the deepest chain exceed
     * 512 bytes; REJECT with "combined stack".
     */
1104     .insns = {
1105     /* main */
1106     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1107     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
1108     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1109     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
1110     BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
1111     BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
1112     BPF_MOV64_IMM(BPF_REG_0, 0),
1113     BPF_EXIT_INSN(),
1114     /* A */
1115     BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
1116     BPF_EXIT_INSN(),
1117     BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
1118     BPF_JMP_IMM(BPF_JA, 0, 0, -3),
1119     /* B */
1120     BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
1121     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
1122     BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
1123     BPF_EXIT_INSN(),
1124     },
1125     .prog_type = BPF_PROG_TYPE_XDP,
1126     /* stack_main=64, stack_A=224, stack_B=256
1127      * and max(main+A, main+A+B) > 512
1128      */
1129     .errstr = "combined stack",
1130     .result = REJECT,
1131 },
1132 {
1133     "calls: stack depth check using three frames. test4",
    /* The depth check must use the worst case over all call paths:
     * func1 either allocates 300 bytes or recurses into func2 (which
     * may allocate 300 itself).  REJECT with "combined stack".
     */
1134     /* void main(void) {
1135      *   func1(0);
1136      *   func1(1);
1137      *   func2(1);
1138      * }
1139      * void func1(int alloc_or_recurse) {
1140      *   if (alloc_or_recurse) {
1141      *     frame_pointer[-300] = 1;
1142      *   } else {
1143      *     func2(alloc_or_recurse);
1144      *   }
1145      * }
1146      * void func2(int alloc_or_recurse) {
1147      *   if (alloc_or_recurse) {
1148      *     frame_pointer[-300] = 1;
1149      *   }
1150      * }
1151      */
1152     .insns = {
1153     /* main */
1154     BPF_MOV64_IMM(BPF_REG_1, 0),
1155     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
1156     BPF_MOV64_IMM(BPF_REG_1, 1),
1157     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
1158     BPF_MOV64_IMM(BPF_REG_1, 1),
1159     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
1160     BPF_MOV64_IMM(BPF_REG_0, 0),
1161     BPF_EXIT_INSN(),
1162     /* A */
1163     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
1164     BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1165     BPF_EXIT_INSN(),
1166     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
1167     BPF_EXIT_INSN(),
1168     /* B */
1169     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1170     BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
1171     BPF_EXIT_INSN(),
1172     },
1173     .prog_type = BPF_PROG_TYPE_XDP,
1174     .result = REJECT,
1175     .errstr = "combined stack",
1176 },
1177 {
1178     "calls: stack depth check using three frames. test5",
    /* A linear chain of 8 nested calls (main->A->...->H) exceeds the
     * maximum call depth; REJECT with "call stack".
     */
1179     .insns = {
1180     /* main */
1181     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
1182     BPF_EXIT_INSN(),
1183     /* A */
1184     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
1185     BPF_EXIT_INSN(),
1186     /* B */
1187     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
1188     BPF_EXIT_INSN(),
1189     /* C */
1190     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
1191     BPF_EXIT_INSN(),
1192     /* D */
1193     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
1194     BPF_EXIT_INSN(),
1195     /* E */
1196     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
1197     BPF_EXIT_INSN(),
1198     /* F */
1199     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
1200     BPF_EXIT_INSN(),
1201     /* G */
1202     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
1203     BPF_EXIT_INSN(),
1204     /* H */
1205     BPF_MOV64_IMM(BPF_REG_0, 0),
1206     BPF_EXIT_INSN(),
1207     },
1208     .prog_type = BPF_PROG_TYPE_XDP,
1209     .errstr = "call stack",
1210     .result = REJECT,
1211 },
1212 {
1213     "calls: stack depth check in dead code",
    /* The too-deep call chain sits behind a branch that never executes
     * (R1 == 0 skips the call to B); the depth check still applies and
     * the program is rejected with "call stack".
     */
1214     .insns = {
1215     /* main */
1216     BPF_MOV64_IMM(BPF_REG_1, 0),
1217     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
1218     BPF_EXIT_INSN(),
1219     /* A */
1220     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1221     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
1222     BPF_MOV64_IMM(BPF_REG_0, 0),
1223     BPF_EXIT_INSN(),
1224     /* B */
1225     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
1226     BPF_EXIT_INSN(),
1227     /* C */
1228     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
1229     BPF_EXIT_INSN(),
1230     /* D */
1231     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
1232     BPF_EXIT_INSN(),
1233     /* E */
1234     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
1235     BPF_EXIT_INSN(),
1236     /* F */
1237     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
1238     BPF_EXIT_INSN(),
1239     /* G */
1240     BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
1241     BPF_EXIT_INSN(),
1242     /* H */
1243     BPF_MOV64_IMM(BPF_REG_0, 0),
1244     BPF_EXIT_INSN(),
1245     },
1246     .prog_type = BPF_PROG_TYPE_XDP,
1247     .errstr = "call stack",
1248     .result = REJECT,
1249 },
1250 {
1251     "calls: spill into caller stack frame",
    /* Subprog stores a pointer (R1, which points into the caller's
     * stack) through the caller's stack slot; spilling a pointer into
     * another frame is rejected with "cannot spill".
     */
1252     .insns = {
1253     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1254     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1255     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1256     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1257     BPF_EXIT_INSN(),
1258     BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1259     BPF_MOV64_IMM(BPF_REG_0, 0),
1260     BPF_EXIT_INSN(),
1261     },
1262     .prog_type = BPF_PROG_TYPE_XDP,
1263     .errstr = "cannot spill",
1264     .result = REJECT,
1265 },
1266 {
1267     "calls: write into caller stack frame",
    /* Subprog writes the scalar 42 through the pointer into the
     * caller's fp-8; the caller reads it back and returns it.
     * Legal cross-frame scalar write: ACCEPT, retval 42.
     */
1268     .insns = {
1269     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1270     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1271     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1272     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1273     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1274     BPF_EXIT_INSN(),
1275     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
1276     BPF_MOV64_IMM(BPF_REG_0, 0),
1277     BPF_EXIT_INSN(),
1278     },
1279     .prog_type = BPF_PROG_TYPE_XDP,
1280     .result = ACCEPT,
1281     .retval = 42,
1282 },
1283 {
1284     "calls: write into callee stack frame",
    /* Subprog returns a pointer into its own (about to be destroyed)
     * stack frame; rejected with "cannot return stack pointer".
     */
1285     .insns = {
1286     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1287     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
1288     BPF_EXIT_INSN(),
1289     BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1290     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
1291     BPF_EXIT_INSN(),
1292     },
1293     .prog_type = BPF_PROG_TYPE_XDP,
1294     .errstr = "cannot return stack pointer",
1295     .result = REJECT,
1296 },
1297 {
1298     "calls: two calls with stack write and void return",
    /* Subprog 2 writes through the caller-provided pointer and exits
     * without setting R0 ("void return"); the caller does not consume
     * R0 from it, so the program is accepted.
     */
1299     .insns = {
1300     /* main prog */
1301     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1302     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1303     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1304     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1305     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1306     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1307     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1308     BPF_EXIT_INSN(),
1309 
1310     /* subprog 1 */
1311     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1312     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1313     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1314     BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1315     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1316     BPF_EXIT_INSN(),
1317 
1318     /* subprog 2 */
1319     /* write into stack frame of main prog */
1320     BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
1321     BPF_EXIT_INSN(), /* void return */
1322     },
1323     .prog_type = BPF_PROG_TYPE_XDP,
1324     .result = ACCEPT,
1325 },
1326 {
1327     "calls: ambiguous return value",
    /* The callee only sets R0 on one branch (R1 == 0); on the other it
     * exits with R0 never written, so the caller reading R0 must be
     * rejected with "R0 !read_ok" (and "allowed for" unprivileged).
     */
1328     .insns = {
1329     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1330     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
1331     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1332     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1333     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1334     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1335     BPF_EXIT_INSN(),
1336     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
1337     BPF_MOV64_IMM(BPF_REG_0, 0),
1338     BPF_EXIT_INSN(),
1339     },
1340     .errstr_unpriv = "allowed for",
1341     .result_unpriv = REJECT,
1342     .errstr = "R0 !read_ok",
1343     .result = REJECT,
1344 },
1345 {
1346     "calls: two calls that return map_value",
    /* Subprog 2 looks up a map element and stores the
     * PTR_TO_MAP_VALUE_OR_NULL result into the main prog's stack; main
     * reloads each pointer, null-checks it, and writes through it.
     * The verifier must track the pointer type across frames: ACCEPT.
     */
1347     .insns = {
1348     /* main prog */
1349     /* pass fp-16, fp-8 into a function */
1350     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1351     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1352     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1353     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1354     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
1355 
1356     /* fetch map_value_ptr from the stack of this function */
1357     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
1358     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1359     /* write into map value */
1360     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1361     /* fetch second map_value_ptr from the stack */
1362     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
1363     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1364     /* write into map value */
1365     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1366     BPF_MOV64_IMM(BPF_REG_0, 0),
1367     BPF_EXIT_INSN(),
1368 
1369     /* subprog 1 */
1370     /* call 3rd function twice */
1371     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1372     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1373     /* first time with fp-8 */
1374     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1375     BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1376     /* second time with fp-16 */
1377     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1378     BPF_EXIT_INSN(),
1379 
1380     /* subprog 2 */
1381     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1382     /* lookup from map */
1383     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1384     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1385     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1386     BPF_LD_MAP_FD(BPF_REG_1, 0),
1387     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1388     /* write map_value_ptr into stack frame of main prog */
1389     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1390     BPF_MOV64_IMM(BPF_REG_0, 0),
1391     BPF_EXIT_INSN(), /* return 0 */
1392     },
1393     .prog_type = BPF_PROG_TYPE_XDP,
1394     .fixup_map_hash_8b = { 23 },
1395     .result = ACCEPT,
1396 },
1397 {
1398     "calls: two calls that return map_value with bool condition",
    /* Subprog 2 returns 1 only when the lookup succeeded (and only
     * then stores the pointer into the caller's stack).  Subprog 1
     * correctly gates each dereference on "retval == 1": ACCEPT.
     */
1399     .insns = {
1400     /* main prog */
1401     /* pass fp-16, fp-8 into a function */
1402     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1403     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1404     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1405     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1406     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1407     BPF_MOV64_IMM(BPF_REG_0, 0),
1408     BPF_EXIT_INSN(),
1409 
1410     /* subprog 1 */
1411     /* call 3rd function twice */
1412     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1413     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1414     /* first time with fp-8 */
1415     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1416     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1417     /* fetch map_value_ptr from the stack of this function */
1418     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1419     /* write into map value */
1420     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1421     BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1422     /* second time with fp-16 */
1423     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1424     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1425     /* fetch second map_value_ptr from the stack */
1426     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1427     /* write into map value */
1428     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1429     BPF_EXIT_INSN(),
1430 
1431     /* subprog 2 */
1432     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1433     /* lookup from map */
1434     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1435     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1436     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1437     BPF_LD_MAP_FD(BPF_REG_1, 0),
1438     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1439     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1440     BPF_MOV64_IMM(BPF_REG_0, 0),
1441     BPF_EXIT_INSN(), /* return 0 */
1442     /* write map_value_ptr into stack frame of main prog */
1443     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1444     BPF_MOV64_IMM(BPF_REG_0, 1),
1445     BPF_EXIT_INSN(), /* return 1 */
1446     },
1447     .prog_type = BPF_PROG_TYPE_XDP,
1448     .fixup_map_hash_8b = { 23 },
1449     .result = ACCEPT,
1450 },
1451 {
1452     "calls: two calls that return map_value with incorrect bool check",
    /* Same as above, but the second check is inverted (JNE 0 instead of
     * JNE 1): on the retval==0 path fp-16 was never written, so reading
     * it is an "invalid read from stack".  REJECT.
     */
1453     .insns = {
1454     /* main prog */
1455     /* pass fp-16, fp-8 into a function */
1456     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1457     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1458     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1459     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1460     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1461     BPF_MOV64_IMM(BPF_REG_0, 0),
1462     BPF_EXIT_INSN(),
1463 
1464     /* subprog 1 */
1465     /* call 3rd function twice */
1466     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1467     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1468     /* first time with fp-8 */
1469     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
1470     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
1471     /* fetch map_value_ptr from the stack of this function */
1472     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1473     /* write into map value */
1474     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1475     BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
1476     /* second time with fp-16 */
1477     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1478     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), /* wrong check: proceeds when retval == 0 */
1479     /* fetch second map_value_ptr from the stack */
1480     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
1481     /* write into map value */
1482     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1483     BPF_EXIT_INSN(),
1484 
1485     /* subprog 2 */
1486     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1487     /* lookup from map */
1488     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1489     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1490     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1491     BPF_LD_MAP_FD(BPF_REG_1, 0),
1492     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1493     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1494     BPF_MOV64_IMM(BPF_REG_0, 0),
1495     BPF_EXIT_INSN(), /* return 0 */
1496     /* write map_value_ptr into stack frame of main prog */
1497     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1498     BPF_MOV64_IMM(BPF_REG_0, 1),
1499     BPF_EXIT_INSN(), /* return 1 */
1500     },
1501     .prog_type = BPF_PROG_TYPE_XDP,
1502     .fixup_map_hash_8b = { 23 },
1503     .result = REJECT,
1504     .errstr = "invalid read from stack R7 off=-16 size=8",
1505 },
1506 {
1507     "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
    /* Subprog 1 writes two map_value pointers into the caller's stack
     * (guarded by 0/1 flags in R8/R9) and passes both pointers+flags to
     * subprog 2, which dereferences them when the flag is 1.  The
     * second write uses off=2 with an 8-byte store into an 8-byte map
     * value, so the verifier must reject the out-of-bounds access.
     */
1508     .insns = {
1509     /* main prog */
1510     /* pass fp-16, fp-8 into a function */
1511     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1512     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1513     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1514     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1515     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1516     BPF_MOV64_IMM(BPF_REG_0, 0),
1517     BPF_EXIT_INSN(),
1518 
1519     /* subprog 1 */
1520     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1521     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1522     /* 1st lookup from map */
1523     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1524     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1525     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1526     BPF_LD_MAP_FD(BPF_REG_1, 0),
1527     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1528     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1529     BPF_MOV64_IMM(BPF_REG_8, 0),
1530     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1531     /* write map_value_ptr into stack frame of main prog at fp-8 */
1532     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1533     BPF_MOV64_IMM(BPF_REG_8, 1),
1534 
1535     /* 2nd lookup from map */
1536     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1537     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1538     BPF_LD_MAP_FD(BPF_REG_1, 0),
1539     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1540              BPF_FUNC_map_lookup_elem),
1541     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1542     BPF_MOV64_IMM(BPF_REG_9, 0),
1543     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1544     /* write map_value_ptr into stack frame of main prog at fp-16 */
1545     BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1546     BPF_MOV64_IMM(BPF_REG_9, 1),
1547 
1548     /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1549     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1550     BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1551     BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1552     BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1553     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
1554     BPF_EXIT_INSN(),
1555 
1556     /* subprog 2 */
1557     /* if arg2 == 1 do *arg1 = 0 */
1558     BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1559     /* fetch map_value_ptr from the stack of this function */
1560     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1561     /* write into map value */
1562     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1563 
1564     /* if arg4 == 1 do *arg3 = 0 */
1565     BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1566     /* fetch map_value_ptr from the stack of this function */
1567     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1568     /* write into map value */
1569     BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), /* off=2, size=8 overflows the 8-byte value */
1570     BPF_EXIT_INSN(),
1571     },
1572     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1573     .fixup_map_hash_8b = { 12, 22 },
1574     .result = REJECT,
1575     .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1576     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1577 },
1578 {
1579     "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
    /* Identical structure to test1, but both final writes use off=0,
     * which fits in the 8-byte map value: ACCEPT.
     */
1580     .insns = {
1581     /* main prog */
1582     /* pass fp-16, fp-8 into a function */
1583     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1584     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1585     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1586     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1587     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1588     BPF_MOV64_IMM(BPF_REG_0, 0),
1589     BPF_EXIT_INSN(),
1590 
1591     /* subprog 1 */
1592     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1593     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1594     /* 1st lookup from map */
1595     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1596     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1597     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1598     BPF_LD_MAP_FD(BPF_REG_1, 0),
1599     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1600     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1601     BPF_MOV64_IMM(BPF_REG_8, 0),
1602     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1603     /* write map_value_ptr into stack frame of main prog at fp-8 */
1604     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1605     BPF_MOV64_IMM(BPF_REG_8, 1),
1606 
1607     /* 2nd lookup from map */
1608     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
1609     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1610     BPF_LD_MAP_FD(BPF_REG_1, 0),
1611     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
1612              BPF_FUNC_map_lookup_elem),
1613     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1614     BPF_MOV64_IMM(BPF_REG_9, 0),
1615     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1616     /* write map_value_ptr into stack frame of main prog at fp-16 */
1617     BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1618     BPF_MOV64_IMM(BPF_REG_9, 1),
1619 
1620     /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1621     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1622     BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1623     BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1624     BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1625     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
1626     BPF_EXIT_INSN(),
1627 
1628     /* subprog 2 */
1629     /* if arg2 == 1 do *arg1 = 0 */
1630     BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1631     /* fetch map_value_ptr from the stack of this function */
1632     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1633     /* write into map value */
1634     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1635 
1636     /* if arg4 == 1 do *arg3 = 0 */
1637     BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1638     /* fetch map_value_ptr from the stack of this function */
1639     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1640     /* write into map value */
1641     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1642     BPF_EXIT_INSN(),
1643     },
1644     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1645     .fixup_map_hash_8b = { 12, 22 },
1646     .result = ACCEPT,
1647 },
1648 {
1649     "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
    /* Variant of test1 where the "subprogs" are entered by plain jumps
     * instead of bpf-to-bpf calls, so all code shares one stack frame.
     * The final store still uses off=2 with an 8-byte access into an
     * 8-byte map value and must be rejected.
     */
1650     .insns = {
1651     /* main prog */
1652     /* pass fp-16, fp-8 into a function */
1653     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1654     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1655     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1656     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1657     BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
1658     BPF_MOV64_IMM(BPF_REG_0, 0),
1659     BPF_EXIT_INSN(),
1660 
1661     /* subprog 1 */
1662     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1663     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1664     /* 1st lookup from map */
1665     BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
1666     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1667     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1668     BPF_LD_MAP_FD(BPF_REG_1, 0),
1669     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1670     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1671     BPF_MOV64_IMM(BPF_REG_8, 0),
1672     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1673     /* write map_value_ptr into stack frame of main prog at fp-8 */
1674     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1675     BPF_MOV64_IMM(BPF_REG_8, 1),
1676 
1677     /* 2nd lookup from map */
1678     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1679     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
1680     BPF_LD_MAP_FD(BPF_REG_1, 0),
1681     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1682     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1683     BPF_MOV64_IMM(BPF_REG_9, 0),  /* 26 */
1684     BPF_JMP_IMM(BPF_JA, 0, 0, 2),
1685     /* write map_value_ptr into stack frame of main prog at fp-16 */
1686     BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1687     BPF_MOV64_IMM(BPF_REG_9, 1),
1688 
1689     /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1690     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
1691     BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1692     BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1693     BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1694     BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
1695     BPF_JMP_IMM(BPF_JA, 0, 0, -30),
1696 
1697     /* subprog 2 */
1698     /* if arg2 == 1 do *arg1 = 0 */
1699     BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1700     /* fetch map_value_ptr from the stack of this function */
1701     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1702     /* write into map value */
1703     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1704 
1705     /* if arg4 == 1 do *arg3 = 0 */
1706     BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1707     /* fetch map_value_ptr from the stack of this function */
1708     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1709     /* write into map value */
1710     BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), /* off=2, size=8 overflows the 8-byte value */
1711     BPF_JMP_IMM(BPF_JA, 0, 0, -8),
1712     },
1713     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1714     .fixup_map_hash_8b = { 12, 22 },
1715     .result = REJECT,
1716     .errstr = "invalid access to map value, value_size=8 off=2 size=8",
1717     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1718 },
1719 {
1720     "calls: two calls that receive map_value_ptr_or_null via arg. test1",
     /* Subprog 1 performs two map lookups, spills each result (which may be
      * NULL) into the caller's stack at fp-8 / fp-16, and records a 0|1
      * non-NULL flag for each in r8 / r9. Subprog 2 only dereferences a
      * spilled pointer when its matching flag is 1, so every access is
      * provably non-NULL -> ACCEPT.
      */
1721     .insns = {
1722     /* main prog */
1723     /* pass fp-16, fp-8 into a function */
1724     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1725     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1726     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1727     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1728     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1729     BPF_MOV64_IMM(BPF_REG_0, 0),
1730     BPF_EXIT_INSN(),
1731 
1732     /* subprog 1 */
1733     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1734     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1735     /* 1st lookup from map */
1736     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1737     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1738     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1739     BPF_LD_MAP_FD(BPF_REG_1, 0),
1740     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1741     /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1742     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1743     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1744     BPF_MOV64_IMM(BPF_REG_8, 0),
1745     BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1746     BPF_MOV64_IMM(BPF_REG_8, 1),
1747 
1748     /* 2nd lookup from map */
1749     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1750     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1751     BPF_LD_MAP_FD(BPF_REG_1, 0),
1752     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1753     /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1754     BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1755     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1756     BPF_MOV64_IMM(BPF_REG_9, 0),
1757     BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1758     BPF_MOV64_IMM(BPF_REG_9, 1),
1759 
1760     /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1761     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1762     BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1763     BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1764     BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1765     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1766     BPF_EXIT_INSN(),
1767 
1768     /* subprog 2 */
1769     /* if arg2 == 1 do *arg1 = 0 */
1770     BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1771     /* fetch map_value_ptr from the stack of this function */
1772     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1773     /* write into map value */
1774     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1775 
1776     /* if arg4 == 1 do *arg3 = 0 */
1777     BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
1778     /* fetch map_value_ptr from the stack of this function */
1779     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1780     /* write into map value */
1781     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1782     BPF_EXIT_INSN(),
1783     },
1784     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1785     .fixup_map_hash_8b = { 12, 22 },
1786     .result = ACCEPT,
1787 },
1788 {
1789     "calls: two calls that receive map_value_ptr_or_null via arg. test2",
     /* Same shape as test1, except subprog 2 guards the second write with
      * "arg4 == 0" (lookup FAILED) instead of "arg4 == 1". On that path the
      * spilled value at fp-16 is NULL/scalar, so dereferencing it must be
      * rejected: "R0 invalid mem access 'scalar'".
      */
1790     .insns = {
1791     /* main prog */
1792     /* pass fp-16, fp-8 into a function */
1793     BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1794     BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1795     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1796     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1797     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
1798     BPF_MOV64_IMM(BPF_REG_0, 0),
1799     BPF_EXIT_INSN(),
1800 
1801     /* subprog 1 */
1802     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
1803     BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
1804     /* 1st lookup from map */
1805     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1806     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1807     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1808     BPF_LD_MAP_FD(BPF_REG_1, 0),
1809     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1810     /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
1811     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
1812     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1813     BPF_MOV64_IMM(BPF_REG_8, 0),
1814     BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1815     BPF_MOV64_IMM(BPF_REG_8, 1),
1816 
1817     /* 2nd lookup from map */
1818     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1819     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1820     BPF_LD_MAP_FD(BPF_REG_1, 0),
1821     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
1822     /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
1823     BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
1824     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
1825     BPF_MOV64_IMM(BPF_REG_9, 0),
1826     BPF_JMP_IMM(BPF_JA, 0, 0, 1),
1827     BPF_MOV64_IMM(BPF_REG_9, 1),
1828 
1829     /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
1830     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
1831     BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
1832     BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
1833     BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
1834     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1835     BPF_EXIT_INSN(),
1836 
1837     /* subprog 2 */
1838     /* if arg2 == 1 do *arg1 = 0 */
1839     BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
1840     /* fetch map_value_ptr from the stack of this function */
1841     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
1842     /* write into map value */
1843     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1844 
1845     /* if arg4 == 0 do *arg3 = 0 */
1846     BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
1847     /* fetch map_value_ptr from the stack of this function */
1848     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
1849     /* write into map value */
1850     BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
1851     BPF_EXIT_INSN(),
1852     },
1853     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1854     .fixup_map_hash_8b = { 12, 22 },
1855     .result = REJECT,
1856     .errstr = "R0 invalid mem access 'scalar'",
1857 },
1858 {
1859     "calls: pkt_ptr spill into caller stack",
     /* Subprog spills an unchecked pkt_ptr into the caller's stack slot,
      * then performs the range check and only reloads/uses the pointer on
      * the checked path. The caller never touches the slot -> ACCEPT.
      */
1860     .insns = {
1861     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1862     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1863     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
1864     BPF_EXIT_INSN(),
1865 
1866     /* subprog 1 */
1867     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1868             offsetof(struct __sk_buff, data)),
1869     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1870             offsetof(struct __sk_buff, data_end)),
1871     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1872     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1873     /* spill unchecked pkt_ptr into stack of caller */
1874     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1875     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1876     /* now the pkt range is verified, read pkt_ptr from stack */
1877     BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1878     /* write 4 bytes into packet */
1879     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1880     BPF_EXIT_INSN(),
1881     },
1882     .result = ACCEPT,
1883     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1884     .retval = POINTER_VALUE,
1885     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1886 },
1887 {
1888     "calls: pkt_ptr spill into caller stack 2",
     /* Like test 1, but after the subprog returns the CALLER reloads the
      * spilled pkt_ptr from fp-8 and writes through it unconditionally.
      * The range check only happened on one path inside the subprog, so
      * this access is unsafe -> REJECT "invalid access to packet".
      */
1889     .insns = {
1890     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1891     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1892     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1893     /* Marking is still kept, but not in all cases safe. */
1894     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1895     BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1896     BPF_EXIT_INSN(),
1897 
1898     /* subprog 1 */
1899     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1900             offsetof(struct __sk_buff, data)),
1901     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1902             offsetof(struct __sk_buff, data_end)),
1903     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1904     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1905     /* spill unchecked pkt_ptr into stack of caller */
1906     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1907     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1908     /* now the pkt range is verified, read pkt_ptr from stack */
1909     BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1910     /* write 4 bytes into packet */
1911     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1912     BPF_EXIT_INSN(),
1913     },
1914     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1915     .errstr = "invalid access to packet",
1916     .result = REJECT,
1917     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1918 },
1919 {
1920     "calls: pkt_ptr spill into caller stack 3",
     /* Subprog returns r5 = 1 only on the range-checked path (r5 = 0 when
      * the JGT check fails). The caller uses the spilled pkt_ptr only when
      * r0 != 0, i.e. only on the checked path -> ACCEPT, retval 1.
      */
1921     .insns = {
1922     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1923     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1924     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1925     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1926     /* Marking is still kept and safe here. */
1927     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1928     BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1929     BPF_EXIT_INSN(),
1930 
1931     /* subprog 1 */
1932     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1933             offsetof(struct __sk_buff, data)),
1934     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1935             offsetof(struct __sk_buff, data_end)),
1936     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1937     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1938     /* spill unchecked pkt_ptr into stack of caller */
1939     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1940     BPF_MOV64_IMM(BPF_REG_5, 0),
1941     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
1942     BPF_MOV64_IMM(BPF_REG_5, 1),
1943     /* now the pkt range is verified, read pkt_ptr from stack */
1944     BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
1945     /* write 4 bytes into packet */
1946     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1947     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1948     BPF_EXIT_INSN(),
1949     },
1950     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1951     .result = ACCEPT,
1952     .retval = 1,
1953     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1954 },
1955 {
1956     "calls: pkt_ptr spill into caller stack 4",
     /* Variant of test 3 where the subprog never reloads the spilled
      * pkt_ptr itself; the caller's guarded (r0 != 0) use of fp-8 checks
      * that the checked-pkt_ptr marking propagates back across the call
      * boundary -> ACCEPT, retval 1.
      */
1957     .insns = {
1958     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1959     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1960     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1961     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
1962     /* Check marking propagated. */
1963     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1964     BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
1965     BPF_EXIT_INSN(),
1966 
1967     /* subprog 1 */
1968     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1969             offsetof(struct __sk_buff, data)),
1970     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1971             offsetof(struct __sk_buff, data_end)),
1972     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1973     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1974     /* spill unchecked pkt_ptr into stack of caller */
1975     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
1976     BPF_MOV64_IMM(BPF_REG_5, 0),
1977     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
1978     BPF_MOV64_IMM(BPF_REG_5, 1),
1979     /* don't read back pkt_ptr from stack here */
1980     /* write 4 bytes into packet */
1981     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
1982     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
1983     BPF_EXIT_INSN(),
1984     },
1985     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1986     .result = ACCEPT,
1987     .retval = 1,
1988     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
1989 },
1990 {
1991     "calls: pkt_ptr spill into caller stack 5",
     /* The caller first spills the ctx pointer (r1) into fp-8; the subprog
      * then conditionally overwrites the same slot with a checked pkt_ptr.
      * The caller's reload sees two different pointer types through the
      * same spill slot -> REJECT "same insn cannot be used with different".
      */
1992     .insns = {
1993     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
1994     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
1995     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
1996     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
1997     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
1998     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
1999     BPF_EXIT_INSN(),
2000 
2001     /* subprog 1 */
2002     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2003             offsetof(struct __sk_buff, data)),
2004     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2005             offsetof(struct __sk_buff, data_end)),
2006     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2007     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2008     BPF_MOV64_IMM(BPF_REG_5, 0),
2009     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2010     /* spill checked pkt_ptr into stack of caller */
2011     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2012     BPF_MOV64_IMM(BPF_REG_5, 1),
2013     /* don't read back pkt_ptr from stack here */
2014     /* write 4 bytes into packet */
2015     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2016     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2017     BPF_EXIT_INSN(),
2018     },
2019     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2020     .errstr = "same insn cannot be used with different",
2021     .result = REJECT,
2022     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2023 },
2024 {
2025     "calls: pkt_ptr spill into caller stack 6",
     /* Caller pre-fills fp-8 with data_end; the subprog only overwrites it
      * with a checked pkt_ptr on one path. On the other path the caller
      * reloads data_end and dereferences it -> REJECT
      * "R4 invalid mem access".
      */
2026     .insns = {
2027     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2028             offsetof(struct __sk_buff, data_end)),
2029     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2030     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2031     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2032     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2033     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2034     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2035     BPF_EXIT_INSN(),
2036 
2037     /* subprog 1 */
2038     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2039             offsetof(struct __sk_buff, data)),
2040     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2041             offsetof(struct __sk_buff, data_end)),
2042     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2043     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2044     BPF_MOV64_IMM(BPF_REG_5, 0),
2045     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2046     /* spill checked pkt_ptr into stack of caller */
2047     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2048     BPF_MOV64_IMM(BPF_REG_5, 1),
2049     /* don't read back pkt_ptr from stack here */
2050     /* write 4 bytes into packet */
2051     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2052     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2053     BPF_EXIT_INSN(),
2054     },
2055     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2056     .errstr = "R4 invalid mem access",
2057     .result = REJECT,
2058     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2059 },
2060 {
2061     "calls: pkt_ptr spill into caller stack 7",
     /* Same as test 6 but the caller pre-fills fp-8 with the scalar 0
      * instead of data_end. On the unchecked path the caller dereferences
      * that scalar -> REJECT "R4 invalid mem access".
      */
2062     .insns = {
2063     BPF_MOV64_IMM(BPF_REG_2, 0),
2064     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2065     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2066     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2067     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2068     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2069     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2070     BPF_EXIT_INSN(),
2071 
2072     /* subprog 1 */
2073     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2074             offsetof(struct __sk_buff, data)),
2075     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2076             offsetof(struct __sk_buff, data_end)),
2077     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2078     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2079     BPF_MOV64_IMM(BPF_REG_5, 0),
2080     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2081     /* spill checked pkt_ptr into stack of caller */
2082     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2083     BPF_MOV64_IMM(BPF_REG_5, 1),
2084     /* don't read back pkt_ptr from stack here */
2085     /* write 4 bytes into packet */
2086     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2087     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2088     BPF_EXIT_INSN(),
2089     },
2090     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2091     .errstr = "R4 invalid mem access",
2092     .result = REJECT,
2093     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2094 },
2095 {
2096     "calls: pkt_ptr spill into caller stack 8",
     /* Caller range-checks its own pkt_ptr (exits early via JLE+EXIT if
      * out of range) before spilling it into fp-8. Both the pre-spilled
      * value and the subprog's conditional overwrite are checked pkt_ptrs,
      * so the caller's reload+deref is safe on every path -> ACCEPT.
      */
2097     .insns = {
2098     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2099             offsetof(struct __sk_buff, data)),
2100     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2101             offsetof(struct __sk_buff, data_end)),
2102     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2103     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2104     BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2105     BPF_EXIT_INSN(),
2106     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2107     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2108     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2109     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2110     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2111     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2112     BPF_EXIT_INSN(),
2113 
2114     /* subprog 1 */
2115     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2116             offsetof(struct __sk_buff, data)),
2117     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2118             offsetof(struct __sk_buff, data_end)),
2119     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2120     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2121     BPF_MOV64_IMM(BPF_REG_5, 0),
2122     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2123     /* spill checked pkt_ptr into stack of caller */
2124     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2125     BPF_MOV64_IMM(BPF_REG_5, 1),
2126     /* don't read back pkt_ptr from stack here */
2127     /* write 4 bytes into packet */
2128     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2129     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2130     BPF_EXIT_INSN(),
2131     },
2132     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2133     .result = ACCEPT,
2134     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2135 },
2136 {
2137     "calls: pkt_ptr spill into caller stack 9",
     /* Like test 8, but the subprog spills its pkt_ptr BEFORE the range
      * check, so the unchecked spill clobbers the caller's checked pkt_ptr
      * in fp-8; the caller's unconditional deref is then unsafe -> REJECT
      * "invalid access to packet".
      */
2138     .insns = {
2139     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2140             offsetof(struct __sk_buff, data)),
2141     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2142             offsetof(struct __sk_buff, data_end)),
2143     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2144     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2145     BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
2146     BPF_EXIT_INSN(),
2147     BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2148     BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2149     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2150     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
2151     BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2152     BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
2153     BPF_EXIT_INSN(),
2154 
2155     /* subprog 1 */
2156     BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2157             offsetof(struct __sk_buff, data)),
2158     BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2159             offsetof(struct __sk_buff, data_end)),
2160     BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2161     BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2162     BPF_MOV64_IMM(BPF_REG_5, 0),
2163     /* spill unchecked pkt_ptr into stack of caller */
2164     BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2165     BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2166     BPF_MOV64_IMM(BPF_REG_5, 1),
2167     /* don't read back pkt_ptr from stack here */
2168     /* write 4 bytes into packet */
2169     BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
2170     BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
2171     BPF_EXIT_INSN(),
2172     },
2173     .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2174     .errstr = "invalid access to packet",
2175     .result = REJECT,
2176     .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
2177 },
2178 {
2179     "calls: caller stack init to zero or map_value_or_null",
     /* fp-8 is zero-initialized before the call; the subprog either leaves
      * it untouched (ctx == 0 path) or overwrites it with a lookup result.
      * Either way the caller's JEQ-0 guard makes the subsequent store safe
      * -> ACCEPT.
      */
2180     .insns = {
2181     BPF_MOV64_IMM(BPF_REG_0, 0),
2182     BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2183     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2184     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2185     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2186     /* fetch map_value_or_null or const_zero from stack */
2187     BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2188     BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2189     /* store into map_value */
2190     BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
2191     BPF_EXIT_INSN(),
2192 
2193     /* subprog 1 */
2194     /* if (ctx == 0) return; */
2195     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
2196     /* else bpf_map_lookup() and *(fp - 8) = r0 */
2197     BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2198     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2199     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2200     BPF_LD_MAP_FD(BPF_REG_1, 0),
2201     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2202     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2203     /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
2204     BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
2205     BPF_EXIT_INSN(),
2206     },
2207     .fixup_map_hash_8b = { 13 },
2208     .result = ACCEPT,
2209     .prog_type = BPF_PROG_TYPE_XDP,
2210 },
2211 {
2212     "calls: stack init to zero and pruning",
     /* One fork zero-initializes fp-8, the other does not; both converge on
      * a map lookup whose key pointer reads fp-8. Pruning must not treat
      * the states as equivalent, otherwise the uninitialized-read path is
      * accepted. Expected: REJECT with "invalid indirect read from stack".
      */
2213     .insns = {
2214     /* first make allocated_stack 16 byte */
2215     BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
2216     /* now fork the execution such that the false branch
2217      * of JGT insn will be verified second and it skips zero
2218      * init of fp-8 stack slot. If stack liveness marking
2219      * is missing live_read marks from call map_lookup
2220      * processing then pruning will incorrectly assume
2221      * that fp-8 stack slot was unused in the fall-through
2222      * branch and will accept the program incorrectly
2223      */
2224     BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
2225     BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2226     BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2227     BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2228     BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2229     BPF_LD_MAP_FD(BPF_REG_1, 0),
2230     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
2231     BPF_EXIT_INSN(),
2232     },
2233     .fixup_map_hash_48b = { 6 },
2234     .errstr = "invalid indirect read from stack R2 off -8+0 size 8",
2235     .result = REJECT,
2236     .prog_type = BPF_PROG_TYPE_XDP,
2237 },
2238 {
2239     "calls: ctx read at start of subprog",
     /* The subprog's first insn reads one byte through its r1 argument,
      * which is the ctx pointer passed from the caller. Valid for a
      * privileged loader (ACCEPT); unprivileged loaders may not use bpf-to-
      * bpf calls at all (REJECT with the "allowed for" message).
      */
2240     .insns = {
2241     BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
2242     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
2243     BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
2244     BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
2245     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
2246     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2247     BPF_EXIT_INSN(),
2248     BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2249     BPF_MOV64_IMM(BPF_REG_0, 0),
2250     BPF_EXIT_INSN(),
2251     },
2252     .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2253     .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2254     .result_unpriv = REJECT,
2255     .result = ACCEPT,
2256 },
2257 {
2258     "calls: cross frame pruning",
     /* r8 depends on a random value; after the subprog call, r1 is only a
      * readable pointer on one branch of "r8 == 1". State pruning across
      * the call frame must keep the two r8 values distinct, so the
      * LDX through r1 on the bad branch is caught -> REJECT "!read_ok".
      */
2259     .insns = {
2260     /* r8 = !!random();
2261      * call pruner()
2262      * if (r8)
2263      *     do something bad;
2264      */
2265     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2266     BPF_MOV64_IMM(BPF_REG_8, 0),
2267     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2268     BPF_MOV64_IMM(BPF_REG_8, 1),
2269     BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
2270     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2271     BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2272     BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
2273     BPF_MOV64_IMM(BPF_REG_0, 0),
2274     BPF_EXIT_INSN(),
2275     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2276     BPF_EXIT_INSN(),
2277     },
2278     .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2279     .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2280     .errstr = "!read_ok",
2281     .result = REJECT,
2282 },
2283 {
2284     "calls: cross frame pruning - liveness propagation",
     /* Two independent random-derived flags (r8, r9) are live across a
      * subprog call. Liveness/pruning must propagate the r8 distinction
      * across frames so the bad LDX through r2 on the "r8 != 1" branch is
      * detected -> REJECT "!read_ok".
      */
2285     .insns = {
2286     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2287     BPF_MOV64_IMM(BPF_REG_8, 0),
2288     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2289     BPF_MOV64_IMM(BPF_REG_8, 1),
2290     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
2291     BPF_MOV64_IMM(BPF_REG_9, 0),
2292     BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
2293     BPF_MOV64_IMM(BPF_REG_9, 1),
2294     BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
2295     BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
2296     BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
2297     BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
2298     BPF_MOV64_IMM(BPF_REG_0, 0),
2299     BPF_EXIT_INSN(),
2300     BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2301     BPF_EXIT_INSN(),
2302     },
2303     .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2304     .errstr_unpriv = "loading/calling other bpf or kernel functions are allowed for",
2305     .errstr = "!read_ok",
2306     .result = REJECT,
2307 },