// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
    int err, map_fd, prog_fd, main_fd, i, j;
    struct bpf_map *prog_array;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char prog_name[32];
    char buff[128] = {};
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buff,
        .data_size_in = sizeof(buff),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
                &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

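    /* Populate every jmp_table slot (nop -> jmp rewrite at each call site). */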
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

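    /* Delete the slots one by one (jmp -> nop); each run returns the lowest
     * index that is still populated.
     */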
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, i, "tailcall retval");

        err = bpf_map_delete_elem(map_fd, &i);
        if (CHECK_FAIL(err))
            goto out;
    }

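    /* With the table empty no tail call is taken; entry falls through and
     * returns 3.
     */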
    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 3, "tailcall retval");

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_OK(topts.retval, "tailcall retval");

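    /* Overwrite already-populated slots with different programs in reverse
     * order (jmp -> jmp rewrite).
     */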
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        j = bpf_map__max_entries(prog_array) - 1 - i;
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        j = bpf_map__max_entries(prog_array) - 1 - i;

        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, j, "tailcall retval");

        err = bpf_map_delete_elem(map_fd, &i);
        if (CHECK_FAIL(err))
            goto out;
    }

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 3, "tailcall retval");

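    /* Deleting from already-empty slots must fail with ENOENT and leave the
     * generated code untouched (nop -> nop).
     */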
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        err = bpf_map_delete_elem(map_fd, &i);
        if (CHECK_FAIL(err >= 0 || errno != ENOENT))
            goto out;

        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, 3, "tailcall retval");
    }

out:
    bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
    int err, map_fd, prog_fd, main_fd, i;
    struct bpf_map *prog_array;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char prog_name[32];
    char buff[128] = {};
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buff,
        .data_size_in = sizeof(buff),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
                &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 2, "tailcall retval");

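    /* Remove the last program in the chain; the run now stops one hop earlier. */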
    i = 2;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 1, "tailcall retval");

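    /* Remove the first slot as well; with nothing to enter, the run falls
     * through to the default return value of 3.
     */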
    i = 0;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
    bpf_object__close(obj);
}

static void test_tailcall_count(const char *which)
{
    int err, map_fd, prog_fd, main_fd, data_fd, i, val;
    struct bpf_map *prog_array, *data_map;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char buff[128] = {};
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buff,
        .data_size_in = sizeof(buff),
        .repeat = 1,
    );

    err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
                &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    prog = bpf_object__find_program_by_name(obj, "classifier_0");
    if (CHECK_FAIL(!prog))
        goto out;

    prog_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(prog_fd < 0))
        goto out;

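    /* classifier_0 tail calls itself, bumping a counter in .bss on every run,
     * until the kernel's tail call limit stops the chain.
     */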
    i = 0;
    err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 1, "tailcall retval");

    data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
    if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
        goto out;

    data_fd = bpf_map__fd(data_map);
    if (CHECK_FAIL(data_fd < 0))
        goto out;

    i = 0;
    err = bpf_map_lookup_elem(data_fd, &i, &val);
    ASSERT_OK(err, "tailcall count");
    ASSERT_EQ(val, 33, "tailcall count");

    i = 0;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_OK(topts.retval, "tailcall retval");
out:
    bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
    test_tailcall_count("tailcall3.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
    test_tailcall_count("tailcall6.o");
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * when the key is not known. The key is passed via global data so that
 * different targets can be selected and their return values compared.
 */
static void test_tailcall_4(void)
{
    int err, map_fd, prog_fd, main_fd, data_fd, i;
    struct bpf_map *prog_array, *data_map;
    struct bpf_program *prog;
    struct bpf_object *obj;
    static const int zero = 0;
    char buff[128] = {};
    char prog_name[32];
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buff,
        .data_size_in = sizeof(buff),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
                &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
    if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
        goto out;

    data_fd = bpf_map__fd(data_map);
    if (CHECK_FAIL(data_fd < 0))
        goto out;

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

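    /* Select each target via the key stored in global data and verify its
     * return value.
     */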
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;

        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, i, "tailcall retval");
    }

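    /* Remove each target; with its slot empty, the entry program falls back
     * to the default return value of 3.
     */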
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;

        err = bpf_map_delete_elem(map_fd, &i);
        if (CHECK_FAIL(err))
            goto out;

        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, 3, "tailcall retval");
    }
out:
    bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ between
 * branches.
 */
static void test_tailcall_5(void)
{
    int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
    struct bpf_map *prog_array, *data_map;
    struct bpf_program *prog;
    struct bpf_object *obj;
    static const int zero = 0;
    char buff[128] = {};
    char prog_name[32];
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buff,
        .data_size_in = sizeof(buff),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
                &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
    if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
        goto out;

    data_fd = bpf_map__fd(data_map);
    if (CHECK_FAIL(data_fd < 0))
        goto out;

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

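    /* Same checks as in test_tailcall_4, but with non-contiguous constant
     * keys driven through global data.
     */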
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;

        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, i, "tailcall retval");
    }

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;

        err = bpf_map_delete_elem(map_fd, &i);
        if (CHECK_FAIL(err))
            goto out;

        err = bpf_prog_test_run_opts(main_fd, &topts);
        ASSERT_OK(err, "tailcall");
        ASSERT_EQ(topts.retval, 3, "tailcall retval");
    }
out:
    bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
    int err, map_fd, prog_fd, main_fd, i;
    struct bpf_map *prog_array;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char prog_name[32];
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = &pkt_v4,
        .data_size_in = sizeof(pkt_v4),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
                &obj, &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    /* nop -> jmp */
    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 1, "tailcall retval");

    /* jmp -> nop, call subprog that will do tailcall */
    i = 1;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_OK(topts.retval, "tailcall retval");

    /* make sure that subprog can access ctx and entry prog that
     * called this subprog can properly return
     */
    i = 0;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
    bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
    int err, map_fd, prog_fd, main_fd, data_fd, i, val;
    struct bpf_map *prog_array, *data_map;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char buff[128] = {};
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = buff,
        .data_size_in = sizeof(buff),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
                &obj, &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    prog = bpf_object__find_program_by_name(obj, "classifier_0");
    if (CHECK_FAIL(!prog))
        goto out;

    prog_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(prog_fd < 0))
        goto out;

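    /* classifier_0 re-enters itself through a bpf2bpf subprogram, bumping the
     * counter in .bss on every pass, until the tail call limit is reached.
     */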
    i = 0;
    err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, 1, "tailcall retval");

    data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
    if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
        goto out;

    data_fd = bpf_map__fd(data_map);
    if (CHECK_FAIL(data_fd < 0))
        goto out;

    i = 0;
    err = bpf_map_lookup_elem(data_fd, &i, &val);
    ASSERT_OK(err, "tailcall count");
    ASSERT_EQ(val, 33, "tailcall count");

    i = 0;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_OK(topts.retval, "tailcall retval");
out:
    bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
    int err, map_fd, prog_fd, main_fd, i;
    struct bpf_map *prog_array;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char prog_name[32];
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = &pkt_v4,
        .data_size_in = sizeof(pkt_v4),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
                &obj, &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

    i = 1;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

    i = 0;
    err = bpf_map_delete_elem(map_fd, &i);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
    bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure the
 * tail call counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31, because the tail call counter includes the first
 * two tail calls whereas the global counter is incremented only in the loop
 * shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct with instruction movement.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
    int err, map_fd, prog_fd, main_fd, data_fd, i;
    struct tailcall_bpf2bpf4__bss val;
    struct bpf_map *prog_array, *data_map;
    struct bpf_program *prog;
    struct bpf_object *obj;
    char prog_name[32];
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = &pkt_v4,
        .data_size_in = sizeof(pkt_v4),
        .repeat = 1,
    );

    err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
                &obj, &prog_fd);
    if (CHECK_FAIL(err))
        return;

    prog = bpf_object__find_program_by_name(obj, "entry");
    if (CHECK_FAIL(!prog))
        goto out;

    main_fd = bpf_program__fd(prog);
    if (CHECK_FAIL(main_fd < 0))
        goto out;

    prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
    if (CHECK_FAIL(!prog_array))
        goto out;

    map_fd = bpf_map__fd(prog_array);
    if (CHECK_FAIL(map_fd < 0))
        goto out;

    for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
        snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (CHECK_FAIL(!prog))
            goto out;

        prog_fd = bpf_program__fd(prog);
        if (CHECK_FAIL(prog_fd < 0))
            goto out;

        err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
        if (CHECK_FAIL(err))
            goto out;
    }

    data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
    if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
        goto out;

    data_fd = bpf_map__fd(data_map);
    if (CHECK_FAIL(data_fd < 0))
        goto out;

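    /* Seed the global state: set the noise flag and clear the run counter
     * before the run.
     */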
    i = 0;
    val.noise = noise;
    val.count = 0;
    err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
    if (CHECK_FAIL(err))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "tailcall");
    ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

    i = 0;
    err = bpf_map_lookup_elem(data_fd, &i, &val);
    ASSERT_OK(err, "tailcall count");
    ASSERT_EQ(val.count, 31, "tailcall count");

out:
    bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack that is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
    struct tailcall_bpf2bpf6 *obj;
    int err, map_fd, prog_fd, main_fd, data_fd, i, val;
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = &pkt_v4,
        .data_size_in = sizeof(pkt_v4),
        .repeat = 1,
    );

    obj = tailcall_bpf2bpf6__open_and_load();
    if (!ASSERT_OK_PTR(obj, "open and load"))
        return;

    main_fd = bpf_program__fd(obj->progs.entry);
    if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
        goto out;

    map_fd = bpf_map__fd(obj->maps.jmp_table);
    if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
        goto out;

    prog_fd = bpf_program__fd(obj->progs.classifier_0);
    if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
        goto out;

    i = 0;
    err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
    if (!ASSERT_OK(err, "jmp_table map update"))
        goto out;

    err = bpf_prog_test_run_opts(main_fd, &topts);
    ASSERT_OK(err, "entry prog test run");
    ASSERT_EQ(topts.retval, 0, "tailcall retval");

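    /* Read the "done" flag back from .bss to confirm that the tail-called
     * program actually ran.
     */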
    data_fd = bpf_map__fd(obj->maps.bss);
    if (!ASSERT_GE(data_fd, 0, "bss map fd"))
        goto out;

    i = 0;
    err = bpf_map_lookup_elem(data_fd, &i, &val);
    ASSERT_OK(err, "bss map lookup");
    ASSERT_EQ(val, 1, "done flag is set");

out:
    tailcall_bpf2bpf6__destroy(obj);
}

void test_tailcalls(void)
{
    if (test__start_subtest("tailcall_1"))
        test_tailcall_1();
    if (test__start_subtest("tailcall_2"))
        test_tailcall_2();
    if (test__start_subtest("tailcall_3"))
        test_tailcall_3();
    if (test__start_subtest("tailcall_4"))
        test_tailcall_4();
    if (test__start_subtest("tailcall_5"))
        test_tailcall_5();
    if (test__start_subtest("tailcall_6"))
        test_tailcall_6();
    if (test__start_subtest("tailcall_bpf2bpf_1"))
        test_tailcall_bpf2bpf_1();
    if (test__start_subtest("tailcall_bpf2bpf_2"))
        test_tailcall_bpf2bpf_2();
    if (test__start_subtest("tailcall_bpf2bpf_3"))
        test_tailcall_bpf2bpf_3();
    if (test__start_subtest("tailcall_bpf2bpf_4"))
        test_tailcall_bpf2bpf_4(false);
    if (test__start_subtest("tailcall_bpf2bpf_5"))
        test_tailcall_bpf2bpf_4(true);
    if (test__start_subtest("tailcall_bpf2bpf_6"))
        test_tailcall_bpf2bpf_6();
}