// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/btf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

#define MAX_INSNS   BPF_MAXINSNS
#define MAX_EXPECTED_INSNS  32
#define MAX_UNEXPECTED_INSNS    32
#define MAX_TEST_INSNS  1000000
#define MAX_FIXUPS  8
#define MAX_NR_MAPS 23
#define MAX_TEST_RUNS   8
#define POINTER_VALUE   0xcafe4all
#define TEST_DATA_LEN   64
#define MAX_FUNC_INFOS  8
#define MAX_BTF_STRINGS 256
#define MAX_BTF_TYPES   256

#define INSN_OFF_MASK   ((__s16)0xFFFF)
#define INSN_IMM_MASK   ((__s32)0xFFFFFFFF)
#define SKIP_INSNS()    BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)

#define DEFAULT_LIBBPF_LOG_LEVEL    4
#define VERBOSE_LIBBPF_LOG_LEVEL    1

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS  (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT        (1 << 1)

/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \
            1ULL << CAP_PERFMON |   \
            1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;

struct kfunc_btf_id_pair {
    const char *kfunc;
    int insn_idx;
};

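/* A single verifier test case: the program to load (either a static
 * insns[] array or one generated by a fill_helper), the maps and BTF to
 * wire up via the fixup_* arrays, and the expected verifier verdict,
 * log output, and runtime results.
 */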
struct bpf_test {
    const char *descr;
    struct bpf_insn insns[MAX_INSNS];
    struct bpf_insn *fill_insns;
    /* If specified, the test engine looks for this sequence of
     * instructions in the BPF program after loading. Allows testing
     * rewrites applied by the verifier.  Use the values
     * INSN_OFF_MASK and INSN_IMM_MASK to mask the `off` and `imm`
     * fields if their content does not matter.  The test case fails
     * if the specified instructions are not found.
     *
     * The sequence can be split into sub-sequences by adding a
     * SKIP_INSNS instruction at the end of each sub-sequence. In
     * that case the sub-sequences are searched for one after another.
     */
    struct bpf_insn expected_insns[MAX_EXPECTED_INSNS];
    /* If specified, the test engine applies the same pattern-matching
     * logic as for `expected_insns`. If the specified pattern is
     * matched, the test case is marked as failed.
     */
    struct bpf_insn unexpected_insns[MAX_UNEXPECTED_INSNS];
    int fixup_map_hash_8b[MAX_FIXUPS];
    int fixup_map_hash_48b[MAX_FIXUPS];
    int fixup_map_hash_16b[MAX_FIXUPS];
    int fixup_map_array_48b[MAX_FIXUPS];
    int fixup_map_sockmap[MAX_FIXUPS];
    int fixup_map_sockhash[MAX_FIXUPS];
    int fixup_map_xskmap[MAX_FIXUPS];
    int fixup_map_stacktrace[MAX_FIXUPS];
    int fixup_prog1[MAX_FIXUPS];
    int fixup_prog2[MAX_FIXUPS];
    int fixup_map_in_map[MAX_FIXUPS];
    int fixup_cgroup_storage[MAX_FIXUPS];
    int fixup_percpu_cgroup_storage[MAX_FIXUPS];
    int fixup_map_spin_lock[MAX_FIXUPS];
    int fixup_map_array_ro[MAX_FIXUPS];
    int fixup_map_array_wo[MAX_FIXUPS];
    int fixup_map_array_small[MAX_FIXUPS];
    int fixup_sk_storage_map[MAX_FIXUPS];
    int fixup_map_event_output[MAX_FIXUPS];
    int fixup_map_reuseport_array[MAX_FIXUPS];
    int fixup_map_ringbuf[MAX_FIXUPS];
    int fixup_map_timer[MAX_FIXUPS];
    int fixup_map_kptr[MAX_FIXUPS];
    struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
    /* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
     * Can be a tab-separated sequence of expected strings. An empty string
     * means no log verification.
     */
    const char *errstr;
    const char *errstr_unpriv;
    uint32_t insn_processed;
    int prog_len;
    enum {
        UNDEF,
        ACCEPT,
        REJECT,
        VERBOSE_ACCEPT,
    } result, result_unpriv;
    enum bpf_prog_type prog_type;
    uint8_t flags;
    void (*fill_helper)(struct bpf_test *self);
    int runs;
#define bpf_testdata_struct_t                   \
    struct {                        \
        uint32_t retval, retval_unpriv;         \
        union {                     \
            __u8 data[TEST_DATA_LEN];       \
            __u64 data64[TEST_DATA_LEN / 8];    \
        };                      \
    }
    union {
        bpf_testdata_struct_t;
        bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
    };
    enum bpf_attach_type expected_attach_type;
    const char *kfunc;
    struct bpf_func_info func_info[MAX_FUNC_INFOS];
    int func_info_cnt;
    char btf_strings[MAX_BTF_STRINGS];
    /* A set of BTF types to load when specified;
     * use the macro definitions from test_btf.h.
     * Must end with BTF_END_RAW.
     */
    __u32 btf_types[MAX_BTF_TYPES];
};

/* Note we want this to be 64-bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
    unsigned int index;
    int foo[MAX_ENTRIES];
};

struct other_val {
    long long foo;
    long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
    /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
    /* jump range is limited to 16 bits. PUSH_CNT of ld_abs needs room */
    unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
    struct bpf_insn *insn = self->fill_insns;
    int i = 0, j, k = 0;

    insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
    for (j = 0; j < PUSH_CNT; j++) {
        insn[i++] = BPF_LD_ABS(BPF_B, 0);
        /* jump to error label */
        insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
        i++;
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
        insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
        insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
        insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                     BPF_FUNC_skb_vlan_push);
        insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
        i++;
    }

    for (j = 0; j < PUSH_CNT; j++) {
        insn[i++] = BPF_LD_ABS(BPF_B, 0);
        insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
        i++;
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
        insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                     BPF_FUNC_skb_vlan_pop);
        insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
        i++;
    }
    if (++k < 5)
        goto loop;

    for (; i < len - 3; i++)
        insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
    insn[len - 3] = BPF_JMP_A(1);
    /* error label */
    insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
    insn[len - 1] = BPF_EXIT_INSN();
    self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    /* jump range is limited to 16 bits. every ld_abs is replaced by 6 insns,
     * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
     * to extend the error value of the inlined ld_abs sequence which then
     * contains 7 insns. So, set the divisor to 7 so the testcase can
     * work on all arches.
     */
    unsigned int len = (1 << 15) / 7;
    int i = 0;

    insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
    insn[i++] = BPF_LD_ABS(BPF_B, 0);
    insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
    i++;
    while (i < len - 1)
        insn[i++] = BPF_LD_ABS(BPF_B, 1);
    insn[i] = BPF_EXIT_INSN();
    self->prog_len = i + 1;
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    uint64_t res = 0;
    int i = 0;

    insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
    while (i < self->retval) {
        uint64_t val = bpf_semi_rand_get();
        struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

        res ^= val;
        insn[i++] = tmp[0];
        insn[i++] = tmp[1];
        insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
    }
    insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
    insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
    insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
    insn[i] = BPF_EXIT_INSN();
    self->prog_len = i + 1;
    res ^= (res >> 32);
    self->retval = (uint32_t)res;
}

#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    int i = 0, k = 0;

    insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
    /* test to check that the long sequence of jumps is acceptable */
    while (k++ < MAX_JMP_SEQ) {
        insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                     BPF_FUNC_get_prandom_u32);
        insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
        insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                    -8 * (k % 64 + 1));
    }
    /* is_state_visited() doesn't allocate state for pruning for every jump.
     * Hence multiply jmps by 4 to accommodate that heuristic
     */
    while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
        insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
    insn[i] = BPF_EXIT_INSN();
    self->prog_len = i + 1;
    self->retval = 42;
}

/* test the sequence of 8k jumps in the innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    int i = 0, k = 0;

#define FUNC_NEST 7
    for (k = 0; k < FUNC_NEST; k++) {
        insn[i++] = BPF_CALL_REL(1);
        insn[i++] = BPF_EXIT_INSN();
    }
    insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
    /* test to check that the long sequence of jumps is acceptable */
    k = 0;
    while (k++ < MAX_JMP_SEQ) {
        insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                     BPF_FUNC_get_prandom_u32);
        insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
        insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
        insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
                    -8 * (k % (64 - 4 * FUNC_NEST) + 1));
    }
    while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
        insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
    insn[i] = BPF_EXIT_INSN();
    self->prog_len = i + 1;
    self->retval = 42;
}

static void bpf_fill_scale(struct bpf_test *self)
{
    switch (self->retval) {
    case 1:
        return bpf_fill_scale1(self);
    case 2:
        return bpf_fill_scale2(self);
    default:
        self->prog_len = 0;
        break;
    }
}

static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
    unsigned int len = 259, hlen = 128;
    int i;

    insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
    for (i = 1; i <= hlen; i++) {
        insn[i]        = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
        insn[i + hlen] = BPF_JMP_A(hlen - i);
    }
    insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
    insn[len - 1] = BPF_EXIT_INSN();

    return len;
}

static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
    unsigned int len = 4100, jmp_off = 2048;
    int i, j;

    insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
    for (i = 1; i <= jmp_off; i++) {
        insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
    }
    insn[i++] = BPF_JMP_A(jmp_off);
    for (; i <= jmp_off * 2 + 1; i += 16) {
        for (j = 0; j < 16; j++) {
            insn[i + j] = BPF_JMP_A(16 - j - 1);
        }
    }

    insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
    insn[len - 1] = BPF_EXIT_INSN();

    return len;
}

static void bpf_fill_torturous_jumps(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    int i = 0;

    switch (self->retval) {
    case 1:
        self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
        return;
    case 2:
        self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
        return;
    case 3:
        /* main */
        insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4);
        insn[i++] = BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 262);
        insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
        insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
        insn[i++] = BPF_EXIT_INSN();

        /* subprog 1 */
        i += bpf_fill_torturous_jumps_insn_1(insn + i);

        /* subprog 2 */
        i += bpf_fill_torturous_jumps_insn_2(insn + i);

        self->prog_len = i;
        return;
    default:
        self->prog_len = 0;
        break;
    }
}

static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self)
{
    struct bpf_insn *insn = self->fill_insns;
    /* This test was added to catch a specific use-after-free
     * error, which happened upon BPF program reallocation.
     * Reallocation is handled by core.c:bpf_prog_realloc, which
     * reuses old memory if page boundary is not crossed. The
     * value of `len` is chosen to cross this boundary on bpf_loop
     * patching.
     */
    const int len = getpagesize() - 25;
    int callback_load_idx;
    int callback_idx;
    int i = 0;

    insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1);
    callback_load_idx = i;
    insn[i++] = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW,
                 BPF_REG_2, BPF_PSEUDO_FUNC, 0,
                 777 /* filled below */);
    insn[i++] = BPF_RAW_INSN(0, 0, 0, 0, 0);
    insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0);
    insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0);
    insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop);

    while (i < len - 3)
        insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
    insn[i++] = BPF_EXIT_INSN();

    callback_idx = i;
    insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
    insn[i++] = BPF_EXIT_INSN();

    insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1;
    self->func_info[1].insn_off = callback_idx;
    self->prog_len = i;
    assert(i == len);
}

/* BPF_SK_LOOKUP contains 13 instructions; account for them if you need to
 * fix up maps.
 */
#define BPF_SK_LOOKUP(func)                     \
    /* struct bpf_sock_tuple tuple = {} */              \
    BPF_MOV64_IMM(BPF_REG_2, 0),                    \
    BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),          \
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),        \
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),        \
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),        \
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),        \
    BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),        \
    /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */        \
    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),               \
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),             \
    BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),    \
    BPF_MOV64_IMM(BPF_REG_4, 0),                    \
    BPF_MOV64_IMM(BPF_REG_5, 0),                    \
    BPF_EMIT_CALL(BPF_FUNC_ ## func)

/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
 * return value to 0 and does the preparation necessary for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2                       \
    BPF_MOV64_IMM(BPF_REG_0, 0),                    \
    BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,            \
            offsetof(struct __sk_buff, data)),          \
    BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,            \
            offsetof(struct __sk_buff, data_end)),      \
    BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),                \
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),               \
    BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),          \
    BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7                        \
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,           \
             BPF_FUNC_get_prandom_u32),             \
    BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                \
    BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),              \
    BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7                        \
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,           \
             BPF_FUNC_get_prandom_u32),             \
    BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),                \
    BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),           \
    BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),              \
    BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

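/* With FILL_ARRAY defined, verifier/tests.h expands to the initializer
 * list for this array (in the kernel selftests it is a generated header
 * that pulls in the test snippets under verifier/).
 */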
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

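/* Determine the length of a statically initialized program by scanning
 * backwards for the last instruction that is not all zeroes.
 */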
static int probe_filter_length(const struct bpf_insn *fp)
{
    int len;

    for (len = MAX_INSNS - 1; len > 0; --len)
        if (fp[len].code != 0 || fp[len].imm != 0)
            break;
    return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
    if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
        printf("SKIP (unsupported map type %d)\n", map_type);
        skips++;
        return true;
    }
    return false;
}

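/* Thin wrapper around bpf_map_create(): hash maps get BPF_F_NO_PREALLOC,
 * and an unsupported map type counts as a skip rather than a failure.
 */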
static int __create_map(uint32_t type, uint32_t size_key,
            uint32_t size_value, uint32_t max_elem,
            uint32_t extra_flags)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts);
    int fd;

    opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) | extra_flags;
    fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
    if (fd < 0) {
        if (skip_unsupported_map(type))
            return -1;
        printf("Failed to create map '%s'!\n", strerror(errno));
    }

    return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
              uint32_t size_value, uint32_t max_elem)
{
    return __create_map(type, size_key, size_value, max_elem, 0);
}

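/* Seed one struct test_val element so that tests reading the map back
 * observe deterministic, non-zero contents.
 */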
static void update_map(int fd, int index)
{
    struct test_val value = {
        .index = (6 + 1) * sizeof(int),
        .foo[6] = 0xabcdef12,
    };

    assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
    struct bpf_insn prog[] = {
        BPF_MOV64_IMM(BPF_REG_0, ret),
        BPF_EXIT_INSN(),
    };

    return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
                  int idx, int ret)
{
    struct bpf_insn prog[] = {
        BPF_MOV64_IMM(BPF_REG_3, idx),
        BPF_LD_MAP_FD(BPF_REG_2, mfd),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                 BPF_FUNC_tail_call),
        BPF_MOV64_IMM(BPF_REG_0, ret),
        BPF_EXIT_INSN(),
    };

    return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

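/* Build a BPF_MAP_TYPE_PROG_ARRAY holding three dummy programs: the one
 * in slot p2key tail-calls back into the array, the other two simply
 * return a constant.
 */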
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                 int p1key, int p2key, int p3key)
{
    int mfd, p1fd, p2fd, p3fd;

    mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
                 sizeof(int), max_elem, NULL);
    if (mfd < 0) {
        if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
            return -1;
        printf("Failed to create prog array '%s'!\n", strerror(errno));
        return -1;
    }

    p1fd = create_prog_dummy_simple(prog_type, 42);
    p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
    p3fd = create_prog_dummy_simple(prog_type, 24);
    if (p1fd < 0 || p2fd < 0 || p3fd < 0)
        goto err;
    if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
        goto err;
    if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
        goto err;
    if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
        close(mfd);
        mfd = -1;
    }
    close(p3fd);
    close(p2fd);
    close(p1fd);
    return mfd;
}

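/* Create an ARRAY_OF_MAPS whose inner map template is a one-element int
 * array; the inner fd only serves as a template and can be closed once
 * the outer map exists.
 */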
static int create_map_in_map(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts);
    int inner_map_fd, outer_map_fd;

    inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
                      sizeof(int), 1, NULL);
    if (inner_map_fd < 0) {
        if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
            return -1;
        printf("Failed to create array '%s'!\n", strerror(errno));
        return inner_map_fd;
    }

    opts.inner_map_fd = inner_map_fd;
    outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
                      sizeof(int), sizeof(int), 1, &opts);
    if (outer_map_fd < 0) {
        if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
            return -1;
        printf("Failed to create array of maps '%s'!\n",
               strerror(errno));
    }

    close(inner_map_fd);

    return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
    enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
        BPF_MAP_TYPE_CGROUP_STORAGE;
    int fd;

    fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
                TEST_DATA_LEN, 0, NULL);
    if (fd < 0) {
        if (skip_unsupported_map(type))
            return -1;
        printf("Failed to create cgroup storage '%s'!\n",
               strerror(errno));
    }

    return fd;
}

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 * struct bpf_timer {
 *   __u64 :64;
 *   __u64 :64;
 * } __attribute__((aligned(8)));
 * struct timer {
 *   struct bpf_timer t;
 * };
 * struct btf_ptr {
 *   struct prog_test_ref_kfunc __kptr *ptr;
 *   struct prog_test_ref_kfunc __kptr_ref *ptr;
 *   struct prog_test_member __kptr_ref *ptr;
 * }
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
                  "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
                  "\0prog_test_member";
static __u32 btf_raw_types[] = {
    /* int */
    BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
    /* struct bpf_spin_lock */                      /* [2] */
    BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
    BTF_MEMBER_ENC(15, 1, 0), /* int val; */
    /* struct val */                                /* [3] */
    BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
    BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
    BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
    /* struct bpf_timer */                          /* [4] */
    BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
    /* struct timer */                              /* [5] */
    BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
    BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
    /* struct prog_test_ref_kfunc */        /* [6] */
    BTF_STRUCT_ENC(51, 0, 0),
    BTF_STRUCT_ENC(89, 0, 0),           /* [7] */
    /* type tag "kptr" */
    BTF_TYPE_TAG_ENC(75, 6),            /* [8] */
    /* type tag "kptr_ref" */
    BTF_TYPE_TAG_ENC(80, 6),            /* [9] */
    BTF_TYPE_TAG_ENC(80, 7),            /* [10] */
    BTF_PTR_ENC(8),                 /* [11] */
    BTF_PTR_ENC(9),                 /* [12] */
    BTF_PTR_ENC(10),                /* [13] */
    /* struct btf_ptr */                /* [14] */
    BTF_STRUCT_ENC(43, 3, 24),
    BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
    BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
    BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};

static char bpf_vlog[UINT_MAX >> 8];

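/* Assemble a raw BTF blob (header, type section, and string section laid
 * out back to back) and load it via bpf_btf_load(), logging into bpf_vlog.
 */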
static int load_btf_spec(__u32 *types, int types_len,
             const char *strings, int strings_len)
{
    struct btf_header hdr = {
        .magic = BTF_MAGIC,
        .version = BTF_VERSION,
        .hdr_len = sizeof(struct btf_header),
        .type_len = types_len,
        .str_off = types_len,
        .str_len = strings_len,
    };
    void *ptr, *raw_btf;
    int btf_fd;
    LIBBPF_OPTS(bpf_btf_load_opts, opts,
            .log_buf = bpf_vlog,
            .log_size = sizeof(bpf_vlog),
            .log_level = (verbose
                  ? VERBOSE_LIBBPF_LOG_LEVEL
                  : DEFAULT_LIBBPF_LOG_LEVEL),
    );

    raw_btf = malloc(sizeof(hdr) + types_len + strings_len);
    if (!raw_btf)
        return -1;

    ptr = raw_btf;
    memcpy(ptr, &hdr, sizeof(hdr));
    ptr += sizeof(hdr);
    memcpy(ptr, types, hdr.type_len);
    ptr += hdr.type_len;
    memcpy(ptr, strings, hdr.str_len);
    ptr += hdr.str_len;

    btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts);
    if (btf_fd < 0)
        printf("Failed to load BTF spec: '%s'\n", strerror(errno));

    free(raw_btf);

    return btf_fd < 0 ? -1 : btf_fd;
}

static int load_btf(void)
{
    return load_btf_spec(btf_raw_types, sizeof(btf_raw_types),
                 btf_str_sec, sizeof(btf_str_sec));
}

static int load_btf_for_test(struct bpf_test *test)
{
    int types_num = 0;

    while (types_num < MAX_BTF_TYPES &&
           test->btf_types[types_num] != BTF_END_RAW)
        ++types_num;

    int types_len = types_num * sizeof(test->btf_types[0]);

    return load_btf_spec(test->btf_types, types_len,
                 test->btf_strings, sizeof(test->btf_strings));
}

static int create_map_spin_lock(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts,
        .btf_key_type_id = 1,
        .btf_value_type_id = 3,
    );
    int fd, btf_fd;

    btf_fd = load_btf();
    if (btf_fd < 0)
        return -1;
    opts.btf_fd = btf_fd;
    fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
    if (fd < 0)
        printf("Failed to create map with spin_lock\n");
    return fd;
}

static int create_sk_storage_map(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts,
        .map_flags = BPF_F_NO_PREALLOC,
        .btf_key_type_id = 1,
        .btf_value_type_id = 3,
    );
    int fd, btf_fd;

    btf_fd = load_btf();
    if (btf_fd < 0)
        return -1;
    opts.btf_fd = btf_fd;
    fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
    close(opts.btf_fd);
    if (fd < 0)
        printf("Failed to create sk_storage_map\n");
    return fd;
}

static int create_map_timer(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts,
        .btf_key_type_id = 1,
        .btf_value_type_id = 5,
    );
    int fd, btf_fd;

    btf_fd = load_btf();
    if (btf_fd < 0)
        return -1;

    opts.btf_fd = btf_fd;
    fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
    if (fd < 0)
        printf("Failed to create map with timer\n");
    return fd;
}

static int create_map_kptr(void)
{
    LIBBPF_OPTS(bpf_map_create_opts, opts,
        .btf_key_type_id = 1,
        .btf_value_type_id = 14,
    );
    int fd, btf_fd;

    btf_fd = load_btf();
    if (btf_fd < 0)
        return -1;

    opts.btf_fd = btf_fd;
    fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
    if (fd < 0)
        printf("Failed to create map with btf_id pointer\n");
    return fd;
}

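/* Patch a test program before loading: each fixup_* array is a
 * zero-terminated list of instruction indices whose imm field is
 * rewritten to the fd of a freshly created map of the matching type
 * (or, for fixup_kfunc_btf_id, to a kfunc BTF id found in vmlinux BTF).
 */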
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
              struct bpf_insn *prog, int *map_fds)
{
    int *fixup_map_hash_8b = test->fixup_map_hash_8b;
    int *fixup_map_hash_48b = test->fixup_map_hash_48b;
    int *fixup_map_hash_16b = test->fixup_map_hash_16b;
    int *fixup_map_array_48b = test->fixup_map_array_48b;
    int *fixup_map_sockmap = test->fixup_map_sockmap;
    int *fixup_map_sockhash = test->fixup_map_sockhash;
    int *fixup_map_xskmap = test->fixup_map_xskmap;
    int *fixup_map_stacktrace = test->fixup_map_stacktrace;
    int *fixup_prog1 = test->fixup_prog1;
    int *fixup_prog2 = test->fixup_prog2;
    int *fixup_map_in_map = test->fixup_map_in_map;
    int *fixup_cgroup_storage = test->fixup_cgroup_storage;
    int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
    int *fixup_map_spin_lock = test->fixup_map_spin_lock;
    int *fixup_map_array_ro = test->fixup_map_array_ro;
    int *fixup_map_array_wo = test->fixup_map_array_wo;
    int *fixup_map_array_small = test->fixup_map_array_small;
    int *fixup_sk_storage_map = test->fixup_sk_storage_map;
    int *fixup_map_event_output = test->fixup_map_event_output;
    int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
    int *fixup_map_ringbuf = test->fixup_map_ringbuf;
    int *fixup_map_timer = test->fixup_map_timer;
    int *fixup_map_kptr = test->fixup_map_kptr;
    struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;

    if (test->fill_helper) {
        test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
        test->fill_helper(test);
    }

    /* Allocating HTs with 1 elem is fine here, since we only exercise
     * the verifier and do not do runtime lookups, so the only thing
     * that really matters in this case is the value size.
     */
    if (*fixup_map_hash_8b) {
        map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                    sizeof(long long), 1);
        do {
            prog[*fixup_map_hash_8b].imm = map_fds[0];
            fixup_map_hash_8b++;
        } while (*fixup_map_hash_8b);
    }

    if (*fixup_map_hash_48b) {
        map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                    sizeof(struct test_val), 1);
        do {
            prog[*fixup_map_hash_48b].imm = map_fds[1];
            fixup_map_hash_48b++;
        } while (*fixup_map_hash_48b);
    }

    if (*fixup_map_hash_16b) {
        map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                    sizeof(struct other_val), 1);
        do {
            prog[*fixup_map_hash_16b].imm = map_fds[2];
            fixup_map_hash_16b++;
        } while (*fixup_map_hash_16b);
    }

    if (*fixup_map_array_48b) {
        map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                    sizeof(struct test_val), 1);
        update_map(map_fds[3], 0);
        do {
            prog[*fixup_map_array_48b].imm = map_fds[3];
            fixup_map_array_48b++;
        } while (*fixup_map_array_48b);
    }

    if (*fixup_prog1) {
        map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
        do {
            prog[*fixup_prog1].imm = map_fds[4];
            fixup_prog1++;
        } while (*fixup_prog1);
    }

    if (*fixup_prog2) {
        map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
        do {
            prog[*fixup_prog2].imm = map_fds[5];
            fixup_prog2++;
        } while (*fixup_prog2);
    }

    if (*fixup_map_in_map) {
        map_fds[6] = create_map_in_map();
        do {
            prog[*fixup_map_in_map].imm = map_fds[6];
            fixup_map_in_map++;
        } while (*fixup_map_in_map);
    }

    if (*fixup_cgroup_storage) {
        map_fds[7] = create_cgroup_storage(false);
        do {
            prog[*fixup_cgroup_storage].imm = map_fds[7];
            fixup_cgroup_storage++;
        } while (*fixup_cgroup_storage);
    }

    if (*fixup_percpu_cgroup_storage) {
        map_fds[8] = create_cgroup_storage(true);
        do {
            prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
            fixup_percpu_cgroup_storage++;
        } while (*fixup_percpu_cgroup_storage);
    }
    if (*fixup_map_sockmap) {
        map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
                    sizeof(int), 1);
        do {
            prog[*fixup_map_sockmap].imm = map_fds[9];
            fixup_map_sockmap++;
        } while (*fixup_map_sockmap);
    }
    if (*fixup_map_sockhash) {
        map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
                    sizeof(int), 1);
        do {
            prog[*fixup_map_sockhash].imm = map_fds[10];
            fixup_map_sockhash++;
        } while (*fixup_map_sockhash);
    }
    if (*fixup_map_xskmap) {
        map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
                    sizeof(int), 1);
        do {
            prog[*fixup_map_xskmap].imm = map_fds[11];
            fixup_map_xskmap++;
        } while (*fixup_map_xskmap);
    }
    if (*fixup_map_stacktrace) {
        map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
                     sizeof(u64), 1);
        do {
            prog[*fixup_map_stacktrace].imm = map_fds[12];
            fixup_map_stacktrace++;
        } while (*fixup_map_stacktrace);
    }
    if (*fixup_map_spin_lock) {
        map_fds[13] = create_map_spin_lock();
        do {
            prog[*fixup_map_spin_lock].imm = map_fds[13];
            fixup_map_spin_lock++;
        } while (*fixup_map_spin_lock);
    }
    if (*fixup_map_array_ro) {
        map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                       sizeof(struct test_val), 1,
                       BPF_F_RDONLY_PROG);
        update_map(map_fds[14], 0);
        do {
            prog[*fixup_map_array_ro].imm = map_fds[14];
            fixup_map_array_ro++;
        } while (*fixup_map_array_ro);
    }
    if (*fixup_map_array_wo) {
        map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                       sizeof(struct test_val), 1,
                       BPF_F_WRONLY_PROG);
        update_map(map_fds[15], 0);
        do {
            prog[*fixup_map_array_wo].imm = map_fds[15];
            fixup_map_array_wo++;
        } while (*fixup_map_array_wo);
    }
    if (*fixup_map_array_small) {
        map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
                       1, 1, 0);
        update_map(map_fds[16], 0);
        do {
            prog[*fixup_map_array_small].imm = map_fds[16];
            fixup_map_array_small++;
        } while (*fixup_map_array_small);
    }
    if (*fixup_sk_storage_map) {
        map_fds[17] = create_sk_storage_map();
        do {
            prog[*fixup_sk_storage_map].imm = map_fds[17];
            fixup_sk_storage_map++;
        } while (*fixup_sk_storage_map);
    }
    if (*fixup_map_event_output) {
        map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                       sizeof(int), sizeof(int), 1, 0);
        do {
            prog[*fixup_map_event_output].imm = map_fds[18];
            fixup_map_event_output++;
        } while (*fixup_map_event_output);
    }
    if (*fixup_map_reuseport_array) {
        map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
                       sizeof(u32), sizeof(u64), 1, 0);
        do {
            prog[*fixup_map_reuseport_array].imm = map_fds[19];
            fixup_map_reuseport_array++;
        } while (*fixup_map_reuseport_array);
    }
    if (*fixup_map_ringbuf) {
        map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
                       0, 4096);
        do {
            prog[*fixup_map_ringbuf].imm = map_fds[20];
            fixup_map_ringbuf++;
        } while (*fixup_map_ringbuf);
    }
    if (*fixup_map_timer) {
        map_fds[21] = create_map_timer();
        do {
            prog[*fixup_map_timer].imm = map_fds[21];
            fixup_map_timer++;
        } while (*fixup_map_timer);
    }
    if (*fixup_map_kptr) {
        map_fds[22] = create_map_kptr();
        do {
            prog[*fixup_map_kptr].imm = map_fds[22];
            fixup_map_kptr++;
        } while (*fixup_map_kptr);
    }

    /* Patch in kfunc BTF IDs */
    if (fixup_kfunc_btf_id->kfunc) {
        struct btf *btf;
        int btf_id;

        do {
            btf_id = 0;
            btf = btf__load_vmlinux_btf();
            if (btf) {
                btf_id = btf__find_by_name_kind(btf,
                                fixup_kfunc_btf_id->kfunc,
                                BTF_KIND_FUNC);
                btf_id = btf_id < 0 ? 0 : btf_id;
            }
            btf__free(btf);
            prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
            fixup_kfunc_btf_id++;
        } while (fixup_kfunc_btf_id->kfunc);
    }
}

struct libcap {
    struct __user_cap_header_struct hdr;
    struct __user_cap_data_struct data[2];
};

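/* Enable or disable ADMIN_CAPS (CAP_NET_ADMIN, CAP_PERFMON, CAP_BPF) in
 * the effective set; used to drop and re-acquire privileges around
 * unprivileged test runs.
 */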
static int set_admin(bool admin)
{
    int err;

    if (admin) {
        err = cap_enable_effective(ADMIN_CAPS, NULL);
        if (err)
            perror("cap_enable_effective(ADMIN_CAPS)");
    } else {
        err = cap_disable_effective(ADMIN_CAPS, NULL);
        if (err)
            perror("cap_disable_effective(ADMIN_CAPS)");
    }

    return err;
}

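/* Execute a loaded program once via BPF_PROG_TEST_RUN and compare its
 * return value against the expected one. ENOTSUPP, and EPERM in the
 * unprivileged case, are treated as "did not run" rather than a failure.
 */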
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
                void *data, size_t size_data)
{
    __u8 tmp[TEST_DATA_LEN << 2];
    __u32 size_tmp = sizeof(tmp);
    int err, saved_errno;
    LIBBPF_OPTS(bpf_test_run_opts, topts,
        .data_in = data,
        .data_size_in = size_data,
        .data_out = tmp,
        .data_size_out = size_tmp,
        .repeat = 1,
    );

    if (unpriv)
        set_admin(true);
    err = bpf_prog_test_run_opts(fd_prog, &topts);
    saved_errno = errno;

    if (unpriv)
        set_admin(false);

    if (err) {
        switch (saved_errno) {
        case ENOTSUPP:
            printf("Did not run the program (not supported) ");
            return 0;
        case EPERM:
            if (unpriv) {
                printf("Did not run the program (no permission) ");
                return 0;
            }
            /* fallthrough; */
        default:
            printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
                strerror(saved_errno));
            return err;
        }
    }

    if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
        printf("FAIL retval %d != %d ", topts.retval, expected_val);
        return 1;
    }

    return 0;
}

/* Returns true if every part of exp (tab-separated) appears in log, in order.
 *
 * If exp is an empty string, returns true.
 */
static bool cmp_str_seq(const char *log, const char *exp)
{
    char needle[200];
    const char *p, *q;
    int len;

    do {
        if (!strlen(exp))
            break;
        p = strchr(exp, '\t');
        if (!p)
            p = exp + strlen(exp);

        len = p - exp;
        if (len >= sizeof(needle) || !len) {
            printf("FAIL\nTestcase bug\n");
            return false;
        }
        strncpy(needle, exp, len);
        needle[len] = 0;
        q = strstr(log, needle);
        if (!q) {
            printf("FAIL\nUnexpected verifier log!\n"
                   "EXP: %s\nRES:\n", needle);
            return false;
        }
        log = q + len;
        exp = p + 1;
    } while (*p);
    return true;
}

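/* Fetch the verifier-rewritten ("xlated") instructions of a loaded
 * program using the usual two-call bpf_obj_get_info_by_fd() pattern:
 * the first call reports xlated_prog_len, the second fills the buffer.
 */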
static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
{
    struct bpf_prog_info info = {};
    __u32 info_len = sizeof(info);
    __u32 xlated_prog_len;
    __u32 buf_element_size = sizeof(struct bpf_insn);

    if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
        perror("bpf_obj_get_info_by_fd failed");
        return -1;
    }

    xlated_prog_len = info.xlated_prog_len;
    if (xlated_prog_len % buf_element_size) {
        printf("Program length %d is not multiple of %d\n",
               xlated_prog_len, buf_element_size);
        return -1;
    }

    *cnt = xlated_prog_len / buf_element_size;
    *buf = calloc(*cnt, buf_element_size);
    if (!*buf) {
        perror("can't allocate xlated program buffer");
        return -ENOMEM;
    }

    bzero(&info, sizeof(info));
    info.xlated_prog_len = xlated_prog_len;
    info.xlated_prog_insns = (__u64)*buf;
    if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
        perror("second bpf_obj_get_info_by_fd failed");
        goto out_free_buf;
    }

    return 0;

out_free_buf:
    free(*buf);
    return -1;
}

static bool is_null_insn(struct bpf_insn *insn)
{
    struct bpf_insn null_insn = {};

    return memcmp(insn, &null_insn, sizeof(null_insn)) == 0;
}

static bool is_skip_insn(struct bpf_insn *insn)
{
    struct bpf_insn skip_insn = SKIP_INSNS();

    return memcmp(insn, &skip_insn, sizeof(skip_insn)) == 0;
}

static int null_terminated_insn_len(struct bpf_insn *seq, int max_len)
{
    int i;

    for (i = 0; i < max_len; ++i) {
        if (is_null_insn(&seq[i]))
            return i;
    }
    return max_len;
}

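/* Compare a program instruction against a template instruction, treating
 * INSN_OFF_MASK and INSN_IMM_MASK in the template as wildcards.
 */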
static bool compare_masked_insn(struct bpf_insn *orig, struct bpf_insn *masked)
{
    struct bpf_insn orig_masked;

    memcpy(&orig_masked, orig, sizeof(orig_masked));
    if (masked->imm == INSN_IMM_MASK)
        orig_masked.imm = INSN_IMM_MASK;
    if (masked->off == INSN_OFF_MASK)
        orig_masked.off = INSN_OFF_MASK;

    return memcmp(&orig_masked, masked, sizeof(orig_masked)) == 0;
}

static int find_insn_subseq(struct bpf_insn *seq, struct bpf_insn *subseq,
                int seq_len, int subseq_len)
{
    int i, j;

    if (subseq_len > seq_len)
        return -1;

    for (i = 0; i < seq_len - subseq_len + 1; ++i) {
        bool found = true;

        for (j = 0; j < subseq_len; ++j) {
            if (!compare_masked_insn(&seq[i + j], &subseq[j])) {
                found = false;
                break;
            }
        }
        if (found)
            return i;
    }

    return -1;
}

static int find_skip_insn_marker(struct bpf_insn *seq, int len)
{
    int i;

    for (i = 0; i < len; ++i)
        if (is_skip_insn(&seq[i]))
            return i;

    return -1;
}

/* Return true if all sub-sequences in `subseqs` can be found in
 * `seq` one after another. Sub-sequences are separated by a single
 * SKIP_INSNS marker, and the whole list is terminated by a nil
 * (all-zero) instruction.
 */
static bool find_all_insn_subseqs(struct bpf_insn *seq, struct bpf_insn *subseqs,
                  int seq_len, int max_subseqs_len)
{
    int subseqs_len = null_terminated_insn_len(subseqs, max_subseqs_len);

    while (subseqs_len > 0) {
        int skip_idx = find_skip_insn_marker(subseqs, subseqs_len);
        int cur_subseq_len = skip_idx < 0 ? subseqs_len : skip_idx;
        int subseq_idx = find_insn_subseq(seq, subseqs,
                          seq_len, cur_subseq_len);

        if (subseq_idx < 0)
            return false;
        seq += subseq_idx + cur_subseq_len;
        seq_len -= subseq_idx + cur_subseq_len;
        subseqs += cur_subseq_len + 1;
        subseqs_len -= cur_subseq_len + 1;
    }

    return true;
}

static void print_insn(struct bpf_insn *buf, int cnt)
{
    int i;

    printf("  addr  op d s off  imm\n");
    for (i = 0; i < cnt; ++i) {
        struct bpf_insn *insn = &buf[i];

        if (is_null_insn(insn))
            break;

        if (is_skip_insn(insn))
            printf("  ...\n");
        else
            printf("  %04x: %02x %1x %x %04hx %08x\n",
                   i, insn->code, insn->dst_reg,
                   insn->src_reg, insn->off, insn->imm);
    }
}

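/* Match the xlated program against test->expected_insns and
 * test->unexpected_insns; see the comments on those fields in
 * struct bpf_test for the pattern semantics.
 */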
static bool check_xlated_program(struct bpf_test *test, int fd_prog)
{
    struct bpf_insn *buf;
    int cnt;
    bool result = true;
    bool check_expected = !is_null_insn(test->expected_insns);
    bool check_unexpected = !is_null_insn(test->unexpected_insns);

    if (!check_expected && !check_unexpected)
        goto out;

    if (get_xlated_program(fd_prog, &buf, &cnt)) {
        printf("FAIL: can't get xlated program\n");
        result = false;
        goto out;
    }

    if (check_expected &&
        !find_all_insn_subseqs(buf, test->expected_insns,
                   cnt, MAX_EXPECTED_INSNS)) {
        printf("FAIL: can't find expected subsequence of instructions\n");
        result = false;
        if (verbose) {
            printf("Program:\n");
            print_insn(buf, cnt);
            printf("Expected subsequence:\n");
            print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
        }
    }

    if (check_unexpected &&
        find_all_insn_subseqs(buf, test->unexpected_insns,
                  cnt, MAX_UNEXPECTED_INSNS)) {
        printf("FAIL: found unexpected subsequence of instructions\n");
        result = false;
        if (verbose) {
            printf("Program:\n");
            print_insn(buf, cnt);
            printf("Unexpected subsequence:\n");
            print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
        }
    }

    free(buf);
 out:
    return result;
}

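/* Run one test case in one privilege mode: apply fixups, load the
 * program, match the verifier log against the expectations, optionally
 * check the xlated program, and execute it test->runs times.
 */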
static void do_test_single(struct bpf_test *test, bool unpriv,
               int *passes, int *errors)
{
    int fd_prog, btf_fd, expected_ret, alignment_prevented_execution;
    int prog_len, prog_type = test->prog_type;
    struct bpf_insn *prog = test->insns;
    LIBBPF_OPTS(bpf_prog_load_opts, opts);
    int run_errs, run_successes;
    int map_fds[MAX_NR_MAPS];
    const char *expected_err;
    int saved_errno;
    int fixup_skips;
    __u32 pflags;
    int i, err;

    fd_prog = -1;
    for (i = 0; i < MAX_NR_MAPS; i++)
        map_fds[i] = -1;
    btf_fd = -1;

    if (!prog_type)
        prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
    fixup_skips = skips;
    do_test_fixup(test, prog_type, prog, map_fds);
    if (test->fill_insns) {
        prog = test->fill_insns;
        prog_len = test->prog_len;
    } else {
        prog_len = probe_filter_length(prog);
    }
    /* If there were some map skips during fixup due to missing bpf
     * features, skip this test.
     */
    if (fixup_skips != skips)
        return;

    pflags = BPF_F_TEST_RND_HI32;
    if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
        pflags |= BPF_F_STRICT_ALIGNMENT;
    if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
        pflags |= BPF_F_ANY_ALIGNMENT;
    if (test->flags & ~3)
        pflags |= test->flags;

    expected_ret = unpriv && test->result_unpriv != UNDEF ?
               test->result_unpriv : test->result;
    expected_err = unpriv && test->errstr_unpriv ?
               test->errstr_unpriv : test->errstr;

    opts.expected_attach_type = test->expected_attach_type;
    if (verbose)
        opts.log_level = VERBOSE_LIBBPF_LOG_LEVEL;
    else if (expected_ret == VERBOSE_ACCEPT)
        opts.log_level = 2;
    else
        opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
    opts.prog_flags = pflags;

    if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
        int attach_btf_id;

        attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
                        opts.expected_attach_type);
        if (attach_btf_id < 0) {
            printf("FAIL\nFailed to find BTF ID for '%s'!\n",
                test->kfunc);
            (*errors)++;
            return;
        }

        opts.attach_btf_id = attach_btf_id;
    }

    if (test->btf_types[0] != 0) {
        btf_fd = load_btf_for_test(test);
        if (btf_fd < 0)
            goto fail_log;
        opts.prog_btf_fd = btf_fd;
    }

    if (test->func_info_cnt != 0) {
        opts.func_info = test->func_info;
        opts.func_info_cnt = test->func_info_cnt;
        opts.func_info_rec_size = sizeof(test->func_info[0]);
    }

    opts.log_buf = bpf_vlog;
    opts.log_size = sizeof(bpf_vlog);
    fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
    saved_errno = errno;

    /* BPF_PROG_TYPE_TRACING requires more setup and
     * libbpf_probe_bpf_prog_type() won't give a correct answer.
     */
    if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
        !libbpf_probe_bpf_prog_type(prog_type, NULL)) {
        printf("SKIP (unsupported program type %d)\n", prog_type);
        skips++;
        goto close_fds;
    }

    if (fd_prog < 0 && saved_errno == ENOTSUPP) {
        printf("SKIP (program uses an unsupported feature)\n");
        skips++;
        goto close_fds;
    }

    alignment_prevented_execution = 0;

    if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
        if (fd_prog < 0) {
            printf("FAIL\nFailed to load prog '%s'!\n",
                   strerror(saved_errno));
            goto fail_log;
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (fd_prog >= 0 &&
            (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
            alignment_prevented_execution = 1;
#endif
        if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err))
            goto fail_log;
    } else {
        if (fd_prog >= 0) {
            printf("FAIL\nUnexpected success loading program!\n");
            goto fail_log;
        }
        if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
            printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
                  expected_err, bpf_vlog);
            goto fail_log;
        }
    }

    if (!unpriv && test->insn_processed) {
        uint32_t insn_processed;
        char *proc;

        proc = strstr(bpf_vlog, "processed ");
        if (!proc)
            goto fail_log;
        insn_processed = atoi(proc + 10);
        if (test->insn_processed != insn_processed) {
            printf("FAIL\nUnexpected insn_processed %u vs %u\n",
                   insn_processed, test->insn_processed);
            goto fail_log;
        }
    }

    if (verbose)
        printf(", verifier log:\n%s", bpf_vlog);

    if (!check_xlated_program(test, fd_prog))
        goto fail_log;

    run_errs = 0;
    run_successes = 0;
    if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
        uint32_t expected_val;
        int i;

        if (!test->runs)
            test->runs = 1;

        for (i = 0; i < test->runs; i++) {
            if (unpriv && test->retvals[i].retval_unpriv)
                expected_val = test->retvals[i].retval_unpriv;
            else
                expected_val = test->retvals[i].retval;

            err = do_prog_test_run(fd_prog, unpriv, expected_val,
                           test->retvals[i].data,
                           sizeof(test->retvals[i].data));
            if (err) {
                printf("(run %d/%d) ", i + 1, test->runs);
                run_errs++;
            } else {
                run_successes++;
            }
        }
    }

    if (!run_errs) {
        (*passes)++;
        if (run_successes > 1)
            printf("%d cases ", run_successes);
        printf("OK");
        if (alignment_prevented_execution)
            printf(" (NOTE: not executed due to unknown alignment)");
        printf("\n");
    } else {
        printf("\n");
        goto fail_log;
    }
close_fds:
    if (test->fill_insns)
        free(test->fill_insns);
    close(fd_prog);
    close(btf_fd);
    for (i = 0; i < MAX_NR_MAPS; i++)
        close(map_fds[i]);
    sched_yield();
    return;
fail_log:
    (*errors)++;
    printf("%s", bpf_vlog);
    goto close_fds;
}

static bool is_admin(void)
{
    __u64 caps;

    /* The test checks for the finer-grained capabilities CAP_NET_ADMIN,
     * CAP_PERFMON, and CAP_BPF instead of CAP_SYS_ADMIN.
     * Thus, disable CAP_SYS_ADMIN at the beginning.
     */
    if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
        perror("cap_disable_effective(CAP_SYS_ADMIN)");
        return false;
    }

    return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}

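/* Read the kernel.unprivileged_bpf_disabled sysctl; if it cannot be
 * read, conservatively assume unprivileged BPF is disabled.
 */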
static void get_unpriv_disabled(void)
{
    char buf[2];
    FILE *fd;

    fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
    if (!fd) {
        perror("fopen /proc/sys/"UNPRIV_SYSCTL);
        unpriv_disabled = true;
        return;
    }
    if (fgets(buf, 2, fd) == buf && atoi(buf))
        unpriv_disabled = true;
    fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
    /* Some architectures have strict alignment requirements. In
     * that case, the BPF verifier detects if a program has
     * unaligned accesses and rejects them. A user can pass
     * BPF_F_ANY_ALIGNMENT to a program to override this
     * check. That, however, will only work when a privileged user
     * loads a program. An unprivileged user loading a program
     * with this flag will be rejected prior to entering the
     * verifier.
     */
    if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
        return false;
#endif
    return !test->prog_type ||
           test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
           test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
    int i, passes = 0, errors = 0;

    for (i = from; i < to; i++) {
        struct bpf_test *test = &tests[i];

        /* Skip right away program types that are not supported
         * by non-root.
         */
        if (test_as_unpriv(test) && unpriv_disabled) {
            printf("#%d/u %s SKIP\n", i, test->descr);
            skips++;
        } else if (test_as_unpriv(test)) {
            if (!unpriv)
                set_admin(false);
            printf("#%d/u %s ", i, test->descr);
            do_test_single(test, true, &passes, &errors);
            if (!unpriv)
                set_admin(true);
        }

        if (unpriv) {
            printf("#%d/p %s SKIP\n", i, test->descr);
            skips++;
        } else {
            printf("#%d/p %s ", i, test->descr);
            do_test_single(test, false, &passes, &errors);
        }
    }

    printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
           skips, errors);
    return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

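/* Usage: test_verifier [-v] [test_id | from_id to_id]
 * With no ids, every test in tests[] is run; -v enables verbose
 * verifier logs.
 */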
int main(int argc, char **argv)
{
    unsigned int from = 0, to = ARRAY_SIZE(tests);
    bool unpriv = !is_admin();
    int arg = 1;

    if (argc > 1 && strcmp(argv[1], "-v") == 0) {
        arg++;
        verbose = true;
        argc--;
    }

    if (argc == 3) {
        unsigned int l = atoi(argv[arg]);
        unsigned int u = atoi(argv[arg + 1]);

        if (l < to && u < to) {
            from = l;
            to   = u + 1;
        }
    } else if (argc == 2) {
        unsigned int t = atoi(argv[arg]);

        if (t < to) {
            from = t;
            to   = t + 1;
        }
    }

    get_unpriv_disabled();
    if (unpriv && unpriv_disabled) {
        printf("Cannot run as unprivileged user with sysctl %s.\n",
               UNPRIV_SYSCTL);
        return EXIT_FAILURE;
    }

    /* Use libbpf 1.0 API mode */
    libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

    bpf_semi_rand_init();
    return do_test(unpriv, from, to);
}