// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/compiler.h>
#include <linux/err.h>

#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/btf.h>
#include <unistd.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>   /* fprintf(), printf() */
#include <stdlib.h>  /* malloc(), free(), getenv(), atoi() */
#include <pthread.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include <test_btf.h>
#include <test_maps.h>

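/*
 * BPF_MAP_TYPE_SK_STORAGE tests: a basic create/update/lookup/delete
 * sequence ("basic") plus two multi-threaded stress tests, "stress_free"
 * (free the map while sockets still hold storage) and "stress_change"
 * (concurrent update/delete of storage on a single socket).  Test
 * selection and sizing are controlled by the environment variables
 * defined further below.
 */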
static struct bpf_map_create_opts map_opts = {
    .sz = sizeof(map_opts),
    .btf_key_type_id = 1,
    .btf_value_type_id = 3,
    .btf_fd = -1,
    .map_flags = BPF_F_NO_PREALLOC,
};

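/*
 * Shared state between the main test thread and the worker threads.
 * Workers use the *_done/*_err counters (updated with __sync atomics)
 * and the stop flag (accessed with READ_ONCE/WRITE_ONCE) to coordinate
 * with the main thread; sk_storage_map publishes the current map fd.
 */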
static unsigned int nr_sk_threads_done;
static unsigned int nr_sk_threads_err;
static unsigned int nr_sk_per_thread = 4096;
static unsigned int nr_sk_threads = 4;
static int sk_storage_map = -1;
static unsigned int stop;
static int runtime_s = 5;

static bool is_stopped(void)
{
    return READ_ONCE(stop);
}

static unsigned int threads_err(void)
{
    return READ_ONCE(nr_sk_threads_err);
}

static void notify_thread_err(void)
{
    __sync_add_and_fetch(&nr_sk_threads_err, 1);
}

static bool wait_for_threads_err(void)
{
    while (!is_stopped() && !threads_err())
        usleep(500);

    return !is_stopped();
}

static unsigned int threads_done(void)
{
    return READ_ONCE(nr_sk_threads_done);
}

static void notify_thread_done(void)
{
    __sync_add_and_fetch(&nr_sk_threads_done, 1);
}

static void notify_thread_redo(void)
{
    __sync_sub_and_fetch(&nr_sk_threads_done, 1);
}

static bool wait_for_threads_done(void)
{
    while (threads_done() != nr_sk_threads && !is_stopped() &&
           !threads_err())
        usleep(50);

    return !is_stopped() && !threads_err();
}

static bool wait_for_threads_redo(void)
{
    while (threads_done() && !is_stopped() && !threads_err())
        usleep(50);

    return !is_stopped() && !threads_err();
}

static bool wait_for_map(void)
{
    while (READ_ONCE(sk_storage_map) == -1 && !is_stopped())
        usleep(50);

    return !is_stopped();
}

static bool wait_for_map_close(void)
{
    while (READ_ONCE(sk_storage_map) != -1 && !is_stopped())
        ;

    return !is_stopped();
}

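/*
 * Build a minimal raw BTF blob by hand: type [1] is "int", type [2] is
 * "struct bpf_spin_lock { int val; }", and type [3] is the map value
 * "struct val { int cnt; struct bpf_spin_lock l; }".  The map's
 * btf_key_type_id/btf_value_type_id above refer to types [1] and [3],
 * which is what allows BPF_F_LOCK to be used in the basic test.
 */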
static int load_btf(void)
{
    const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
    __u32 btf_raw_types[] = {
        /* int */
        BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
        /* struct bpf_spin_lock */                      /* [2] */
        BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
        BTF_MEMBER_ENC(15, 1, 0), /* int val; */
        /* struct val */                                /* [3] */
        BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
        BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
        BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
    };
    struct btf_header btf_hdr = {
        .magic = BTF_MAGIC,
        .version = BTF_VERSION,
        .hdr_len = sizeof(struct btf_header),
        .type_len = sizeof(btf_raw_types),
        .str_off = sizeof(btf_raw_types),
        .str_len = sizeof(btf_str_sec),
    };
    __u8 raw_btf[sizeof(struct btf_header) + sizeof(btf_raw_types) +
             sizeof(btf_str_sec)];

    memcpy(raw_btf, &btf_hdr, sizeof(btf_hdr));
    memcpy(raw_btf + sizeof(btf_hdr), btf_raw_types, sizeof(btf_raw_types));
    memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
           btf_str_sec, sizeof(btf_str_sec));

    return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL);
}

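/*
 * Load the BTF above and create a BPF_MAP_TYPE_SK_STORAGE map with a
 * 4-byte key (a socket fd) and an 8-byte value (struct val).  The BTF fd
 * is only needed for map creation, so it is closed again right away.
 */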
static int create_sk_storage_map(void)
{
    int btf_fd, map_fd;

    btf_fd = load_btf();
    CHECK(btf_fd == -1, "bpf_btf_load", "btf_fd:%d errno:%d\n",
          btf_fd, errno);
    map_opts.btf_fd = btf_fd;

    map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
    map_opts.btf_fd = -1;
    close(btf_fd);
    CHECK(map_fd == -1,
          "bpf_map_create()", "errno:%d\n", errno);

    return map_fd;
}

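/*
 * Worker for the "stress_free" test.  Each thread opens nr_sk_per_thread
 * sockets, creates storage on every socket via bpf_map_update_elem(),
 * signals the main thread, then waits for the main thread to close the
 * map before closing its sockets and starting over.
 */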
static void *insert_close_thread(void *arg)
{
    struct {
        int cnt;
        int lock;
    } value = { .cnt = 0xeB9F, .lock = 0, };
    int i, map_fd, err, *sk_fds;

    sk_fds = malloc(sizeof(*sk_fds) * nr_sk_per_thread);
    if (!sk_fds) {
        notify_thread_err();
        return ERR_PTR(-ENOMEM);
    }

    for (i = 0; i < nr_sk_per_thread; i++)
        sk_fds[i] = -1;

    while (!is_stopped()) {
        if (!wait_for_map())
            goto close_all;

        map_fd = READ_ONCE(sk_storage_map);
        for (i = 0; i < nr_sk_per_thread && !is_stopped(); i++) {
            sk_fds[i] = socket(AF_INET6, SOCK_STREAM, 0);
            if (sk_fds[i] == -1) {
                err = -errno;
                fprintf(stderr, "socket(): errno:%d\n", errno);
                goto errout;
            }
            err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,
                          BPF_NOEXIST);
            if (err) {
                err = -errno;
                fprintf(stderr,
                    "bpf_map_update_elem(): errno:%d\n",
                    errno);
                goto errout;
            }
        }

        notify_thread_done();
        wait_for_map_close();

close_all:
        for (i = 0; i < nr_sk_per_thread; i++) {
            close(sk_fds[i]);
            sk_fds[i] = -1;
        }

        notify_thread_redo();
    }

    free(sk_fds);
    return NULL;

errout:
    for (i = 0; i < nr_sk_per_thread && sk_fds[i] != -1; i++)
        close(sk_fds[i]);
    free(sk_fds);
    notify_thread_err();
    return ERR_PTR(err);
}

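/*
 * "stress_free" main loop: repeatedly create the map, publish its fd for
 * the insert_close_thread() workers, wait for them to fill it, then close
 * the map while all of their sockets still hold storage.  This stresses
 * freeing a non-empty sk_storage map.
 */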
static int do_sk_storage_map_stress_free(void)
{
    int i, map_fd = -1, err = 0, nr_threads_created = 0;
    pthread_t *sk_thread_ids;
    void *thread_ret;

    sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
    if (!sk_thread_ids) {
        fprintf(stderr, "malloc(sk_threads): NULL\n");
        return -ENOMEM;
    }

    for (i = 0; i < nr_sk_threads; i++) {
        err = pthread_create(&sk_thread_ids[i], NULL,
                     insert_close_thread, NULL);
        if (err) {
            /* pthread_create() returns the error number directly */
            err = -err;
            goto done;
        }
        nr_threads_created++;
    }

    while (!is_stopped()) {
        map_fd = create_sk_storage_map();
        WRITE_ONCE(sk_storage_map, map_fd);

        if (!wait_for_threads_done())
            break;

        WRITE_ONCE(sk_storage_map, -1);
        close(map_fd);
        map_fd = -1;

        if (!wait_for_threads_redo())
            break;
    }

done:
    WRITE_ONCE(stop, 1);
    for (i = 0; i < nr_threads_created; i++) {
        pthread_join(sk_thread_ids[i], &thread_ret);
        if (IS_ERR(thread_ret) && !err) {
            err = PTR_ERR(thread_ret);
            fprintf(stderr, "threads#%u: err:%d\n", i, err);
        }
    }
    free(sk_thread_ids);

    if (map_fd != -1)
        close(map_fd);

    return err;
}

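/*
 * Workers for the "stress_change" test: update_thread() keeps re-creating
 * storage for one socket (EAGAIN is tolerated) while delete_thread()
 * keeps deleting it (ENOENT is tolerated), so creation and deletion of
 * the same element race against each other.
 */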
static void *update_thread(void *arg)
{
    struct {
        int cnt;
        int lock;
    } value = { .cnt = 0xeB9F, .lock = 0, };
    int map_fd = READ_ONCE(sk_storage_map);
    int sk_fd = *(int *)arg;
    int err = 0; /* Suppress compiler false alarm */

    while (!is_stopped()) {
        err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
        if (err && errno != EAGAIN) {
            err = -errno;
            fprintf(stderr, "bpf_map_update_elem: %d %d\n",
                err, errno);
            break;
        }
    }

    if (!is_stopped()) {
        notify_thread_err();
        return ERR_PTR(err);
    }

    return NULL;
}

static void *delete_thread(void *arg)
{
    int map_fd = READ_ONCE(sk_storage_map);
    int sk_fd = *(int *)arg;
    int err = 0; /* Suppress compiler false alarm */

    while (!is_stopped()) {
        err = bpf_map_delete_elem(map_fd, &sk_fd);
        if (err && errno != ENOENT) {
            err = -errno;
            fprintf(stderr, "bpf_map_delete_elem: %d %d\n",
                err, errno);
            break;
        }
    }

    if (!is_stopped()) {
        notify_thread_err();
        return ERR_PTR(err);
    }

    return NULL;
}

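/*
 * "stress_change" setup: create one socket and one map, then split the
 * worker threads between update_thread() and delete_thread() so the same
 * element is concurrently created and deleted until an error or timeout.
 */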
static int do_sk_storage_map_stress_change(void)
{
    int i, sk_fd, map_fd = -1, err = 0, nr_threads_created = 0;
    pthread_t *sk_thread_ids;
    void *thread_ret;

    sk_thread_ids = malloc(sizeof(pthread_t) * nr_sk_threads);
    if (!sk_thread_ids) {
        fprintf(stderr, "malloc(sk_threads): NULL\n");
        return -ENOMEM;
    }

    sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
    if (sk_fd == -1) {
        err = -errno;
        goto done;
    }

    map_fd = create_sk_storage_map();
    WRITE_ONCE(sk_storage_map, map_fd);

    for (i = 0; i < nr_sk_threads; i++) {
        if (i & 0x1)
            err = pthread_create(&sk_thread_ids[i], NULL,
                         update_thread, &sk_fd);
        else
            err = pthread_create(&sk_thread_ids[i], NULL,
                         delete_thread, &sk_fd);
        if (err) {
            /* pthread_create() returns the error number directly */
            err = -err;
            goto done;
        }
        nr_threads_created++;
    }

    wait_for_threads_err();

done:
    WRITE_ONCE(stop, 1);
    for (i = 0; i < nr_threads_created; i++) {
        pthread_join(sk_thread_ids[i], &thread_ret);
        if (IS_ERR(thread_ret) && !err) {
            err = PTR_ERR(thread_ret);
            fprintf(stderr, "threads#%u: err:%d\n", i, err);
        }
    }
    free(sk_thread_ids);

    if (sk_fd != -1)
        close(sk_fd);
    close(map_fd);

    return err;
}

static void stop_handler(int signum)
{
    if (signum != SIGALRM)
        printf("stopping...\n");
    WRITE_ONCE(stop, 1);
}

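/*
 * Environment variables for tuning the stress tests (number of worker
 * threads, sockets per thread, runtime in seconds) and for selecting a
 * single test by name ("basic", "stress_free" or "stress_change").
 */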
#define BPF_SK_STORAGE_MAP_TEST_NR_THREADS "BPF_SK_STORAGE_MAP_TEST_NR_THREADS"
#define BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD "BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD"
#define BPF_SK_STORAGE_MAP_TEST_RUNTIME_S "BPF_SK_STORAGE_MAP_TEST_RUNTIME_S"
#define BPF_SK_STORAGE_MAP_TEST_NAME "BPF_SK_STORAGE_MAP_TEST_NAME"

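/*
 * Wrapper for the "stress_free" test: installs SIGTERM/SIGINT handlers,
 * arms an alarm to bound the runtime, and bumps RLIMIT_NOFILE so that
 * nr_sk_threads * nr_sk_per_thread sockets can be open at once.
 */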
static void test_sk_storage_map_stress_free(void)
{
    struct rlimit rlim_old, rlim_new = {};
    int err;

    getrlimit(RLIMIT_NOFILE, &rlim_old);

    signal(SIGTERM, stop_handler);
    signal(SIGINT, stop_handler);
    if (runtime_s > 0) {
        signal(SIGALRM, stop_handler);
        alarm(runtime_s);
    }

    if (rlim_old.rlim_cur < nr_sk_threads * nr_sk_per_thread) {
        rlim_new.rlim_cur = nr_sk_threads * nr_sk_per_thread + 128;
        rlim_new.rlim_max = rlim_new.rlim_cur + 128;
        err = setrlimit(RLIMIT_NOFILE, &rlim_new);
        CHECK(err, "setrlimit(RLIMIT_NOFILE)", "rlim_new:%lu errno:%d",
              rlim_new.rlim_cur, errno);
    }

    err = do_sk_storage_map_stress_free();

    signal(SIGTERM, SIG_DFL);
    signal(SIGINT, SIG_DFL);
    if (runtime_s > 0) {
        signal(SIGALRM, SIG_DFL);
        alarm(0);
    }

    if (rlim_new.rlim_cur)
        setrlimit(RLIMIT_NOFILE, &rlim_old);

    CHECK(err, "test_sk_storage_map_stress_free", "err:%d\n", err);
}

static void test_sk_storage_map_stress_change(void)
{
    int err;

    signal(SIGTERM, stop_handler);
    signal(SIGINT, stop_handler);
    if (runtime_s > 0) {
        signal(SIGALRM, stop_handler);
        alarm(runtime_s);
    }

    err = do_sk_storage_map_stress_change();

    signal(SIGTERM, SIG_DFL);
    signal(SIGINT, SIG_DFL);
    if (runtime_s > 0) {
        signal(SIGALRM, SIG_DFL);
        alarm(0);
    }

    CHECK(err, "test_sk_storage_map_stress_change", "err:%d\n", err);
}

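/*
 * Single-threaded sanity test: create one socket and one map, then walk
 * through update/lookup/delete with the BPF_NOEXIST, BPF_EXIST and
 * BPF_F_LOCK flag combinations, and verify that map creation fails with
 * EINVAL for bad BTF type ids, a non-zero max_entries and a missing
 * BPF_F_NO_PREALLOC flag.
 */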
static void test_sk_storage_map_basic(void)
{
    struct {
        int cnt;
        int lock;
    } value = { .cnt = 0xeB9f, .lock = 0, }, lookup_value;
    struct bpf_map_create_opts bad_xattr;
    int btf_fd, map_fd, sk_fd, err;

    btf_fd = load_btf();
    CHECK(btf_fd == -1, "bpf_btf_load", "btf_fd:%d errno:%d\n",
          btf_fd, errno);
    map_opts.btf_fd = btf_fd;

    sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
    CHECK(sk_fd == -1, "socket()", "sk_fd:%d errno:%d\n",
          sk_fd, errno);

    map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);
    CHECK(map_fd == -1, "bpf_map_create(good_xattr)",
          "map_fd:%d errno:%d\n", map_fd, errno);

    /* Add new elem */
    memcpy(&lookup_value, &value, sizeof(value));
    err = bpf_map_update_elem(map_fd, &sk_fd, &value,
                  BPF_NOEXIST | BPF_F_LOCK);
    CHECK(err, "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
          "err:%d errno:%d\n", err, errno);
    err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                    BPF_F_LOCK);
    CHECK(err || lookup_value.cnt != value.cnt,
          "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
          "err:%d errno:%d cnt:%x(%x)\n",
          err, errno, lookup_value.cnt, value.cnt);

    /* Bump the cnt and update with BPF_EXIST | BPF_F_LOCK */
    value.cnt += 1;
    err = bpf_map_update_elem(map_fd, &sk_fd, &value,
                  BPF_EXIST | BPF_F_LOCK);
    CHECK(err, "bpf_map_update_elem(BPF_EXIST|BPF_F_LOCK)",
          "err:%d errno:%d\n", err, errno);
    err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                    BPF_F_LOCK);
    CHECK(err || lookup_value.cnt != value.cnt,
          "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
          "err:%d errno:%d cnt:%x(%x)\n",
          err, errno, lookup_value.cnt, value.cnt);

    /* Bump the cnt and update with BPF_EXIST */
    value.cnt += 1;
    err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_EXIST);
    CHECK(err, "bpf_map_update_elem(BPF_EXIST)",
          "err:%d errno:%d\n", err, errno);
    err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                    BPF_F_LOCK);
    CHECK(err || lookup_value.cnt != value.cnt,
          "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
          "err:%d errno:%d cnt:%x(%x)\n",
          err, errno, lookup_value.cnt, value.cnt);

    /* Update with BPF_NOEXIST */
    value.cnt += 1;
    err = bpf_map_update_elem(map_fd, &sk_fd, &value,
                  BPF_NOEXIST | BPF_F_LOCK);
    CHECK(!err || errno != EEXIST,
          "bpf_map_update_elem(BPF_NOEXIST|BPF_F_LOCK)",
          "err:%d errno:%d\n", err, errno);
    err = bpf_map_update_elem(map_fd, &sk_fd, &value, BPF_NOEXIST);
    CHECK(!err || errno != EEXIST, "bpf_map_update_elem(BPF_NOEXIST)",
          "err:%d errno:%d\n", err, errno);
    value.cnt -= 1;
    err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                    BPF_F_LOCK);
    CHECK(err || lookup_value.cnt != value.cnt,
          "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
          "err:%d errno:%d cnt:%x(%x)\n",
          err, errno, lookup_value.cnt, value.cnt);

    /* Bump the cnt again and update with map_flags == 0 */
    value.cnt += 1;
    err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
    CHECK(err, "bpf_map_update_elem()", "err:%d errno:%d\n",
          err, errno);
    err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                    BPF_F_LOCK);
    CHECK(err || lookup_value.cnt != value.cnt,
          "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
          "err:%d errno:%d cnt:%x(%x)\n",
          err, errno, lookup_value.cnt, value.cnt);

    /* Test delete elem */
    err = bpf_map_delete_elem(map_fd, &sk_fd);
    CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n",
          err, errno);
    err = bpf_map_lookup_elem_flags(map_fd, &sk_fd, &lookup_value,
                    BPF_F_LOCK);
    CHECK(!err || errno != ENOENT,
          "bpf_map_lookup_elem_flags(BPF_F_LOCK)",
          "err:%d errno:%d\n", err, errno);
    err = bpf_map_delete_elem(map_fd, &sk_fd);
    CHECK(!err || errno != ENOENT, "bpf_map_delete_elem()",
          "err:%d errno:%d\n", err, errno);

    memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
    bad_xattr.btf_key_type_id = 0;
    err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
    CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
          "err:%d errno:%d\n", err, errno);

    memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
    bad_xattr.btf_key_type_id = 3;
    err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
    CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
          "err:%d errno:%d\n", err, errno);

    err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 1, &map_opts);
    CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
          "err:%d errno:%d\n", err, errno);

    memcpy(&bad_xattr, &map_opts, sizeof(map_opts));
    bad_xattr.map_flags = 0;
    err = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &bad_xattr);
    CHECK(!err || errno != EINVAL, "bpf_map_create(bad_xattr)",
          "err:%d errno:%d\n", err, errno);

    map_opts.btf_fd = -1;
    close(btf_fd);
    close(map_fd);
    close(sk_fd);
}

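/*
 * Entry point: apply the environment overrides above, then run the test
 * selected by BPF_SK_STORAGE_MAP_TEST_NAME (or all tests when it is not
 * set).
 */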
void test_sk_storage_map(void)
{
    const char *test_name, *env_opt;
    bool test_ran = false;

    test_name = getenv(BPF_SK_STORAGE_MAP_TEST_NAME);

    env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_NR_THREADS);
    if (env_opt)
        nr_sk_threads = atoi(env_opt);

    env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_SK_PER_THREAD);
    if (env_opt)
        nr_sk_per_thread = atoi(env_opt);

    env_opt = getenv(BPF_SK_STORAGE_MAP_TEST_RUNTIME_S);
    if (env_opt)
        runtime_s = atoi(env_opt);

    if (!test_name || !strcmp(test_name, "basic")) {
        test_sk_storage_map_basic();
        test_ran = true;
    }
    if (!test_name || !strcmp(test_name, "stress_free")) {
        test_sk_storage_map_stress_free();
        test_ran = true;
    }
    if (!test_name || !strcmp(test_name, "stress_change")) {
        test_sk_storage_map_stress_change();
        test_ran = true;
    }

    if (test_ran)
        printf("%s:PASS\n", __func__);
    else
        CHECK(1, "Invalid test_name", "%s\n", test_name);
}