#include <test_progs.h>
#include "test_map_lookup_percpu_elem.skel.h"

void test_map_lookup_percpu_elem(void)
{
	struct test_map_lookup_percpu_elem *skel;
	__u64 key = 0, sum;
	int ret, i, nr_cpus = libbpf_num_possible_cpus();
	__u64 *buf;

	buf = malloc(nr_cpus * sizeof(__u64));
	if (!ASSERT_OK_PTR(buf, "malloc"))
		return;

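	/* Give CPU i the value i, so each map's expected sum is 0 + 1 + ... + (nr_cpus - 1). */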
	for (i = 0; i < nr_cpus; i++)
		buf[i] = i;
	sum = (nr_cpus - 1) * nr_cpus / 2;

	skel = test_map_lookup_percpu_elem__open();
	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
		goto exit;

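	/* Pass our pid and the possible-CPU count to the BPF side; rodata must be set before load. */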
	skel->rodata->my_pid = getpid();
	skel->rodata->nr_cpus = nr_cpus;

	ret = test_map_lookup_percpu_elem__load(skel);
	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
		goto cleanup;

	ret = test_map_lookup_percpu_elem__attach(skel);
	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
		goto cleanup;

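	/* Seed key 0 of each per-CPU map with the same set of per-CPU values. */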
	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
	ASSERT_OK(ret, "percpu_array_map update");

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
	ASSERT_OK(ret, "percpu_hash_map update");

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
	ASSERT_OK(ret, "percpu_lru_hash_map update");

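	/* Trigger the attached BPF programs, which compute the per-map sums checked below. */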
	syscall(__NR_getuid);

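	/* Detach so the programs stop running before the sums are read. */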
	test_map_lookup_percpu_elem__detach(skel);

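	/* Each program should have summed one value per possible CPU. */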
	ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
	ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
	ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");

cleanup:
	test_map_lookup_percpu_elem__destroy(skel);
exit:
	free(buf);
}