0001
0002
0003 #include <test_progs.h>
0004 #include <network_helpers.h>
0005 #include "for_each_hash_map_elem.skel.h"
0006 #include "for_each_array_map_elem.skel.h"
0007 #include "for_each_map_elem_write_key.skel.h"
0008
/* Elapsed-time slot filled from topts.duration after each prog run;
 * read by the CHECK() macro (from test_progs.h) — TODO confirm macro use.
 */
static unsigned int duration;
0010
0011 static void test_hash_map(void)
0012 {
0013 int i, err, max_entries;
0014 struct for_each_hash_map_elem *skel;
0015 __u64 *percpu_valbuf = NULL;
0016 size_t percpu_val_sz;
0017 __u32 key, num_cpus;
0018 __u64 val;
0019 LIBBPF_OPTS(bpf_test_run_opts, topts,
0020 .data_in = &pkt_v4,
0021 .data_size_in = sizeof(pkt_v4),
0022 .repeat = 1,
0023 );
0024
0025 skel = for_each_hash_map_elem__open_and_load();
0026 if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
0027 return;
0028
0029 max_entries = bpf_map__max_entries(skel->maps.hashmap);
0030 for (i = 0; i < max_entries; i++) {
0031 key = i;
0032 val = i + 1;
0033 err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
0034 &val, sizeof(val), BPF_ANY);
0035 if (!ASSERT_OK(err, "map_update"))
0036 goto out;
0037 }
0038
0039 num_cpus = bpf_num_possible_cpus();
0040 percpu_val_sz = sizeof(__u64) * num_cpus;
0041 percpu_valbuf = malloc(percpu_val_sz);
0042 if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
0043 goto out;
0044
0045 key = 1;
0046 for (i = 0; i < num_cpus; i++)
0047 percpu_valbuf[i] = i + 1;
0048 err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
0049 percpu_valbuf, percpu_val_sz, BPF_ANY);
0050 if (!ASSERT_OK(err, "percpu_map_update"))
0051 goto out;
0052
0053 err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
0054 duration = topts.duration;
0055 if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
0056 err, errno, topts.retval))
0057 goto out;
0058
0059 ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
0060 ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
0061
0062 key = 1;
0063 err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
0064 ASSERT_ERR(err, "hashmap_lookup");
0065
0066 ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
0067 ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
0068 ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
0069 ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
0070 ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
0071 ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
0072 out:
0073 free(percpu_valbuf);
0074 for_each_hash_map_elem__destroy(skel);
0075 }
0076
0077 static void test_array_map(void)
0078 {
0079 __u32 key, num_cpus, max_entries;
0080 int i, err;
0081 struct for_each_array_map_elem *skel;
0082 __u64 *percpu_valbuf = NULL;
0083 size_t percpu_val_sz;
0084 __u64 val, expected_total;
0085 LIBBPF_OPTS(bpf_test_run_opts, topts,
0086 .data_in = &pkt_v4,
0087 .data_size_in = sizeof(pkt_v4),
0088 .repeat = 1,
0089 );
0090
0091 skel = for_each_array_map_elem__open_and_load();
0092 if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
0093 return;
0094
0095 expected_total = 0;
0096 max_entries = bpf_map__max_entries(skel->maps.arraymap);
0097 for (i = 0; i < max_entries; i++) {
0098 key = i;
0099 val = i + 1;
0100
0101 if (i != max_entries - 1)
0102 expected_total += val;
0103 err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
0104 &val, sizeof(val), BPF_ANY);
0105 if (!ASSERT_OK(err, "map_update"))
0106 goto out;
0107 }
0108
0109 num_cpus = bpf_num_possible_cpus();
0110 percpu_val_sz = sizeof(__u64) * num_cpus;
0111 percpu_valbuf = malloc(percpu_val_sz);
0112 if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
0113 goto out;
0114
0115 key = 0;
0116 for (i = 0; i < num_cpus; i++)
0117 percpu_valbuf[i] = i + 1;
0118 err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
0119 percpu_valbuf, percpu_val_sz, BPF_ANY);
0120 if (!ASSERT_OK(err, "percpu_map_update"))
0121 goto out;
0122
0123 err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
0124 duration = topts.duration;
0125 if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
0126 err, errno, topts.retval))
0127 goto out;
0128
0129 ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
0130 ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");
0131
0132 out:
0133 free(percpu_valbuf);
0134 for_each_array_map_elem__destroy(skel);
0135 }
0136
/* Negative test: a program that writes to the for_each callback's key
 * must be rejected, so open_and_load is expected to fail.
 */
static void test_write_map_key(void)
{
	struct for_each_map_elem_write_key *skel;

	skel = for_each_map_elem_write_key__open_and_load();
	if (ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
		return;

	/* Unexpected successful load: free the skeleton anyway. */
	for_each_map_elem_write_key__destroy(skel);
}
0145
/* Top-level entry point: register and run the three for_each subtests. */
void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
	if (test__start_subtest("write_map_key"))
		test_write_map_key();
}