0001
0002
0003 #include <test_progs.h>
0004 #include "test_lookup_and_delete.skel.h"
0005
#define START_VALUE 1234	/* value stored by the fill_values*() helpers */
#define NEW_VALUE 4321		/* value written by the attached tracepoint program */
#define MAX_ENTRIES 2		/* map capacity; small so LRU tests can force eviction */

/* NOTE(review): presumably consumed by the CHECK() macro from test_progs.h
 * for duration logging -- confirm against test_progs.h.
 */
static int duration;
/* Number of possible CPUs; set once in test_lookup_and_delete() and used to
 * size per-CPU value buffers.
 */
static int nr_cpus;
0012
0013 static int fill_values(int map_fd)
0014 {
0015 __u64 key, value = START_VALUE;
0016 int err;
0017
0018 for (key = 1; key < MAX_ENTRIES + 1; key++) {
0019 err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
0020 if (!ASSERT_OK(err, "bpf_map_update_elem"))
0021 return -1;
0022 }
0023
0024 return 0;
0025 }
0026
0027 static int fill_values_percpu(int map_fd)
0028 {
0029 __u64 key, value[nr_cpus];
0030 int i, err;
0031
0032 for (i = 0; i < nr_cpus; i++)
0033 value[i] = START_VALUE;
0034
0035 for (key = 1; key < MAX_ENTRIES + 1; key++) {
0036 err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
0037 if (!ASSERT_OK(err, "bpf_map_update_elem"))
0038 return -1;
0039 }
0040
0041 return 0;
0042 }
0043
0044 static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type,
0045 int *map_fd)
0046 {
0047 struct test_lookup_and_delete *skel;
0048 int err;
0049
0050 skel = test_lookup_and_delete__open();
0051 if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open"))
0052 return NULL;
0053
0054 err = bpf_map__set_type(skel->maps.hash_map, map_type);
0055 if (!ASSERT_OK(err, "bpf_map__set_type"))
0056 goto cleanup;
0057
0058 err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES);
0059 if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
0060 goto cleanup;
0061
0062 err = test_lookup_and_delete__load(skel);
0063 if (!ASSERT_OK(err, "test_lookup_and_delete__load"))
0064 goto cleanup;
0065
0066 *map_fd = bpf_map__fd(skel->maps.hash_map);
0067 if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))
0068 goto cleanup;
0069
0070 return skel;
0071
0072 cleanup:
0073 test_lookup_and_delete__destroy(skel);
0074 return NULL;
0075 }
0076
0077
0078 static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key,
0079 __u64 value)
0080 {
0081 int err;
0082
0083 skel->bss->set_pid = getpid();
0084 skel->bss->set_key = key;
0085 skel->bss->set_value = value;
0086
0087 err = test_lookup_and_delete__attach(skel);
0088 if (!ASSERT_OK(err, "test_lookup_and_delete__attach"))
0089 return -1;
0090
0091 syscall(__NR_getpgid);
0092
0093 test_lookup_and_delete__detach(skel);
0094
0095 return 0;
0096 }
0097
0098 static void test_lookup_and_delete_hash(void)
0099 {
0100 struct test_lookup_and_delete *skel;
0101 __u64 key, value;
0102 int map_fd, err;
0103
0104
0105 skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);
0106 if (!ASSERT_OK_PTR(skel, "setup_prog"))
0107 return;
0108
0109 err = fill_values(map_fd);
0110 if (!ASSERT_OK(err, "fill_values"))
0111 goto cleanup;
0112
0113
0114 key = 1;
0115 err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
0116 &key, sizeof(key), &value, sizeof(value), 0);
0117 if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
0118 goto cleanup;
0119
0120
0121 if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem",
0122 "unexpected value=%lld\n", value))
0123 goto cleanup;
0124
0125
0126 err = bpf_map_lookup_elem(map_fd, &key, &value);
0127 if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
0128 goto cleanup;
0129
0130 cleanup:
0131 test_lookup_and_delete__destroy(skel);
0132 }
0133
0134 static void test_lookup_and_delete_percpu_hash(void)
0135 {
0136 struct test_lookup_and_delete *skel;
0137 __u64 key, val, value[nr_cpus];
0138 int map_fd, err, i;
0139
0140
0141 skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd);
0142 if (!ASSERT_OK_PTR(skel, "setup_prog"))
0143 return;
0144
0145 err = fill_values_percpu(map_fd);
0146 if (!ASSERT_OK(err, "fill_values_percpu"))
0147 goto cleanup;
0148
0149
0150 key = 1;
0151 err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
0152 &key, sizeof(key), value, sizeof(value), 0);
0153 if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
0154 goto cleanup;
0155
0156 for (i = 0; i < nr_cpus; i++) {
0157 val = value[i];
0158
0159
0160 if (CHECK(val != START_VALUE, "map value",
0161 "unexpected for cpu %d: %lld\n", i, val))
0162 goto cleanup;
0163 }
0164
0165
0166 err = bpf_map_lookup_elem(map_fd, &key, value);
0167 if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
0168 goto cleanup;
0169
0170 cleanup:
0171 test_lookup_and_delete__destroy(skel);
0172 }
0173
0174 static void test_lookup_and_delete_lru_hash(void)
0175 {
0176 struct test_lookup_and_delete *skel;
0177 __u64 key, value;
0178 int map_fd, err;
0179
0180
0181 skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd);
0182 if (!ASSERT_OK_PTR(skel, "setup_prog"))
0183 return;
0184
0185 err = fill_values(map_fd);
0186 if (!ASSERT_OK(err, "fill_values"))
0187 goto cleanup;
0188
0189
0190 key = 3;
0191 err = trigger_tp(skel, key, NEW_VALUE);
0192 if (!ASSERT_OK(err, "trigger_tp"))
0193 goto cleanup;
0194
0195
0196 err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
0197 &key, sizeof(key), &value, sizeof(value), 0);
0198 if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
0199 goto cleanup;
0200
0201
0202 if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem",
0203 "unexpected value=%lld\n", value))
0204 goto cleanup;
0205
0206
0207 err = bpf_map_lookup_elem(map_fd, &key, &value);
0208 if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
0209 goto cleanup;
0210
0211 key = 1;
0212 err = bpf_map_lookup_elem(map_fd, &key, &value);
0213 if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
0214 goto cleanup;
0215
0216 cleanup:
0217 test_lookup_and_delete__destroy(skel);
0218 }
0219
0220 static void test_lookup_and_delete_lru_percpu_hash(void)
0221 {
0222 struct test_lookup_and_delete *skel;
0223 __u64 key, val, value[nr_cpus];
0224 int map_fd, err, i, cpucnt = 0;
0225
0226
0227 skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd);
0228 if (!ASSERT_OK_PTR(skel, "setup_prog"))
0229 return;
0230
0231 err = fill_values_percpu(map_fd);
0232 if (!ASSERT_OK(err, "fill_values_percpu"))
0233 goto cleanup;
0234
0235
0236 key = 3;
0237 err = trigger_tp(skel, key, NEW_VALUE);
0238 if (!ASSERT_OK(err, "trigger_tp"))
0239 goto cleanup;
0240
0241
0242 for (i = 0; i < nr_cpus; i++)
0243 value[i] = 0;
0244
0245
0246 err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
0247 &key, sizeof(key), value, sizeof(value), 0);
0248 if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
0249 goto cleanup;
0250
0251
0252 for (i = 0; i < nr_cpus; i++) {
0253 val = value[i];
0254 if (val) {
0255 if (CHECK(val != NEW_VALUE, "map value",
0256 "unexpected for cpu %d: %lld\n", i, val))
0257 goto cleanup;
0258 cpucnt++;
0259 }
0260 }
0261 if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n",
0262 cpucnt))
0263 goto cleanup;
0264
0265
0266 err = bpf_map_lookup_elem(map_fd, &key, &value);
0267 if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
0268 goto cleanup;
0269
0270 key = 1;
0271 err = bpf_map_lookup_elem(map_fd, &key, &value);
0272 if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
0273 goto cleanup;
0274
0275 cleanup:
0276 test_lookup_and_delete__destroy(skel);
0277 }
0278
0279 void test_lookup_and_delete(void)
0280 {
0281 nr_cpus = bpf_num_possible_cpus();
0282
0283 if (test__start_subtest("lookup_and_delete"))
0284 test_lookup_and_delete_hash();
0285 if (test__start_subtest("lookup_and_delete_percpu"))
0286 test_lookup_and_delete_percpu_hash();
0287 if (test__start_subtest("lookup_and_delete_lru"))
0288 test_lookup_and_delete_lru_hash();
0289 if (test__start_subtest("lookup_and_delete_lru_percpu"))
0290 test_lookup_and_delete_lru_percpu_hash();
0291 }