// SPDX-License-Identifier: GPL-2.0

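/* Test BPF behaviour with the kernel.unprivileged_bpf_disabled sysctl set:
 * after dropping capabilities, operations on already-loaded objects (pinned
 * map access, perf/ring buffer reads, link creation) should still succeed,
 * while prog/map/BTF loads, fd-by-id lookups and prog queries must fail
 * with -EPERM.
 */
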
#include <test_progs.h>
#include <bpf/btf.h>

#include "test_unpriv_bpf_disabled.skel.h"

#include "cap_helpers.h"

/* Mask of all capabilities up to and including CAP_BPF; dropping these
 * leaves the test task with no effective capabilities.
 */
#define ALL_CAPS	((2ULL << CAP_BPF) - 1)

#define PINPATH		"/sys/fs/bpf/unpriv_bpf_disabled_"
#define NUM_MAPS	7

static __u32 got_perfbuf_val;
static __u32 got_ringbuf_val;

static int process_ringbuf(void *ctx, void *data, size_t len)
{
	if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid"))
		got_ringbuf_val = *(__u32 *)data;
	return 0;
}

static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
{
	if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid"))
		got_perfbuf_val = *(__u32 *)data;
}

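/* Read the current value of sysctl_path into old_val (if non-NULL), then
 * write new_val unless it already matches.  Returns 0 on success, -ENOENT
 * if the read fails, or a negative errno on open/write failure.
 */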
static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
{
	int ret = 0;
	FILE *fp;

	fp = fopen(sysctl_path, "r+");
	if (!fp)
		return -errno;
	if (old_val && fscanf(fp, "%s", old_val) <= 0) {
		ret = -ENOENT;
	} else if (!old_val || strcmp(old_val, new_val) != 0) {
		fseek(fp, 0, SEEK_SET);
		if (fprintf(fp, "%s", new_val) < 0)
			ret = -errno;
	}
	fclose(fp);

	return ret;
}

static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
					      __u32 prog_id, int prog_fd, int perf_fd,
					      char **map_paths, int *map_fds)
{
	struct perf_buffer *perfbuf = NULL;
	struct ring_buffer *ringbuf = NULL;
	int i, nr_cpus, link_fd = -1;

	nr_cpus = bpf_num_possible_cpus();

	skel->bss->perfbuf_val = 1;
	skel->bss->ringbuf_val = 2;

	/* Positive tests for unprivileged BPF disabled.  Verify that, even
	 * with all capabilities dropped, we can still
	 * - read perf and ring buffer output;
	 * - retrieve pinned maps and update/look up/delete their elements;
	 * - create a link against the perf event.
	 */
	perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL,
				   NULL);
	if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new"))
		goto cleanup;

	ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	/* trigger the BPF program via nanosleep, then validate its output */
	usleep(1);

	ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll");
	ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val");
	ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");
	ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val");

	for (i = 0; i < NUM_MAPS; i++) {
		map_fds[i] = bpf_obj_get(map_paths[i]);
		if (!ASSERT_GT(map_fds[i], -1, "obj_get"))
			goto cleanup;
	}

	for (i = 0; i < NUM_MAPS; i++) {
		bool prog_array = strstr(map_paths[i], "prog_array") != NULL;
		bool array = strstr(map_paths[i], "array") != NULL;
		bool buf = strstr(map_paths[i], "buf") != NULL;
		__u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus];
		__u32 expected_val = 1;
		int j;

		/* buffer maps (perfbuf, ringbuf) do not support element access; skip */
		if (buf)
			continue;

		for (j = 0; j < nr_cpus; j++)
			vals[j] = expected_val;

		if (prog_array) {
			/* need a valid prog fd to store in the prog array */
			vals[0] = prog_fd;
			/* prog array lookups return the prog id, not the fd */
			expected_val = prog_id;
		}
		ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem");
		ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem");
		ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values");
		if (!array)
			ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem");
	}

	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd,
				  BPF_PERF_EVENT, NULL);
	ASSERT_GT(link_fd, 0, "link_create");

cleanup:
	if (link_fd >= 0)
		close(link_fd);
	if (perfbuf)
		perf_buffer__free(perfbuf);
	if (ringbuf)
		ring_buffer__free(ringbuf);
}

static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel,
					      __u32 prog_id, int prog_fd, int perf_fd,
					      char **map_paths, int *map_fds)
{
	const struct bpf_insn prog_insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
	struct bpf_map_info map_info = {};
	__u32 map_info_len = sizeof(map_info);
	struct bpf_link_info link_info = {};
	__u32 link_info_len = sizeof(link_info);
	struct btf *btf = NULL;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;
	__u32 next;
	int i;

	/* Negative tests for unprivileged BPF disabled.  Verify we cannot
	 * - load BPF programs;
	 * - create BPF maps;
	 * - get a prog/map/link fd by id;
	 * - get the next prog/map/link id;
	 * - query programs;
	 * - load BTF.
	 */
	ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL",
				prog_insns, prog_insn_cnt, &load_opts),
		  -EPERM, "prog_load_fails");

	for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
		ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
			  -EPERM, "map_create_fails");

	ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails");
	ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
	ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");

	if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
		      "obj_get_info_by_fd")) {
		ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
		ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
			  "map_get_next_id_fails");
	}
	ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");

	if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
					     &link_info, &link_info_len),
		      "obj_get_info_by_fd")) {
		ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
		ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
			  "link_get_next_id_fails");
	}
	ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails");

	ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids,
				 &prog_cnt), -EPERM, "prog_query_fails");

	btf = btf__new_empty();
	if (ASSERT_OK_PTR(btf, "empty_btf") &&
	    ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) {
		const void *raw_btf_data;
		__u32 raw_btf_size;

		raw_btf_data = btf__raw_data(btf, &raw_btf_size);
		if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
			ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM,
				  "bpf_btf_load_fails");
	}
	btf__free(btf);
}

void test_unpriv_bpf_disabled(void)
{
	char *map_paths[NUM_MAPS] = { PINPATH "array",
				      PINPATH "percpu_array",
				      PINPATH "hash",
				      PINPATH "percpu_hash",
				      PINPATH "perfbuf",
				      PINPATH "ringbuf",
				      PINPATH "prog_array" };
	int map_fds[NUM_MAPS];
	struct test_unpriv_bpf_disabled *skel;
	char unprivileged_bpf_disabled_orig[32] = {};
	char perf_event_paranoid_orig[32] = {};
	struct bpf_prog_info prog_info = {};
	__u32 prog_info_len = sizeof(prog_info);
	struct perf_event_attr attr = {};
	int prog_fd, perf_fd = -1, i, ret;
	__u64 save_caps = 0;
	__u32 prog_id;

	skel = test_unpriv_bpf_disabled__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->bss->test_pid = getpid();

	map_fds[0] = bpf_map__fd(skel->maps.array);
	map_fds[1] = bpf_map__fd(skel->maps.percpu_array);
	map_fds[2] = bpf_map__fd(skel->maps.hash);
	map_fds[3] = bpf_map__fd(skel->maps.percpu_hash);
	map_fds[4] = bpf_map__fd(skel->maps.perfbuf);
	map_fds[5] = bpf_map__fd(skel->maps.ringbuf);
	map_fds[6] = bpf_map__fd(skel->maps.prog_array);

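	/* Pin each map so that, once capabilities are dropped, the subtests
	 * can re-open them via bpf_obj_get().
	 */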
	for (i = 0; i < NUM_MAPS; i++)
		ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd");

	/* allow processes without CAP_PERFMON to open perf events */
	if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig,
				  "-1"),
		       "set_perf_event_paranoid"))
		goto cleanup;

	ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
			 unprivileged_bpf_disabled_orig, "2");
	if (ret == -EPERM) {
		/* unprivileged_bpf_disabled=1 cannot be changed at runtime;
		 * that is fine, since unprivileged BPF is disabled either way.
		 */
		if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"),
			       "unprivileged_bpf_disabled_on"))
			goto cleanup;
	} else {
		if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled"))
			goto cleanup;
	}

	prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
	ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
		  "obj_get_info_by_fd");
	prog_id = prog_info.id;
	ASSERT_GT(prog_id, 0, "valid_prog_id");

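	/* open a software CPU-clock perf event; the positive subtest attaches
	 * handle_perf_event to it via bpf_link_create()
	 */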
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(perf_fd, 0, "perf_fd"))
		goto cleanup;

	if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach"))
		goto cleanup;

	if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps"))
		goto cleanup;

	if (test__start_subtest("unpriv_bpf_disabled_positive"))
		test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths,
						  map_fds);

	if (test__start_subtest("unpriv_bpf_disabled_negative"))
		test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths,
						  map_fds);

cleanup:
	if (perf_fd >= 0)
		close(perf_fd);
	if (save_caps)
		cap_enable_effective(save_caps, NULL);
	if (strlen(perf_event_paranoid_orig) > 0)
		sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig);
	if (strlen(unprivileged_bpf_disabled_orig) > 0)
		sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL,
			   unprivileged_bpf_disabled_orig);
	for (i = 0; i < NUM_MAPS; i++)
		unlink(map_paths[i]);
	test_unpriv_bpf_disabled__destroy(skel);
}