// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_stacktrace_build_id.skel.h"

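/* perf_event_open() rejects a sample_freq above
 * /proc/sys/kernel/perf_event_max_sample_rate with EINVAL, so read the
 * current cap and sample at exactly that rate.
 */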
static __u64 read_perf_max_sample_freq(void)
{
    __u64 sample_freq = 5000; /* fall back to 5000 on error */
    FILE *f;
    __u32 duration = 0;

    f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
    if (f == NULL)
        return sample_freq;
    CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
          "return default value: 5000, err %d\n", -errno);
    fclose(f);
    return sample_freq;
}

void test_stacktrace_build_id_nmi(void)
{
    int control_map_fd, stackid_hmap_fd, stackmap_fd;
    struct test_stacktrace_build_id *skel;
    int err, pmu_fd;
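    /* .freq = 1 makes sample_freq a rate in samples per second; hardware
     * cycle-counter overflows are typically delivered as NMIs, which is
     * the path this test exercises
     */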
    struct perf_event_attr attr = {
        .freq = 1,
        .type = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
    };
    __u32 key, prev_key, val, duration = 0;
    char buf[256];
    int i, j;
    struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
    int build_id_matches = 0;
    int retry = 1;

    attr.sample_freq = read_perf_max_sample_freq();

retry:
    skel = test_stacktrace_build_id__open();
    if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
        return;

    /* override program type */
    bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);

    err = test_stacktrace_build_id__load(skel);
    if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
        goto cleanup;

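    /* sample all processes, but only on CPU 0, matching the taskset
     * pinning of the workload below
     */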
    pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
             0 /* cpu 0 */, -1 /* group id */,
             0 /* flags */);
    if (pmu_fd < 0 && errno == ENOENT) {
        printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
        test__skip();
        goto cleanup;
    }
    if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
          pmu_fd, errno))
        goto cleanup;

    skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
                               pmu_fd);
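    /* on success the returned link takes ownership of pmu_fd and closes
     * it when the skeleton is destroyed; on failure we close it ourselves
     */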
    if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
        close(pmu_fd);
        goto cleanup;
    }

    /* find map fds */
    control_map_fd = bpf_map__fd(skel->maps.control_map);
    stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
    stackmap_fd = bpf_map__fd(skel->maps.stackmap);

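    /* generate enough activity on CPU 0 for the sampler to capture;
     * urandom_read is pinned there with taskset and is the binary whose
     * build ID extract_build_id() reports below
     */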
    if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
        goto cleanup;
    if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
        goto cleanup;
    /* disable stack trace collection */
    key = 0;
    val = 1;
    bpf_map_update_elem(control_map_fd, &key, &val, 0);

    /* for every element in stackid_hmap, we can find a corresponding one
     * in stackmap, and vice versa.
     */
    err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
    if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
          "err %d errno %d\n", err, errno))
        goto cleanup;

    err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
    if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
          "err %d errno %d\n", err, errno))
        goto cleanup;

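    /* read the expected build ID of ./urandom_read into buf via readelf */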
    err = extract_build_id(buf, 256);

    if (CHECK(err, "get build_id with readelf",
          "err %d errno %d\n", err, errno))
        goto cleanup;

    err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
    if (CHECK(err, "get_next_key from stackmap",
          "err %d, errno %d\n", err, errno))
        goto cleanup;

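    /* walk every stackmap entry, looking for at least one frame whose
     * recorded build ID matches urandom_read's
     */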
    do {
        char build_id[64];

        err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
                       id_offs, sizeof(id_offs), 0);
        if (CHECK(err, "lookup_elem from stackmap",
              "err %d, errno %d\n", err, errno))
            goto cleanup;
        for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
            if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
                id_offs[i].offset != 0) {
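                /* render the 20-byte GNU build ID as hex so it can be
                 * matched against the readelf output in buf
                 */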
                for (j = 0; j < 20; ++j)
                    sprintf(build_id + 2 * j, "%02x",
                        id_offs[i].build_id[j] & 0xff);
                if (strstr(buf, build_id) != NULL)
                    build_id_matches = 1;
            }
        prev_key = key;
    } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);

    /* stack_map_get_build_id_offset() is racy and sometimes can return
     * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
     * try it one more time.
     */
    if (build_id_matches < 1 && retry--) {
        test_stacktrace_build_id__destroy(skel);
        printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
               __func__);
        goto retry;
    }

    if (CHECK(build_id_matches < 1, "build id match",
          "Didn't find expected build ID from the map\n"))
        goto cleanup;

    /*
     * We intentionally skip compare_stack_ips(). This is because we
     * only support one in_nmi() ips-to-build_id translation per cpu
     * at any time, so stack_amap here will always fall back to
     * BPF_STACK_BUILD_ID_IP.
     */

cleanup:
    test_stacktrace_build_id__destroy(skel);
}