Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 #define _GNU_SOURCE
0003 #include <pthread.h>
0004 #include <sched.h>
0005 #include <sys/socket.h>
0006 #include <test_progs.h>
0007 
0008 #define MAX_CNT_RAWTP   10ull
0009 #define MAX_STACK_RAWTP 100
0010 
/* Accumulator read by the test_progs CHECK() macro used throughout this
 * file (presumably for per-check timing; see test_progs.h — the macro
 * expects a file-scope `duration` to be in scope).
 */
static int duration = 0;

/* Layout of one record submitted by the BPF program through the perf
 * buffer. Must match the producer side in test_get_stack_rawtp.o; a
 * record may arrive truncated (see get_stack_print_output(), which
 * handles size < sizeof(struct get_stack_trace_t)).
 */
struct get_stack_trace_t {
    int pid;                    /* pid captured by the BPF program */
    int kern_stack_size;        /* bytes in kern_stack, or <0 on error */
    int user_stack_size;        /* bytes in user_stack, or <0 on error */
    int user_stack_buildid_size;    /* bytes in user_stack_buildid, or <0 */
    __u64 kern_stack[MAX_STACK_RAWTP];  /* raw kernel IPs */
    __u64 user_stack[MAX_STACK_RAWTP];  /* raw user IPs */
    struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
0022 
0023 static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
0024 {
0025     bool good_kern_stack = false, good_user_stack = false;
0026     const char *nonjit_func = "___bpf_prog_run";
0027     /* perfbuf-submitted data is 4-byte aligned, but we need 8-byte
0028      * alignment, so copy data into a local variable, for simplicity
0029      */
0030     struct get_stack_trace_t e;
0031     int i, num_stack;
0032     struct ksym *ks;
0033 
0034     memset(&e, 0, sizeof(e));
0035     memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));
0036 
0037     if (size < sizeof(struct get_stack_trace_t)) {
0038         __u64 *raw_data = data;
0039         bool found = false;
0040 
0041         num_stack = size / sizeof(__u64);
0042         /* If jit is enabled, we do not have a good way to
0043          * verify the sanity of the kernel stack. So we
0044          * just assume it is good if the stack is not empty.
0045          * This could be improved in the future.
0046          */
0047         if (env.jit_enabled) {
0048             found = num_stack > 0;
0049         } else {
0050             for (i = 0; i < num_stack; i++) {
0051                 ks = ksym_search(raw_data[i]);
0052                 if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
0053                     found = true;
0054                     break;
0055                 }
0056             }
0057         }
0058         if (found) {
0059             good_kern_stack = true;
0060             good_user_stack = true;
0061         }
0062     } else {
0063         num_stack = e.kern_stack_size / sizeof(__u64);
0064         if (env.jit_enabled) {
0065             good_kern_stack = num_stack > 0;
0066         } else {
0067             for (i = 0; i < num_stack; i++) {
0068                 ks = ksym_search(e.kern_stack[i]);
0069                 if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
0070                     good_kern_stack = true;
0071                     break;
0072                 }
0073             }
0074         }
0075         if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0)
0076             good_user_stack = true;
0077     }
0078 
0079     if (!good_kern_stack)
0080         CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
0081     if (!good_user_stack)
0082         CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
0083 }
0084 
0085 void test_get_stack_raw_tp(void)
0086 {
0087     const char *file = "./test_get_stack_rawtp.o";
0088     const char *file_err = "./test_get_stack_rawtp_err.o";
0089     const char *prog_name = "bpf_prog1";
0090     int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
0091     struct perf_buffer *pb = NULL;
0092     struct bpf_link *link = NULL;
0093     struct timespec tv = {0, 10};
0094     struct bpf_program *prog;
0095     struct bpf_object *obj;
0096     struct bpf_map *map;
0097     cpu_set_t cpu_set;
0098 
0099     err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
0100     if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
0101         return;
0102 
0103     err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
0104     if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
0105         return;
0106 
0107     prog = bpf_object__find_program_by_name(obj, prog_name);
0108     if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
0109         goto close_prog;
0110 
0111     map = bpf_object__find_map_by_name(obj, "perfmap");
0112     if (CHECK(!map, "bpf_find_map", "not found\n"))
0113         goto close_prog;
0114 
0115     err = load_kallsyms();
0116     if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
0117         goto close_prog;
0118 
0119     CPU_ZERO(&cpu_set);
0120     CPU_SET(0, &cpu_set);
0121     err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
0122     if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
0123         goto close_prog;
0124 
0125     link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
0126     if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
0127         goto close_prog;
0128 
0129     pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
0130                   NULL, NULL, NULL);
0131     if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
0132         goto close_prog;
0133 
0134     /* trigger some syscall action */
0135     for (i = 0; i < MAX_CNT_RAWTP; i++)
0136         nanosleep(&tv, NULL);
0137 
0138     while (exp_cnt > 0) {
0139         err = perf_buffer__poll(pb, 100);
0140         if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
0141             goto close_prog;
0142         exp_cnt -= err;
0143     }
0144 
0145 close_prog:
0146     bpf_link__destroy(link);
0147     perf_buffer__free(pb);
0148     bpf_object__close(obj);
0149 }