// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <test_progs.h>
#include "perf_event_stackmap.skel.h"

#ifndef noinline
#define noinline __attribute__((noinline))
#endif

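/*
 * noinline call chain (func_6 -> func_5 -> ... -> func_1) used to generate
 * user stack traces of some depth while the perf event is sampling; the
 * usleep() in func_1 also produces kernel stacks via the syscall path.
 */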
noinline int func_1(void)
{
    static int val = 1;

    val += 1;

    usleep(100);
    return val;
}

noinline int func_2(void)
{
    return func_1();
}

noinline int func_3(void)
{
    return func_2();
}

noinline int func_4(void)
{
    return func_3();
}

noinline int func_5(void)
{
    return func_4();
}

noinline int func_6(void)
{
    int i, val = 1;

    for (i = 0; i < 100; i++)
        val += func_5();

    return val;
}

void test_perf_event_stackmap(void)
{
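    /*
     * Sample hardware CPU cycles with a period of 5000, recording the IP,
     * the branch stack in call-stack mode, and the call chain with each
     * sample.
     */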
    struct perf_event_attr attr = {
        /* .type = PERF_TYPE_SOFTWARE, */
        .type = PERF_TYPE_HARDWARE,
        .config = PERF_COUNT_HW_CPU_CYCLES,
        .precise_ip = 2,
        .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK |
            PERF_SAMPLE_CALLCHAIN,
        .branch_sample_type = PERF_SAMPLE_BRANCH_USER |
            PERF_SAMPLE_BRANCH_NO_FLAGS |
            PERF_SAMPLE_BRANCH_NO_CYCLES |
            PERF_SAMPLE_BRANCH_CALL_STACK,
        .sample_period = 5000,
        .size = sizeof(struct perf_event_attr),
    };
    struct perf_event_stackmap *skel;
    __u32 duration = 0;
    cpu_set_t cpu_set;
    int pmu_fd, err;

    skel = perf_event_stackmap__open();

    if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
        return;

    err = perf_event_stackmap__load(skel);
    if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
        goto cleanup;

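    /* pin this thread to CPU 0 so it is sampled by the per-CPU event below */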
    CPU_ZERO(&cpu_set);
    CPU_SET(0, &cpu_set);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
    if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
        goto cleanup;

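    /* open the cycles event on CPU 0 for all pids; skip if unsupported */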
    pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                     0 /* cpu 0 */, -1 /* group id */,
                     0 /* flags */);
    if (pmu_fd < 0) {
        printf("%s:SKIP:cpu doesn't support the event\n", __func__);
        test__skip();
        goto cleanup;
    }

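    /* attach the BPF program to the perf event; it runs on each sample */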
    skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
                                                       pmu_fd);
    if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
        close(pmu_fd);
        goto cleanup;
    }

    /* create kernel and user stack traces for testing */
    func_6();

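    /*
     * The attached BPF program is expected to set each of these variables
     * to 2 once the corresponding bpf_get_stackid()/bpf_get_stack() call
     * succeeded for kernel and user stacks; any other value means the
     * helper never succeeded during sampling.
     */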
    CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
    CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
    CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
    CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");

cleanup:
    perf_event_stackmap__destroy(skel);
}