// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <inttypes.h>
#include <limits.h>
#include <unistd.h>
#include "tests.h"
#include "debug.h"
#include "machine.h"
#include "event.h"
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "callchain.h"
#include "util/synthetic-events.h"

/* For bsearch. We try to unwind functions in a shared object. */
#include <stdlib.h>

/*
 * The test will assert frames are on the stack but tail call optimizations lose
 * the frame of the caller. Clang can disable this optimization on a called
 * function but GCC currently (11/2020) lacks this attribute. The barrier is
 * used to inhibit tail calls in these cases.
 */
#ifdef __has_attribute
#if __has_attribute(disable_tail_calls)
#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
#define NO_TAIL_CALL_BARRIER
#endif
#endif
#ifndef NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif
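
/*
 * Why the barrier works: an empty asm statement with a "memory" clobber
 * placed between the call and the return means the call is no longer in
 * tail position, so the compiler cannot turn it into a jump and the
 * caller's frame stays on the stack for the unwinder to find.
 */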

static int mmap_handler(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

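/*
 * Build the "live machine": synthesize mmap2 events for the current
 * process (taken from /proc) and feed them through mmap_handler, so the
 * machine knows the test binary's and libc's mappings and can resolve
 * the addresses the unwinder will report.
 */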
static int init_live_machine(struct machine *machine)
{
	union perf_event event;
	pid_t pid = getpid();

	memset(&event, 0, sizeof(event));
	return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
						   mmap_handler, machine, true);
}

/*
 * We need to keep these functions global, despite the
 * fact that they are used only locally in this object,
 * in order to keep them around even if the binary is
 * stripped. If they are gone, the symbol check in the
 * unwind callback fails.
 */
int test_dwarf_unwind__thread(struct thread *thread);
int test_dwarf_unwind__compare(void *p1, void *p2);
int test_dwarf_unwind__krava_3(struct thread *thread);
int test_dwarf_unwind__krava_2(struct thread *thread);
int test_dwarf_unwind__krava_1(struct thread *thread);

#define MAX_STACK 8

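/*
 * Callback invoked once per unwound frame. It checks each resolved symbol
 * name against funcs[]: with ORDER_CALLEE the innermost frame comes first,
 * so the index simply follows the entry count; with ORDER_CALLER the
 * outermost frame (test__dwarf_unwind) comes first, so the index counts
 * down from the end of the array.
 */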
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	unsigned long *cnt = (unsigned long *) arg;
	char *symbol = entry->ms.sym ? entry->ms.sym->name : NULL;
	static const char *funcs[MAX_STACK] = {
		"test__arch_unwind_sample",
		"test_dwarf_unwind__thread",
		"test_dwarf_unwind__compare",
		"bsearch",
		"test_dwarf_unwind__krava_3",
		"test_dwarf_unwind__krava_2",
		"test_dwarf_unwind__krava_1",
		"test__dwarf_unwind"
	};
	/*
	 * The funcs[MAX_STACK] array index, based on the
	 * callchain order setup.
	 */
	int idx = callchain_param.order == ORDER_CALLER ?
		  MAX_STACK - *cnt - 1 : *cnt;

	if (*cnt >= MAX_STACK) {
		pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
		return -1;
	}

	if (!symbol) {
		pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
			 entry->ip);
		return -1;
	}

	(*cnt)++;
	pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
		 symbol, entry->ip, funcs[idx]);
	return strcmp((const char *) symbol, funcs[idx]);
}

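/*
 * Capture a sample of the current user registers and stack via the
 * arch-specific test__arch_unwind_sample(), then run the DWARF unwinder
 * over it and require exactly MAX_STACK frames, each matching funcs[].
 */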
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
{
	struct perf_sample sample;
	unsigned long cnt = 0;
	int err = -1;

	memset(&sample, 0, sizeof(sample));

	if (test__arch_unwind_sample(&sample, thread)) {
		pr_debug("failed to get unwind sample\n");
		goto out;
	}

	err = unwind__get_entries(unwind_entry, &cnt, thread,
				  &sample, MAX_STACK, false);
	if (err)
		pr_debug("unwind failed\n");
	else if (cnt != MAX_STACK) {
		pr_debug("got wrong number of stack entries %lu != %d\n",
			 cnt, MAX_STACK);
		err = -1;
	}

 out:
	zfree(&sample.user_stack.data);
	zfree(&sample.user_regs.regs);
	return err;
}

static int global_unwind_retval = -INT_MAX;

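/*
 * Used as the bsearch() comparison callback so that the unwind is
 * triggered while the call chain runs through libc's bsearch, i.e.
 * through a shared object. The first invocation unwinds in both
 * callchain orders and caches the result in global_unwind_retval;
 * later invocations just compare.
 */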
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
	/* Both elements bsearch can pass here point to 'thread'. */
	struct thread *thread = *(struct thread **)p1;

	if (global_unwind_retval == -INT_MAX) {
		/* Call the unwinder twice, once for each callchain order. */
		callchain_param.order = ORDER_CALLER;

		global_unwind_retval = test_dwarf_unwind__thread(thread);
		if (!global_unwind_retval) {
			callchain_param.order = ORDER_CALLEE;
			global_unwind_retval = test_dwarf_unwind__thread(thread);
		}
	}

	return p1 - p2;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
	struct thread *array[2] = {thread, thread};
	void *fp = &bsearch;
	/*
	 * Make _bsearch a volatile function pointer to prevent a potential
	 * optimization that would expand bsearch and call compare directly
	 * from this function instead of from the libc shared object.
	 */
	void *(*volatile _bsearch)(void *, void *, size_t,
			size_t, int (*)(void *, void *));

	_bsearch = fp;
	_bsearch(array, &thread, 2, sizeof(struct thread **),
		 test_dwarf_unwind__compare);
	return global_unwind_retval;
}

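/*
 * krava_2 and krava_1 only add two more known frames to the expected
 * stack. The barrier after each call keeps the compiler from turning
 * the call into a tail call, which would drop the frame.
 */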
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
	int ret;

	ret = test_dwarf_unwind__krava_3(thread);
	NO_TAIL_CALL_BARRIER;
	return ret;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
	int ret;

	ret = test_dwarf_unwind__krava_2(thread);
	NO_TAIL_CALL_BARRIER;
	return ret;
}

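/*
 * Test entry point: set up a host machine with kernel maps and the
 * current process's mmaps, switch callchain recording to DWARF, look up
 * the current thread, and kick off the krava_1 -> krava_2 -> krava_3 ->
 * bsearch -> compare chain whose unwind is then verified.
 */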
static int test__dwarf_unwind(struct test_suite *test __maybe_unused,
			      int subtest __maybe_unused)
{
	struct machine *machine;
	struct thread *thread;
	int err = -1;

	machine = machine__new_host();
	if (!machine) {
		pr_err("Could not get machine\n");
		return -1;
	}

	if (machine__create_kernel_maps(machine)) {
		pr_err("Failed to create kernel maps\n");
		goto out;
	}

	callchain_param.record_mode = CALLCHAIN_DWARF;
	dwarf_callchain_users = true;

	if (init_live_machine(machine)) {
		pr_err("Could not init machine\n");
		goto out;
	}

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	thread = machine__find_thread(machine, getpid(), getpid());
	if (!thread) {
		pr_err("Could not get thread\n");
		goto out;
	}

	err = test_dwarf_unwind__krava_1(thread);
	thread__put(thread);

 out:
	machine__delete_threads(machine);
	machine__delete(machine);
	return err;
}

DEFINE_SUITE("Test dwarf unwind", dwarf_unwind);
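
/*
 * Usage note (not part of the upstream file): once built into perf, the
 * suite registered by DEFINE_SUITE can be run with something like
 * `perf test "dwarf unwind"` (perf test matches suites by description),
 * adding -v to see the pr_debug() output above.
 */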