// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

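/*
 * Weak, NULL-by-default ops pointers; strong definitions come from the
 * libunwind-specific objects that are linked in when perf is built with
 * the corresponding (local or cross-arch remote) unwind support.
 */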
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

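/* Record which libunwind ops will service this thread's address space. */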
static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
{
    maps->unwind_libunwind_ops = ops;
}

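/*
 * Select the libunwind ops for this thread from the target architecture
 * and the bitness of the mapped DSO, then let the chosen ops prepare the
 * unwind address space. The address space created by prepare_access() is
 * cached on the maps, so later calls for the same thread return early.
 */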
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
    const char *arch;
    enum dso_type dso_type;
    struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
    int err;

    if (!dwarf_callchain_users)
        return 0;

    if (maps->addr_space) {
        pr_debug("unwind: thread map already set, dso=%s\n",
             map->dso->name);
        if (initialized)
            *initialized = true;
        return 0;
    }

    /* env->arch is NULL for live-mode (i.e. perf top) */
    if (!maps->machine->env || !maps->machine->env->arch)
        goto out_register;

    dso_type = dso__type(map->dso, maps->machine);
    if (dso_type == DSO__TYPE_UNKNOWN)
        return 0;

    arch = perf_env__arch(maps->machine->env);

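    /*
     * Cross-unwind selection: 32-bit DSOs on an x86 target and 64-bit
     * DSOs on arm/arm64 use their dedicated remote ops; everything else
     * keeps the local (host-native) ops chosen above.
     */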
    if (!strcmp(arch, "x86")) {
        if (dso_type != DSO__TYPE_64BIT)
            ops = x86_32_unwind_libunwind_ops;
    } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
        if (dso_type == DSO__TYPE_64BIT)
            ops = arm64_unwind_libunwind_ops;
    }

    if (!ops) {
        pr_err("unwind: target platform=%s is not supported\n", arch);
        return 0;
    }
out_register:
    unwind__register_ops(maps, ops);

    err = maps->unwind_libunwind_ops->prepare_access(maps);
    if (initialized)
        *initialized = err ? false : true;
    return err;
}

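/*
 * The wrappers below only dispatch when unwind ops were registered for
 * the thread's maps (i.e. DWARF callchain unwinding is in use).
 */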
void unwind__flush_access(struct maps *maps)
{
    if (maps->unwind_libunwind_ops)
        maps->unwind_libunwind_ops->flush_access(maps);
}

void unwind__finish_access(struct maps *maps)
{
    if (maps->unwind_libunwind_ops)
        maps->unwind_libunwind_ops->finish_access(maps);
}

int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
             struct thread *thread,
             struct perf_sample *data, int max_stack,
             bool best_effort)
{
    if (thread->maps->unwind_libunwind_ops)
        return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
                                       max_stack, best_effort);
    return 0;
}
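
/*
 * Illustrative sketch only, not part of the original file: one way a
 * caller could walk a sample's DWARF callchain through
 * unwind__get_entries(). The callback signature and the entry->ip field
 * are assumptions based on the unwind_entry_cb_t/unwind_entry
 * definitions in unwind.h; verify against that header before use.
 */
static int dump_frame(struct unwind_entry *entry, void *arg)
{
    unsigned int *idx = arg;

    /* entry->ip is the unwound instruction pointer for this frame. */
    pr_debug("frame %u: %#llx\n", (*idx)++, (unsigned long long)entry->ip);
    return 0; /* returning non-zero stops the walk */
}

static int dump_callchain(struct thread *thread, struct perf_sample *sample)
{
    unsigned int idx = 0;

    /* 127 mirrors the usual perf max stack depth; best_effort=false. */
    return unwind__get_entries(dump_frame, &idx, thread, sample, 127, false);
}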