// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
                                         struct pt_regs *regs)
{
    unwind_init_common(state, current);

    state->fp = regs->regs[29];
    state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
    unwind_init_common(state, current);

    state->fp = (unsigned long)__builtin_frame_address(1);
    state->pc = (unsigned long)__builtin_return_address(0);
}
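
/*
 * Illustrative sketch (not part of this file): the noinline requirement
 * exists because __builtin_frame_address(1) and __builtin_return_address(0)
 * must refer to a real, materialized frame. A hypothetical caller might
 * look like:
 *
 *    static noinline void example_dump_from_here(stack_trace_consume_fn fn,
 *                                                void *cookie)
 *    {
 *        struct unwind_state state;
 *
 *        unwind_init_from_caller(&state);
 *        unwind(&state, fn, cookie);
 *    }
 *
 * If the wrapper were inlined, frame_address(1) could resolve to some
 * unrelated frame and the resulting trace would be bogus.
 */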

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
                                         struct task_struct *task)
{
    unwind_init_common(state, task);

    state->fp = thread_saved_fp(task);
    state->pc = thread_saved_pc(task);
}

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
{
    if (info)
        info->type = STACK_TYPE_UNKNOWN;

    if (on_task_stack(tsk, sp, size, info))
        return true;
    /* The per-cpu stacks below are only meaningful for current */
    if (tsk != current || preemptible())
        return false;
    if (on_irq_stack(sp, size, info))
        return true;
    if (on_overflow_stack(sp, size, info))
        return true;
    if (on_sdei_stack(sp, size, info))
        return true;

    return false;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
    struct task_struct *tsk = state->task;
    unsigned long fp = state->fp;
    struct stack_info info;
    int err;

    /* Final frame; nothing to unwind */
    if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
        return -ENOENT;

    err = unwind_next_common(state, &info, on_accessible_stack, NULL);
    if (err)
        return err;

    state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
    if (tsk->ret_stack &&
        (state->pc == (unsigned long)return_to_handler)) {
        unsigned long orig_pc;
        /*
         * This is a case where the function graph tracer has
         * modified a return address (LR) in a stack frame
         * to hook a function return.
         * So replace it with the original value.
         */
        orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
                                        (void *)state->fp);
        if (WARN_ON_ONCE(state->pc == orig_pc))
            return -EINVAL;
        state->pc = orig_pc;
    }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
    if (is_kretprobe_trampoline(state->pc))
        state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

    return 0;
}
NOKPROBE_SYMBOL(unwind_next);
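
/*
 * For reference (illustrative, not part of this file): per AAPCS64, a frame
 * record is a pair of 64-bit words pointed to by x29, holding the caller's
 * frame pointer and the saved return address:
 *
 *    struct frame_record {
 *        unsigned long fp;    // caller's x29, i.e. the next record
 *        unsigned long lr;    // saved x30, the return address
 *    };
 *
 * unwind_next_common() in the shared unwind code advances the state by
 * safely loading both words from state->fp, roughly:
 *
 *    state->fp = READ_ONCE(*(unsigned long *)(fp));
 *    state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
 *
 * The exact accessors and validation live in the common code; this is only
 * a sketch of the layout being walked.
 */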

static void notrace unwind(struct unwind_state *state,
                           stack_trace_consume_fn consume_entry, void *cookie)
{
    while (1) {
        int ret;

        if (!consume_entry(cookie, state->pc))
            break;
        ret = unwind_next(state);
        if (ret < 0)
            break;
    }
}
NOKPROBE_SYMBOL(unwind);

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
    char *loglvl = arg;
    printk("%s %pSb\n", loglvl, (void *)where);
    return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                    const char *loglvl)
{
    pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

    if (regs && user_mode(regs))
        return;

    if (!tsk)
        tsk = current;

    if (!try_get_task_stack(tsk))
        return;

    printk("%sCall trace:\n", loglvl);
    arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

    put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
    dump_backtrace(NULL, tsk, loglvl);
    barrier();
}

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
                                      void *cookie, struct task_struct *task,
                                      struct pt_regs *regs)
{
    struct unwind_state state;

    if (regs) {
        /* Unwinding another task's regs is not supported */
        if (task != current)
            return;
        unwind_init_from_regs(&state, regs);
    } else if (task == current) {
        unwind_init_from_caller(&state);
    } else {
        unwind_init_from_task(&state, task);
    }

    unwind(&state, consume_entry, cookie);
}
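
/*
 * Usage sketch (illustrative, not part of this file): a minimal
 * stack_trace_consume_fn that records up to a fixed number of return
 * addresses, in the style of the generic stack_trace_save() machinery.
 * The names trace_buf and collect_entry are hypothetical:
 *
 *    struct trace_buf {
 *        unsigned long entries[16];
 *        unsigned int nr;
 *    };
 *
 *    static bool collect_entry(void *cookie, unsigned long pc)
 *    {
 *        struct trace_buf *buf = cookie;
 *
 *        buf->entries[buf->nr++] = pc;
 *        return buf->nr < ARRAY_SIZE(buf->entries);
 *    }
 *
 * Walking the current task would then be:
 *
 *    arch_stack_walk(collect_entry, &buf, current, NULL);
 *
 * Returning false from the callback stops the walk early, as in unwind()
 * above.
 */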