#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/llist.h>

#include <asm/memory.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

#include <asm/stacktrace/common.h>

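/*
 * Dump a kernel backtrace for @tsk (the current task when NULL) at console
 * log level @loglvl; @regs, when non-NULL, supplies the register state to
 * start unwinding from.
 */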
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

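/* Check whether [sp, sp + size) lies on this CPU's IRQ stack. */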
static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

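/* Check whether [sp, sp + size) lies on @tsk's task stack. */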
static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

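/*
 * Check whether [sp, sp + size) lies on this CPU's overflow stack. The
 * overflow stack only exists with CONFIG_VMAP_STACK; the stub in the #else
 * branch below always returns false.
 */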
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info) { return false; }
#endif

#endif	/* __ASM_STACKTRACE_H */