0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  Copyright (C) 1991, 1992  Linus Torvalds
0004  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
0005  */
0006 #include <linux/sched/debug.h>
0007 #include <linux/kallsyms.h>
0008 #include <linux/kprobes.h>
0009 #include <linux/uaccess.h>
0010 #include <linux/hardirq.h>
0011 #include <linux/kdebug.h>
0012 #include <linux/export.h>
0013 #include <linux/ptrace.h>
0014 #include <linux/kexec.h>
0015 #include <linux/sysfs.h>
0016 #include <linux/bug.h>
0017 #include <linux/nmi.h>
0018 
0019 #include <asm/cpu_entry_area.h>
0020 #include <asm/stacktrace.h>
0021 
0022 static const char * const exception_stack_names[] = {
0023         [ ESTACK_DF ]   = "#DF",
0024         [ ESTACK_NMI    ]   = "NMI",
0025         [ ESTACK_DB ]   = "#DB",
0026         [ ESTACK_MCE    ]   = "#MC",
0027         [ ESTACK_VC ]   = "#VC",
0028         [ ESTACK_VC2    ]   = "#VC2",
0029 };
0030 
0031 const char *stack_type_name(enum stack_type type)
0032 {
0033     BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
0034 
0035     if (type == STACK_TYPE_TASK)
0036         return "TASK";
0037 
0038     if (type == STACK_TYPE_IRQ)
0039         return "IRQ";
0040 
0041     if (type == STACK_TYPE_SOFTIRQ)
0042         return "SOFTIRQ";
0043 
0044     if (type == STACK_TYPE_ENTRY) {
0045         /*
0046          * On 64-bit, we have a generic entry stack that we
0047          * use for all the kernel entry points, including
0048          * SYSENTER.
0049          */
0050         return "ENTRY_TRAMPOLINE";
0051     }
0052 
0053     if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
0054         return exception_stack_names[type - STACK_TYPE_EXCEPTION];
0055 
0056     return NULL;
0057 }
0058 
/**
 * struct estack_pages - Page descriptor for exception stacks
 * @offs:	Offset from the start of the exception stack area
 * @size:	Size of the exception stack
 * @type:	Type to store in the stack_info struct
 *
 * One descriptor exists per page of the exception stack area; every page
 * covering the same stack carries identical contents. An all-zero
 * descriptor (@size == 0) denotes a guard page.
 */
struct estack_pages {
	u32 offs;
	u16 size;
	u16 type;
};
0070 
/*
 * Expand to a designated-initializer *range* covering every page of the
 * exception stack @st, so that each page's descriptor records the
 * stack's offset, size and stack_info type.
 */
#define EPAGERANGE(st)							\
	[PFN_DOWN(CEA_ESTACK_OFFS(st)) ...				\
	 PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = {	\
		.offs	= CEA_ESTACK_OFFS(st),				\
		.size	= CEA_ESTACK_SIZE(st),				\
		.type	= STACK_TYPE_EXCEPTION + ESTACK_ ##st, }

/*
 * Array of exception stack page descriptors. If the stack is larger than
 * PAGE_SIZE, all pages covering a particular stack will have the same
 * info. The guard pages including the not mapped DB2 stack are zeroed
 * out.
 */
static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
	EPAGERANGE(DF),
	EPAGERANGE(NMI),
	EPAGERANGE(DB),
	EPAGERANGE(MCE),
	EPAGERANGE(VC),
	EPAGERANGE(VC2),
};
0093 
0094 static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
0095 {
0096     unsigned long begin, end, stk = (unsigned long)stack;
0097     const struct estack_pages *ep;
0098     struct pt_regs *regs;
0099     unsigned int k;
0100 
0101     BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
0102 
0103     begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
0104     /*
0105      * Handle the case where stack trace is collected _before_
0106      * cea_exception_stacks had been initialized.
0107      */
0108     if (!begin)
0109         return false;
0110 
0111     end = begin + sizeof(struct cea_exception_stacks);
0112     /* Bail if @stack is outside the exception stack area. */
0113     if (stk < begin || stk >= end)
0114         return false;
0115 
0116     /* Calc page offset from start of exception stacks */
0117     k = (stk - begin) >> PAGE_SHIFT;
0118     /* Lookup the page descriptor */
0119     ep = &estack_pages[k];
0120     /* Guard page? */
0121     if (!ep->size)
0122         return false;
0123 
0124     begin += (unsigned long)ep->offs;
0125     end = begin + (unsigned long)ep->size;
0126     regs = (struct pt_regs *)end - 1;
0127 
0128     info->type  = ep->type;
0129     info->begin = (unsigned long *)begin;
0130     info->end   = (unsigned long *)end;
0131     info->next_sp   = (unsigned long *)regs->sp;
0132     return true;
0133 }
0134 
0135 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
0136 {
0137     unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
0138     unsigned long *begin;
0139 
0140     /*
0141      * @end points directly to the top most stack entry to avoid a -8
0142      * adjustment in the stack switch hotpath. Adjust it back before
0143      * calculating @begin.
0144      */
0145     end++;
0146     begin = end - (IRQ_STACK_SIZE / sizeof(long));
0147 
0148     /*
0149      * Due to the switching logic RSP can never be == @end because the
0150      * final operation is 'popq %rsp' which means after that RSP points
0151      * to the original stack and not to @end.
0152      */
0153     if (stack < begin || stack >= end)
0154         return false;
0155 
0156     info->type  = STACK_TYPE_IRQ;
0157     info->begin = begin;
0158     info->end   = end;
0159 
0160     /*
0161      * The next stack pointer is stored at the top of the irq stack
0162      * before switching to the irq stack. Actual stack entries are all
0163      * below that.
0164      */
0165     info->next_sp = (unsigned long *)*(end - 1);
0166 
0167     return true;
0168 }
0169 
0170 bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
0171                     struct stack_info *info)
0172 {
0173     if (in_task_stack(stack, task, info))
0174         return true;
0175 
0176     if (task != current)
0177         return false;
0178 
0179     if (in_exception_stack(stack, info))
0180         return true;
0181 
0182     if (in_irq_stack(stack, info))
0183         return true;
0184 
0185     if (in_entry_stack(stack, info))
0186         return true;
0187 
0188     return false;
0189 }
0190 
0191 int get_stack_info(unsigned long *stack, struct task_struct *task,
0192            struct stack_info *info, unsigned long *visit_mask)
0193 {
0194     task = task ? : current;
0195 
0196     if (!stack)
0197         goto unknown;
0198 
0199     if (!get_stack_info_noinstr(stack, task, info))
0200         goto unknown;
0201 
0202     /*
0203      * Make sure we don't iterate through any given stack more than once.
0204      * If it comes up a second time then there's something wrong going on:
0205      * just break out and report an unknown stack type.
0206      */
0207     if (visit_mask) {
0208         if (*visit_mask & (1UL << info->type)) {
0209             if (task == current)
0210                 printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
0211             goto unknown;
0212         }
0213         *visit_mask |= 1UL << info->type;
0214     }
0215 
0216     return 0;
0217 
0218 unknown:
0219     info->type = STACK_TYPE_UNKNOWN;
0220     return -EINVAL;
0221 }