0001 // SPDX-License-Identifier: GPL-2.0-only
0002 #include <linux/objtool.h>
0003 #include <linux/module.h>
0004 #include <linux/sort.h>
0005 #include <asm/ptrace.h>
0006 #include <asm/stacktrace.h>
0007 #include <asm/unwind.h>
0008 #include <asm/orc_types.h>
0009 #include <asm/orc_lookup.h>
0010 
/*
 * Warn at most once, via deferred printk (the unwinder can run in contexts
 * where a synchronous printk is not safe).
 */
#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

/*
 * Warn only when unwinding the current task and only if no unwind error has
 * been recorded yet in the caller's 'state' (which this macro captures from
 * the enclosing scope).
 */
#define orc_warn_current(args...)					\
({									\
	if (state->task == current && !state->error)			\
		orc_warn(args);						\
})
0019 
/* Linker-generated bounds of the .orc_unwind_ip and .orc_unwind sections. */
extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

/* Set by unwind_init() once the ORC tables have been validated. */
static bool orc_init __ro_after_init;
/* Number of entries in the orc_lookup[] fast-lookup table. */
static unsigned int lookup_num_blocks __ro_after_init;
0027 
/*
 * Decode a self-relative .orc_unwind_ip entry: the stored value is a signed
 * offset from the entry's own address to the text address it describes.
 */
static inline unsigned long orc_ip(const int *ip)
{
	unsigned long entry_addr = (unsigned long)ip;

	return entry_addr + *ip;
}
0032 
0033 static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
0034                     unsigned int num_entries, unsigned long ip)
0035 {
0036     int *first = ip_table;
0037     int *last = ip_table + num_entries - 1;
0038     int *mid = first, *found = first;
0039 
0040     if (!num_entries)
0041         return NULL;
0042 
0043     /*
0044      * Do a binary range search to find the rightmost duplicate of a given
0045      * starting address.  Some entries are section terminators which are
0046      * "weak" entries for ensuring there are no gaps.  They should be
0047      * ignored when they conflict with a real entry.
0048      */
0049     while (first <= last) {
0050         mid = first + ((last - first) / 2);
0051 
0052         if (orc_ip(mid) <= ip) {
0053             found = mid;
0054             first = mid + 1;
0055         } else
0056             last = mid - 1;
0057     }
0058 
0059     return u_table + (found - ip_table);
0060 }
0061 
#ifdef CONFIG_MODULES
/*
 * Look up the ORC entry for a module text address.  Returns NULL if @ip
 * does not belong to a module or the module carries no ORC data.
 */
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
		      mod->arch.num_orcs, ip);
}
#else
/* Without module support there is no module text to look up. */
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif
0079 
#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long tramp_addr, offset;

	/* Only addresses inside a registered ftrace trampoline qualify. */
	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	/* Set tramp_addr to the start of the code copied by the trampoline */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		tramp_addr = (unsigned long)ftrace_regs_caller;
	else
		tramp_addr = (unsigned long)ftrace_caller;

	/* Now place tramp_addr to the location within the trampoline ip is at */
	offset = ip - ops->trampoline;
	tramp_addr += offset;

	/*
	 * Prevent unlikely recursion: if the translated address equals the
	 * original, the recursive orc_find() below would loop forever.
	 */
	if (ip == tramp_addr)
		return NULL;

	return orc_find(tramp_addr);
}
#else
/* Without dynamic ftrace there are no trampolines to translate. */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif
0124 
/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	/* Treat it like a leaf call: return address sits at the top of stack. */
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = UNWIND_HINT_TYPE_CALL
};

/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	/* Offsets match the usual "push %rbp; mov %rsp, %rbp" prologue. */
	.type		= UNWIND_HINT_TYPE_CALL,
	.sp_reg		= ORC_REG_BP,
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,
	.bp_offset	= -16,
	.end		= 0,
};
0148 
0149 static struct orc_entry *orc_find(unsigned long ip)
0150 {
0151     static struct orc_entry *orc;
0152 
0153     if (ip == 0)
0154         return &null_orc_entry;
0155 
0156     /* For non-init vmlinux addresses, use the fast lookup table: */
0157     if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
0158         unsigned int idx, start, stop;
0159 
0160         idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
0161 
0162         if (unlikely((idx >= lookup_num_blocks-1))) {
0163             orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
0164                  idx, lookup_num_blocks, (void *)ip);
0165             return NULL;
0166         }
0167 
0168         start = orc_lookup[idx];
0169         stop = orc_lookup[idx + 1] + 1;
0170 
0171         if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
0172                  (__start_orc_unwind + stop > __stop_orc_unwind))) {
0173             orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
0174                  idx, lookup_num_blocks, start, stop, (void *)ip);
0175             return NULL;
0176         }
0177 
0178         return __orc_find(__start_orc_unwind_ip + start,
0179                   __start_orc_unwind + start, stop - start, ip);
0180     }
0181 
0182     /* vmlinux .init slow lookup: */
0183     if (is_kernel_inittext(ip))
0184         return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
0185                   __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
0186 
0187     /* Module lookup: */
0188     orc = orc_module_find(ip);
0189     if (orc)
0190         return orc;
0191 
0192     return orc_ftrace_find(ip);
0193 }
0194 
0195 #ifdef CONFIG_MODULES
0196 
/* Serializes module ORC sorting, which relies on the cur_orc_* globals. */
static DEFINE_MUTEX(sort_mutex);
/*
 * The table pair currently being sorted.  orc_sort_swap()/orc_sort_cmp()
 * use these to map an .orc_unwind_ip entry to its .orc_unwind counterpart.
 */
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;
0200 
/*
 * sort() swap callback: swap a pair of .orc_unwind_ip entries along with
 * their corresponding .orc_unwind entries.
 */
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/*
	 * Swap the .orc_unwind_ip entries.  Each entry is a self-relative
	 * offset (see orc_ip()), so when an entry moves by 'delta' bytes its
	 * stored value must be adjusted by the same amount to keep decoding
	 * to the same absolute address.
	 */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}
0220 
/*
 * sort() comparison callback: order .orc_unwind_ip entries by their decoded
 * address, breaking ties so that "weak" terminator entries sort before real
 * entries at the same address.
 */
static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}
0242 
/*
 * Sort and register a module's ORC tables so the unwinder can use them.
 * @_orc_ip/@orc_ip_size and @_orc/@orc_size are the module's .orc_unwind_ip
 * and .orc_unwind sections; they must contain the same number of entries.
 */
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	/* The two sections must be parallel arrays of equal length. */
	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
0269 #endif
0270 
/*
 * Validate the vmlinux ORC tables and build the orc_lookup[] fast lookup
 * table.  On any inconsistency the unwinder is left disabled (orc_init
 * stays false).
 */
void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	/* The two sections must be non-empty, parallel arrays of equal length. */
	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * It's ready for binary search straight away, no need to sort it.
	 */

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		/* Each block caches the table index for the block's start IP. */
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}
0317 
0318 unsigned long unwind_get_return_address(struct unwind_state *state)
0319 {
0320     if (unwind_done(state))
0321         return 0;
0322 
0323     return __kernel_text_address(state->ip) ? state->ip : 0;
0324 }
0325 EXPORT_SYMBOL_GPL(unwind_get_return_address);
0326 
0327 unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
0328 {
0329     if (unwind_done(state))
0330         return NULL;
0331 
0332     if (state->regs)
0333         return &state->regs->ip;
0334 
0335     if (state->sp)
0336         return (unsigned long *)state->sp - 1;
0337 
0338     return NULL;
0339 }
0340 
0341 static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
0342                 size_t len)
0343 {
0344     struct stack_info *info = &state->stack_info;
0345     void *addr = (void *)_addr;
0346 
0347     if (on_stack(info, addr, len))
0348         return true;
0349 
0350     return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
0351         on_stack(info, addr, len);
0352 }
0353 
0354 static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
0355                 unsigned long *val)
0356 {
0357     if (!stack_access_ok(state, addr, sizeof(long)))
0358         return false;
0359 
0360     *val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
0361     return true;
0362 }
0363 
0364 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
0365                  unsigned long *ip, unsigned long *sp)
0366 {
0367     struct pt_regs *regs = (struct pt_regs *)addr;
0368 
0369     /* x86-32 support will be more complicated due to the &regs->sp hack */
0370     BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
0371 
0372     if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
0373         return false;
0374 
0375     *ip = READ_ONCE_NOCHECK(regs->ip);
0376     *sp = READ_ONCE_NOCHECK(regs->sp);
0377     return true;
0378 }
0379 
0380 static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
0381                   unsigned long *ip, unsigned long *sp)
0382 {
0383     struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
0384 
0385     if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
0386         return false;
0387 
0388     *ip = READ_ONCE_NOCHECK(regs->ip);
0389     *sp = READ_ONCE_NOCHECK(regs->sp);
0390     return true;
0391 }
0392 
0393 /*
0394  * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
0395  * value from state->regs.
0396  *
0397  * Otherwise, if state->regs just points to IRET regs, and the previous frame
0398  * had full regs, it's safe to get the value from the previous regs.  This can
0399  * happen when early/late IRQ entry code gets interrupted by an NMI.
0400  */
0401 static bool get_reg(struct unwind_state *state, unsigned int reg_off,
0402             unsigned long *val)
0403 {
0404     unsigned int reg = reg_off/8;
0405 
0406     if (!state->regs)
0407         return false;
0408 
0409     if (state->full_regs) {
0410         *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
0411         return true;
0412     }
0413 
0414     if (state->prev_regs) {
0415         *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
0416         return true;
0417     }
0418 
0419     return false;
0420 }
0421 
/*
 * Advance @state one frame up the stack using the ORC entry for the current
 * IP: compute the previous frame's SP, recover its IP (and possibly a regs
 * pointer), then its BP.  Returns true on success; on error or end-of-stack,
 * marks the stack type unknown and returns false.
 */
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * For a call frame (as opposed to a signal frame), state->ip points to
	 * the instruction after the call.  That instruction's stack layout
	 * could be different from the call instruction's layout, for example
	 * if the call was to a noreturn function.  So get the ORC data for the
	 * call instruction itself.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		/* Only a marked "end" entry is a legitimate stack end. */
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		/* sp_offset is applied after the dereference, below. */
		sp = state->sp;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing DX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		/* The computed address holds a pointer to the actual SP. */
		if (!deref_stack_reg(state, sp, &sp))
			goto err;

		if (orc->sp_reg == ORC_REG_SP_INDIRECT)
			sp += orc->sp_offset;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case UNWIND_HINT_TYPE_CALL:
		/* The return address sits one word below the previous SP. */
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = unwind_recover_ret_addr(state, state->ip,
						    (unsigned long *)ip_p);
		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		state->signal = false;
		break;

	case UNWIND_HINT_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/*
		 * There is a small chance to interrupt at the entry of
		 * arch_rethook_trampoline() where the ORC info doesn't exist.
		 * That point is right after the RET to arch_rethook_trampoline()
		 * which was modified return address.
		 * At that point, the @addr_p of the unwind_recover_rethook()
		 * (this has to point the address of the stack entry storing
		 * the modified return address) must be "SP - (a stack entry)"
		 * because SP is incremented by the RET.
		 */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));
		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		state->signal = true;
		break;

	case UNWIND_HINT_TYPE_REGS_PARTIAL:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}
		/* See UNWIND_HINT_TYPE_REGS case comment. */
		state->ip = unwind_recover_rethook(state, state->ip,
				(unsigned long *)(state->sp - sizeof(long)));

		/* Keep the previous full regs so get_reg() can fall back. */
		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		/* BP unchanged unless a regs frame supplies a value. */
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
0641 
/*
 * Initialize @state to begin unwinding @task, starting either from @regs
 * (if non-NULL), from the caller's own frame (task == current), or from the
 * saved frame of an inactive task.  Then skip ahead to @first_frame.
 */
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	/* ORC tables failed validation at boot; nothing we can do. */
	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		/* Capture this function's own ip/sp/bp as the starting frame. */
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		/* Inactive task: start from its switched-out stack frame. */
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp + sizeof(*frame);
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
		state->signal = (void *)state->ip == ret_from_fork;
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp < (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);