/*
 * Stack unwinding support for ARM.
 *
 * An ARM EABI toolchain is required to generate the unwind tables
 * (hence the warnings below). The table and instruction format is
 * described in the "Exception Handling ABI for the ARM Architecture"
 * (EHABI) document.
 */

#ifndef __CHECKER__
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning ARM unwind is known to compile only with EABI compilers.
#warning Change compiler or disable ARM_UNWIND option.
#endif
#endif

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "reboot.h"

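/*
 * Dummy personality routines. EABI compilers emit references to these
 * symbols in the unwind tables; empty stubs are provided (and exported for
 * modules) only to satisfy the linker, since the kernel unwinder below
 * never calls them.
 */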
void __aeabi_unwind_cpp_pr0(void)
{
}
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);

void __aeabi_unwind_cpp_pr1(void)
{
}
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);

void __aeabi_unwind_cpp_pr2(void)
{
}
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);

struct unwind_ctrl_block {
        unsigned long vrs[16];          /* virtual register set */
        const unsigned long *insn;      /* pointer to the current instructions word */
        unsigned long sp_high;          /* highest value of sp allowed */
        unsigned long *lr_addr;         /* address of LR value on the stack */
        /*
         * 1 : check for stack overflow for each register pop.
         * 0 : save overhead if there is plenty of stack remaining.
         */
        int check_each_pop;
        int entries;                    /* number of entries left to interpret */
        int byte;                       /* current byte number in the instructions word */
};

enum regs {
#ifdef CONFIG_THUMB2_KERNEL
        FP = 7,
#else
        FP = 11,
#endif
        SP = 13,
        LR = 14,
        PC = 15
};

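/*
 * __start_unwind_idx and __stop_unwind_idx delimit the built-in unwind
 * index table laid out by the linker; __origin_unwind_idx caches the first
 * entry with a positive offset (computed lazily in unwind_find_idx()).
 */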
extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
#define prel31_to_addr(ptr)                             \
({                                                      \
        /* sign-extend to 32 bits */                    \
        long offset = (((long)*(ptr)) << 1) >> 1;       \
        (unsigned long)(ptr) + offset;                  \
})

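/*
 * Worked example (illustrative values only): if the index word at address
 * 0xc0100000 holds 0x7ffffff8, dropping the top bit and sign-extending
 * gives an offset of -8, so prel31_to_addr() yields 0xc00ffff8.
 */
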
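/*
 * Binary search in the unwind index. The entries are guaranteed to be
 * sorted in ascending order by the linker.
 *
 * start = first entry
 * origin = first entry with positive offset (or stop if there is no such entry)
 * stop - 1 = last entry
 */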
static const struct unwind_idx *search_index(unsigned long addr,
                                             const struct unwind_idx *start,
                                             const struct unwind_idx *origin,
                                             const struct unwind_idx *stop)
{
        unsigned long addr_prel31;

        pr_debug("%s(%08lx, %p, %p, %p)\n",
                 __func__, addr, start, origin, stop);

        /*
         * Only search in the section with the matching sign. This way the
         * prel31 numbers can be compared as unsigned longs.
         */
        if (addr < (unsigned long)start)
                /* negative offsets: [start, origin) */
                stop = origin;
        else
                /* positive offsets: [origin, stop) */
                start = origin;

        /* prel31 for address relative to start */
        addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;

        while (start < stop - 1) {
                const struct unwind_idx *mid = start + ((stop - start) >> 1);

                /*
                 * As addr_prel31 is relative to start, an offset is needed
                 * to make it relative to mid.
                 */
                if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
                    mid->addr_offset)
                        stop = mid;
                else {
                        /* keep addr_prel31 relative to start */
                        addr_prel31 -= ((unsigned long)mid -
                                        (unsigned long)start);
                        start = mid;
                }
        }

        if (likely(start->addr_offset <= addr_prel31))
                return start;
        else {
                pr_warn("unwind: Unknown symbol address %08lx\n", addr);
                return NULL;
        }
}

static const struct unwind_idx *unwind_find_origin(
                const struct unwind_idx *start, const struct unwind_idx *stop)
{
        pr_debug("%s(%p, %p)\n", __func__, start, stop);
        while (start < stop) {
                const struct unwind_idx *mid = start + ((stop - start) >> 1);

                if (mid->addr_offset >= 0x40000000)
                        /* negative offset */
                        start = mid + 1;
                else
                        /* positive offset */
                        stop = mid;
        }
        pr_debug("%s -> %p\n", __func__, stop);
        return stop;
}

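/*
 * Find the unwind index entry covering @addr, either in the main kernel
 * index or in one of the dynamically registered (module) tables.
 */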
static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
        const struct unwind_idx *idx = NULL;
        unsigned long flags;

        pr_debug("%s(%08lx)\n", __func__, addr);

        if (core_kernel_text(addr)) {
                if (unlikely(!__origin_unwind_idx))
                        __origin_unwind_idx =
                                unwind_find_origin(__start_unwind_idx,
                                                   __stop_unwind_idx);

                /* main unwind table */
                idx = search_index(addr, __start_unwind_idx,
                                   __origin_unwind_idx,
                                   __stop_unwind_idx);
        } else {
                /* module unwind tables */
                struct unwind_table *table;

                raw_spin_lock_irqsave(&unwind_lock, flags);
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
                                idx = search_index(addr, table->start,
                                                   table->origin,
                                                   table->stop);
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&unwind_lock, flags);
        }

        pr_debug("%s: idx = %p\n", __func__, idx);
        return idx;
}

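/*
 * Fetch the next opcode byte from the unwind instruction stream. Bytes are
 * consumed from the most significant byte of each 32-bit word downwards.
 */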
static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
        unsigned long ret;

        if (ctrl->entries <= 0) {
                pr_warn("unwind: Corrupt unwind table\n");
                return 0;
        }

        ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;

        if (ctrl->byte == 0) {
                ctrl->insn++;
                ctrl->entries--;
                ctrl->byte = 3;
        } else
                ctrl->byte--;

        return ret;
}

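/*
 * Pop a register off the virtual stack, checking (when requested) that the
 * read does not run past the top of the stack.
 */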
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
                               unsigned long **vsp, unsigned int reg)
{
        if (unlikely(ctrl->check_each_pop))
                if (*vsp >= (unsigned long *)ctrl->sp_high)
                        return -URC_FAILURE;

        /*
         * Use READ_ONCE_NOCHECK here to avoid this memory access from being
         * tracked by KASAN.
         */
        ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
        if (reg == 14)
                ctrl->lr_addr = *vsp;
        (*vsp)++;
        return URC_OK;
}

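/* Helper functions to execute the individual unwind instructions */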
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
                                            unsigned long mask)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        int load_sp, reg = 4;

        /* pop R4-R15 according to mask */
        load_sp = mask & (1 << (13 - 4));
        while (mask) {
                if (mask & 1)
                        if (unwind_pop_register(ctrl, &vsp, reg))
                                return -URC_FAILURE;
                mask >>= 1;
                reg++;
        }
        if (!load_sp) {
                ctrl->vrs[SP] = (unsigned long)vsp;
        }

        return URC_OK;
}

static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
                                    unsigned long insn)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        int reg;

        /* pop R4-R[4+bbb] */
        for (reg = 4; reg <= 4 + (insn & 7); reg++)
                if (unwind_pop_register(ctrl, &vsp, reg))
                        return -URC_FAILURE;

        if (insn & 0x8)
                if (unwind_pop_register(ctrl, &vsp, 14))
                        return -URC_FAILURE;

        ctrl->vrs[SP] = (unsigned long)vsp;

        return URC_OK;
}

static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
                                           unsigned long mask)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        int reg = 0;

        /* pop R0-R3 according to mask */
        while (mask) {
                if (mask & 1)
                        if (unwind_pop_register(ctrl, &vsp, reg))
                                return -URC_FAILURE;
                mask >>= 1;
                reg++;
        }
        ctrl->vrs[SP] = (unsigned long)vsp;

        return URC_OK;
}

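/*
 * Execute the current unwind instruction. The opcodes handled below cover
 * stack pointer adjustments, register pops and the "finish" / "refuse to
 * unwind" encodings.
 */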
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
        unsigned long insn = unwind_get_byte(ctrl);
        int ret = URC_OK;

        pr_debug("%s: insn = %08lx\n", __func__, insn);

        if ((insn & 0xc0) == 0x00)
                ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
        else if ((insn & 0xc0) == 0x40) {
                ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
        } else if ((insn & 0xf0) == 0x80) {
                unsigned long mask;

                insn = (insn << 8) | unwind_get_byte(ctrl);
                mask = insn & 0x0fff;
                if (mask == 0) {
                        pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n",
                                insn);
                        return -URC_FAILURE;
                }

                ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
                if (ret)
                        goto error;
        } else if ((insn & 0xf0) == 0x90 &&
                   (insn & 0x0d) != 0x0d) {
                ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
        } else if ((insn & 0xf0) == 0xa0) {
                ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
                if (ret)
                        goto error;
        } else if (insn == 0xb0) {
                if (ctrl->vrs[PC] == 0)
                        ctrl->vrs[PC] = ctrl->vrs[LR];
                /* no further processing */
                ctrl->entries = 0;
        } else if (insn == 0xb1) {
                unsigned long mask = unwind_get_byte(ctrl);

                if (mask == 0 || mask & 0xf0) {
                        pr_warn("unwind: Spare encoding %04lx\n",
                                (insn << 8) | mask);
                        return -URC_FAILURE;
                }

                ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
                if (ret)
                        goto error;
        } else if (insn == 0xb2) {
                unsigned long uleb128 = unwind_get_byte(ctrl);

                ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
        } else {
                pr_warn("unwind: Unhandled instruction %02lx\n", insn);
                return -URC_FAILURE;
        }

        pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
                 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);

error:
        return ret;
}

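/*
 * Unwind a single frame starting with *sp for the symbol at *pc. It
 * updates *pc and *sp with the new values.
 */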
int unwind_frame(struct stackframe *frame)
{
        const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
        unsigned long sp_low;

        /* store the highest address on the stack to avoid crossing it */
        sp_low = frame->sp;
        ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
                       + THREAD_SIZE;

        pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
                 frame->pc, frame->lr, frame->sp);

        idx = unwind_find_idx(frame->pc);
        if (!idx) {
                if (frame->pc && kernel_text_address(frame->pc))
                        pr_warn("unwind: Index not found %08lx\n", frame->pc);
                return -URC_FAILURE;
        }

        ctrl.vrs[FP] = frame->fp;
        ctrl.vrs[SP] = frame->sp;
        ctrl.vrs[LR] = frame->lr;
        ctrl.vrs[PC] = 0;

        if (idx->insn == 1)
                /* can't unwind */
                return -URC_FAILURE;
        else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
                /*
                 * The PC is at the very first instruction of the function,
                 * so nothing has been pushed onto the stack yet and the
                 * return address is still in LR. Resolve PC to LR directly;
                 * bail out if PC already equals LR to avoid an infinite
                 * loop.
                 */
                if (frame->pc == frame->lr)
                        return -URC_FAILURE;
                frame->pc = frame->lr;
                return URC_OK;
        } else if ((idx->insn & 0x80000000) == 0)
                /* prel31 to the unwind table */
                ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
        else if ((idx->insn & 0xff000000) == 0x80000000)
                /* only personality routine 0 supported in the index */
                ctrl.insn = &idx->insn;
        else {
                pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n",
                        idx->insn, idx);
                return -URC_FAILURE;
        }

        /* check the personality routine */
        if ((*ctrl.insn & 0xff000000) == 0x80000000) {
                ctrl.byte = 2;
                ctrl.entries = 1;
        } else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
                ctrl.byte = 1;
                ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
        } else {
                pr_warn("unwind: Unsupported personality routine %08lx at %p\n",
                        *ctrl.insn, ctrl.insn);
                return -URC_FAILURE;
        }

        ctrl.check_each_pop = 0;

        if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
                /*
                 * call_with_stack() is the only place where we permit SP to
                 * jump from one stack to another, so set up the SP bounds
                 * accordingly.
                 */
                sp_low = frame->fp;
                ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
        }

        while (ctrl.entries > 0) {
                int urc;
                if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
                        ctrl.check_each_pop = 1;
                urc = unwind_exec_insn(&ctrl);
                if (urc < 0)
                        return urc;
                if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
                        return -URC_FAILURE;
        }

        if (ctrl.vrs[PC] == 0)
                ctrl.vrs[PC] = ctrl.vrs[LR];

        /* check for infinite loop */
        if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
                return -URC_FAILURE;

        frame->fp = ctrl.vrs[FP];
        frame->sp = ctrl.vrs[SP];
        frame->lr = ctrl.vrs[LR];
        frame->pc = ctrl.vrs[PC];
        frame->lr_addr = ctrl.lr_addr;

        return URC_OK;
}

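/*
 * Dump a backtrace using the unwind tables, starting either from the
 * exception registers (if @regs is set), from the current call site, or
 * from the context saved by the last context switch of @tsk.
 */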
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                      const char *loglvl)
{
        struct stackframe frame;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (regs) {
                arm_get_current_stackframe(regs, &frame);
                /* PC might be corrupted, use LR in that case. */
                if (!kernel_text_address(regs->ARM_pc))
                        frame.pc = regs->ARM_lr;
        } else if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
                frame.lr = (unsigned long)__builtin_return_address(0);
                /*
                 * Use the address of the label below as the starting PC so
                 * that the backtrace begins at this point in this function.
                 */
here:
                frame.pc = (unsigned long)&&here;
        } else {
                /* task blocked in __switch_to */
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                /*
                 * The function calling __switch_to cannot be a leaf function
                 * so LR is recovered from the stack.
                 */
                frame.lr = 0;
                frame.pc = thread_saved_pc(tsk);
        }

        while (1) {
                int urc;
                unsigned long where = frame.pc;

                urc = unwind_frame(&frame);
                if (urc < 0)
                        break;
                dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
        }
}

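/*
 * Register an unwind index table covering [text_addr, text_addr + text_size).
 * This is typically used for a module's unwind index section; the table can
 * later be removed with unwind_table_del().
 */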
struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                      unsigned long text_addr,
                                      unsigned long text_size)
{
        unsigned long flags;
        struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);

        pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
                 text_addr, text_size);

        if (!tab)
                return tab;

        tab->start = (const struct unwind_idx *)start;
        tab->stop = (const struct unwind_idx *)(start + size);
        tab->origin = unwind_find_origin(tab->start, tab->stop);
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;

        raw_spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
        raw_spin_unlock_irqrestore(&unwind_lock, flags);

        return tab;
}

void unwind_table_del(struct unwind_table *tab)
{
        unsigned long flags;

        if (!tab)
                return;

        raw_spin_lock_irqsave(&unwind_lock, flags);
        list_del(&tab->list);
        raw_spin_unlock_irqrestore(&unwind_lock, flags);

        kfree(tab);
}