0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/bug.h>
0010 #include <linux/context_tracking.h>
0011 #include <linux/signal.h>
0012 #include <linux/kallsyms.h>
0013 #include <linux/kprobes.h>
0014 #include <linux/spinlock.h>
0015 #include <linux/uaccess.h>
0016 #include <linux/hardirq.h>
0017 #include <linux/kdebug.h>
0018 #include <linux/module.h>
0019 #include <linux/kexec.h>
0020 #include <linux/delay.h>
0021 #include <linux/init.h>
0022 #include <linux/sched/signal.h>
0023 #include <linux/sched/debug.h>
0024 #include <linux/sched/task_stack.h>
0025 #include <linux/sizes.h>
0026 #include <linux/syscalls.h>
0027 #include <linux/mm_types.h>
0028 #include <linux/kasan.h>
0029
0030 #include <asm/atomic.h>
0031 #include <asm/bug.h>
0032 #include <asm/cpufeature.h>
0033 #include <asm/daifflags.h>
0034 #include <asm/debug-monitors.h>
0035 #include <asm/esr.h>
0036 #include <asm/exception.h>
0037 #include <asm/extable.h>
0038 #include <asm/insn.h>
0039 #include <asm/kprobes.h>
0040 #include <asm/patching.h>
0041 #include <asm/traps.h>
0042 #include <asm/smp.h>
0043 #include <asm/stack_pointer.h>
0044 #include <asm/stacktrace.h>
0045 #include <asm/system_misc.h>
0046 #include <asm/sysreg.h>
0047
/*
 * AArch32 condition-code checkers. Each returns true when the condition
 * named by its suffix holds for the NZCV flags in the supplied PSTATE
 * value. The shifted/xor'd variants below exploit the fixed bit layout
 * of PSR_{N,Z,C,V}_BIT to combine two flags in a single operation.
 */
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	/* PSR_N_BIT |= PSR_Z_BIT */
	temp |= (pstate << 1);
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	/* PSR_N_BIT |= PSR_Z_BIT */
	temp |= (pstate << 1);
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Dispatch table indexed by the 4-bit AArch32 condition field.
 * Note that cond 0b1111 (the "nv" encoding) executes unconditionally,
 * hence the second __check_al entry.
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
0145
/* When non-zero, log unhandled fatal signals via arm64_show_signal(). */
int show_unhandled_signals = 0;
0147
0148 static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
0149 {
0150 unsigned long addr = instruction_pointer(regs);
0151 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
0152 int i;
0153
0154 if (user_mode(regs))
0155 return;
0156
0157 for (i = -4; i < 1; i++) {
0158 unsigned int val, bad;
0159
0160 bad = aarch64_insn_read(&((u32 *)addr)[i], &val);
0161
0162 if (!bad)
0163 p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
0164 else {
0165 p += sprintf(p, "bad PC value");
0166 break;
0167 }
0168 }
0169
0170 printk("%sCode: %s\n", lvl, str);
0171 }
0172
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

/*
 * Dump the oops banner, modules, registers and code. Die-chain notifiers
 * get a chance to suppress the report by returning NOTIFY_STOP.
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}
0203
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy by die_lock. Note that
 * the in_interrupt()/panic_on_oops paths call panic() while still holding
 * the lock, which is fine since panic() never returns.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	/* Kill the task unless a notifier claimed the exception. */
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}
0239
/*
 * Rate-limited diagnostic for signals we are about to deliver to a user
 * task: prints the offending task, ESR, fault description and registers.
 */
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned long esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%016lx, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}
0263
0264 void arm64_force_sig_fault(int signo, int code, unsigned long far,
0265 const char *str)
0266 {
0267 arm64_show_signal(signo, str);
0268 if (signo == SIGKILL)
0269 force_sig(SIGKILL);
0270 else
0271 force_sig_fault(signo, code, (void __user *)far);
0272 }
0273
0274 void arm64_force_sig_mceerr(int code, unsigned long far, short lsb,
0275 const char *str)
0276 {
0277 arm64_show_signal(SIGBUS, str);
0278 force_sig_mceerr(code, (void __user *)far, lsb);
0279 }
0280
0281 void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far,
0282 const char *str)
0283 {
0284 arm64_show_signal(SIGTRAP, str);
0285 force_sig_ptrace_errno_trap(errno, (void __user *)far);
0286 }
0287
/*
 * Report an exception: as a fatal signal if it was taken from user mode,
 * otherwise as a kernel die().
 */
void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, unsigned long far,
		      unsigned long err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		/* Record fault details for arm64_show_signal()/debugging. */
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, far, str);
	} else {
		die(str, regs, err);
	}
}
0302
#ifdef CONFIG_COMPAT
/* PSTATE.IT is split across two non-contiguous fields: IT[1:0] and IT[7:2]. */
#define PSTATE_IT_1_0_SHIFT 25
#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT 10
#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)

/* Reassemble the 8-bit Thumb ITSTATE from its two split PSTATE fields. */
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}
0318
/* Scatter an 8-bit Thumb ITSTATE back into the two split PSTATE.IT fields. */
static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}
0329
/*
 * Advance the Thumb IT block state after emulating/skipping an
 * instruction, mirroring what the hardware would do on retirement.
 */
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode, or no IT block currently active: nothing to do. */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it by shifting the condition mask.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
/* Without CONFIG_COMPAT there is no AArch32 IT state to maintain. */
static void advance_itstate(struct pt_regs *regs)
{
}
#endif
0357
/*
 * Advance the PC past an instruction that was emulated or deliberately
 * skipped, keeping the single-step, ITSTATE and BTYPE machinery coherent.
 */
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}
0374
/* Hooks consulted by call_undef_hook() for undefined-instruction traps. */
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

/* Add @hook to the list of undefined-instruction handlers. */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
0386
/* Remove a previously registered undefined-instruction handler. */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
0395
/*
 * Fetch the faulting instruction (kernel, Thumb or ARM encoding as
 * appropriate) and run the last matching registered hook on it.
 *
 * Returns the hook's result, or 1 if no hook matched or the instruction
 * could not be read (i.e. the caller should treat it as unhandled).
 */
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (get_kernel_nofault(instr_le, (__le32 *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			/* 32-bit Thumb-2: fetch and append the second halfword */
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
0441
/*
 * Inject a fault signal into the current (user-mode) task with a
 * human-readable description for the unhandled-signal log.
 */
void force_signal_inject(int signal, int code, unsigned long address, unsigned long err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, address, err);
}
0470
0471
0472
0473
0474 void arm64_notify_segfault(unsigned long addr)
0475 {
0476 int code;
0477
0478 mmap_read_lock(current->mm);
0479 if (find_vma(current->mm, untagged_addr(addr)) == NULL)
0480 code = SEGV_MAPERR;
0481 else
0482 code = SEGV_ACCERR;
0483 mmap_read_unlock(current->mm);
0484
0485 force_signal_inject(SIGSEGV, code, addr, 0);
0486 }
0487
/*
 * Handle an undefined-instruction exception: try the AArch32 breakpoint
 * handler and registered emulation hooks before signalling the task.
 */
void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint on EL1 */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	/* An unhandled undefined instruction in the kernel is fatal. */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);
0501
/* Handle a Branch Target Identification exception taken from EL0. */
void do_bti(struct pt_regs *regs)
{
	/* BTI exceptions from the kernel are unexpected and fatal. */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);
0508
/* Handle a pointer-authentication (FPAC) failure taken from EL0. */
void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
{
	/*
	 * An authentication failure in the kernel is unexpected and
	 * fatal; from user space, kill the task before it can do any
	 * more harm with a forged pointer.
	 */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);
0519
/*
 * Perform one cache-maintenance instruction @insn on a user @address,
 * with a uaccess window open and an extable fixup so a faulting address
 * yields -EFAULT in @res instead of an oops. Addresses outside the user
 * range are rejected up front.
 */
#define __user_cache_maint(insn, address, res) \
	if (address >= TASK_SIZE_MAX) { \
		res = -EFAULT; \
	} else { \
		uaccess_ttbr0_enable(); \
		asm volatile ( \
			"1: " insn ", %1\n" \
			" mov %w0, #0\n" \
			"2:\n" \
			_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
			: "=r" (res) \
			: "r" (address)); \
		uaccess_ttbr0_disable(); \
	}
0534
/*
 * Emulate EL0 cache-maintenance instructions that were trapped to EL1.
 * On a faulting user address we deliver a segfault for the original
 * (tagged) address; on success the instruction is skipped.
 */
static void user_cache_maint_handler(unsigned long esr, struct pt_regs *regs)
{
	unsigned long tagged_address, address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	tagged_address = pt_regs_read_reg(regs, rt);
	address = untagged_addr(tagged_address);

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(tagged_address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
0574
/*
 * Emulate a trapped EL0 read of CTR_EL0, returning the sanitised
 * user-visible value.
 */
static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance... */
		val &= ~BIT(CTR_EL0_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_EL0_IminLine_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_EL0_IminLine_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
0593
0594 static void cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
0595 {
0596 int rt = ESR_ELx_SYS64_ISS_RT(esr);
0597
0598 pt_regs_write_reg(regs, rt, arch_timer_read_counter());
0599 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
0600 }
0601
0602 static void cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
0603 {
0604 int rt = ESR_ELx_SYS64_ISS_RT(esr);
0605
0606 pt_regs_write_reg(regs, rt, arch_timer_get_rate());
0607 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
0608 }
0609
0610 static void mrs_handler(unsigned long esr, struct pt_regs *regs)
0611 {
0612 u32 sysreg, rt;
0613
0614 rt = ESR_ELx_SYS64_ISS_RT(esr);
0615 sysreg = esr_sys64_to_sysreg(esr);
0616
0617 if (do_emulate_mrs(regs, sysreg, rt) != 0)
0618 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
0619 }
0620
/* Trapped WFI from EL0: simply retire the instruction (WFI is a hint). */
static void wfi_handler(unsigned long esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
0625
/* One trapped-system-instruction emulation entry: matched on masked ESR. */
struct sys64_hook {
	unsigned long esr_mask;		/* bits of ESR_ELx to compare */
	unsigned long esr_val;		/* expected value under esr_mask */
	void (*handler)(unsigned long esr, struct pt_regs *regs);
};
0631
static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTVCTSS_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCTSS,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
0676
#ifdef CONFIG_COMPAT
/*
 * Decide whether a trapped conditional CP15 access would actually have
 * executed, using the ESR condition field when valid and the IT state
 * otherwise.
 */
static bool cp15_cond_valid(unsigned long esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}
0697
0698 static void compat_cntfrq_read_handler(unsigned long esr, struct pt_regs *regs)
0699 {
0700 int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
0701
0702 pt_regs_write_reg(regs, reg, arch_timer_get_rate());
0703 arm64_skip_faulting_instruction(regs, 4);
0704 }
0705
/* Emulation hooks for trapped 32-bit CP15 (MCR/MRC) accesses. */
static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};
0714
0715 static void compat_cntvct_read_handler(unsigned long esr, struct pt_regs *regs)
0716 {
0717 int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
0718 int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
0719 u64 val = arch_timer_read_counter();
0720
0721 pt_regs_write_reg(regs, rt, lower_32_bits(val));
0722 pt_regs_write_reg(regs, rt2, upper_32_bits(val));
0723 arm64_skip_faulting_instruction(regs, 4);
0724 }
0725
/* Emulation hooks for trapped 64-bit CP15 (MCRR/MRRC) accesses. */
static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCTSS,
		.handler = compat_cntvct_read_handler,
	},
	{},
};
0739
/*
 * Handle a trapped AArch32 CP15 access: skip it if its condition fails,
 * otherwise dispatch to the matching emulation hook.
 */
void do_cp15instr(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Everything else is UNDEFINED.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif
0780
/*
 * Handle a trapped AArch64 MSR/MRS/SYS instruction by dispatching to the
 * matching emulation hook, falling back to undefined-instruction handling.
 */
void do_sysinstr(unsigned long esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at
	 * EL0. Everything else is UNDEFINED.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_sysinstr);
0799
/* Human-readable names for each ESR_ELx exception class, indexed by EC. */
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx] = "WFI/WFE",
	[ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD] = "ASIMD",
	[ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC] = "PAC",
	[ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI] = "BTI",
	[ESR_ELx_EC_ILL] = "PSTATE.IL",
	[ESR_ELx_EC_SVC32] = "SVC (AArch32)",
	[ESR_ELx_EC_HVC32] = "HVC (AArch32)",
	[ESR_ELx_EC_SMC32] = "SMC (AArch32)",
	[ESR_ELx_EC_SVC64] = "SVC (AArch64)",
	[ESR_ELx_EC_HVC64] = "HVC (AArch64)",
	[ESR_ELx_EC_SMC64] = "SMC (AArch64)",
	[ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE] = "SVE",
	[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC] = "FPAC",
	[ESR_ELx_EC_SME] = "SME",
	[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
	[ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
	[ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
	[ESR_ELx_EC_SERROR] = "SError",
	[ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64] = "BRK (AArch64)",
};
0845
/* Return the human-readable name for the exception class encoded in @esr. */
const char *esr_get_class_string(unsigned long esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}
0850
0851
0852
0853
0854
/*
 * bad_el0_sync handles unexpected, but potentially recoverable,
 * synchronous exceptions taken from EL0: kill the task with SIGILL.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr)
{
	unsigned long pc = instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}
0865
#ifdef CONFIG_VMAP_STACK

/* Per-CPU emergency stack used while handling a kernel stack overflow. */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

/*
 * Report a kernel stack overflow and panic. Runs on the per-CPU overflow
 * stack, so it must never return to the overflowed context.
 */
void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%016lx -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows,
	 * and to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif
0900
/* Log an unrecoverable SError interrupt and panic; never returns. */
void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%016lx -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	/* Should not be reached: park the CPU as a last resort. */
	cpu_park_loop();
	unreachable();
}
0915
/*
 * Classify a RAS SError by its Asynchronous Error Type severity field.
 * Returns false if the error is benign enough to continue, true if it is
 * fatal to the interrupted context, and panics for uncontainable errors.
 */
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
{
	unsigned long aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress in the interrupted context,
		 * but the error has not silently propagated beyond it.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}
0947
/* Top-level SError handler: panic unless the RAS error is survivable. */
void do_serror(struct pt_regs *regs, unsigned long esr)
{
	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);
}
0954
0955
0956
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only gets called for BRK #BUG_BRK_IMM, so the
	 * answer is trivially "yes": any spurious instance with no bug
	 * table entry will be rejected by report_bug() and passed back
	 * to the debug-monitors code as an unexpected debug exception.
	 */
	return 1;
}
0968
/* Break-hook handler for BUG()/WARN() BRK instructions. */
static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}
0988
/* Registered in trap_init() to catch BRK #BUG_BRK_IMM. */
static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};
0993
/* Break-hook handler for the BRK placed by failed kernel text patching. */
static int reserved_fault_handler(struct pt_regs *regs, unsigned long esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
	       "Kernel text patching",
	       (void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}
1003
/* Registered in trap_init() to catch BRK #FAULT_BRK_IMM. */
static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};
1008
#ifdef CONFIG_KASAN_SW_TAGS

/* Fields of the KASAN BRK immediate (see KASAN_BRK_IMM/KASAN_BRK_MASK). */
#define KASAN_ESR_RECOVER 0x20
#define KASAN_ESR_WRITE 0x10
#define KASAN_ESR_SIZE_MASK 0x0f
#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))

/*
 * Break-hook handler for software tag-based KASAN BRK instructions:
 * decode the access details from the BRK immediate and report them.
 */
static int kasan_handler(struct pt_regs *regs, unsigned long esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	/* By convention the compiler passes the faulting address in x0. */
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows to control whether we can proceed after
	 * a crash was detected. This is done by passing the -recover flag to
	 * the compiler. Disabling recovery allows to generate more compact
	 * code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

/* Registered in trap_init(); matches all KASAN BRK immediates via mask. */
static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
#endif
1054
1055
1056
1057
1058
/*
 * Initial handler for AArch64 BRK exceptions. Only used until the break
 * hooks are registered by trap_init()/debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned long esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
1070
/* Register the kernel break hooks and initialise the debug trap machinery. */
void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
	debug_traps_init();
}