0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/context_tracking.h>
0009 #include <linux/kasan.h>
0010 #include <linux/linkage.h>
0011 #include <linux/lockdep.h>
0012 #include <linux/ptrace.h>
0013 #include <linux/sched.h>
0014 #include <linux/sched/debug.h>
0015 #include <linux/thread_info.h>
0016
0017 #include <asm/cpufeature.h>
0018 #include <asm/daifflags.h>
0019 #include <asm/esr.h>
0020 #include <asm/exception.h>
0021 #include <asm/irq_regs.h>
0022 #include <asm/kprobes.h>
0023 #include <asm/mmu.h>
0024 #include <asm/processor.h>
0025 #include <asm/sdei.h>
0026 #include <asm/stacktrace.h>
0027 #include <asm/sysreg.h>
0028 #include <asm/system_misc.h>
0029
0030
0031
0032
0033
0034
0035
0036
0037
/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	/*
	 * An exception taken from the idle task may have interrupted a
	 * context RCU was not watching; inform RCU via ct_irq_enter() and
	 * record in regs->exit_rcu that the matching exit path must balance
	 * this with ct_irq_exit().
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}
0055
/*
 * Kernel-mode entry wrapper: IRQ/RCU state first, then MTE tag-fault and
 * TCO handling for the interrupted kernel context.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}
0062
0063
0064
0065
0066
0067
0068
0069
0070
/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			/* Balance the ct_irq_enter() from the entry path. */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}
0090
/*
 * Kernel-mode exit wrapper: check for pending MTE tag faults before the
 * IRQ/RCU state is unwound.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}
0096
0097
0098
0099
0100
0101
/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	/* We should only ever get here with context tracking in USER state. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}
0110
/* Entry from EL0; regs is currently unused but kept for symmetry with EL1. */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}
0115
0116
0117
0118
0119
0120
/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
0128
/*
 * Run pending return-to-user work (_TIF_WORK_MASK, handled by
 * do_notify_resume()) with all DAIF exceptions masked so no new work can be
 * queued behind our back once the flags have been read.
 */
static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}
0139
/* Full exit-to-user sequence: pending work, MTE checks, then IRQ/CT state. */
static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}
0146
/* Called from the entry assembly when returning to EL0. */
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}
0151
0152
0153
0154
0155
0156
/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	/* Save lockdep's view so the exit path can restore it faithfully. */
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}
0169
0170
0171
0172
0173
0174
/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	/* Restore the lockdep hardirq state saved by arm64_enter_nmi(). */
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
0191
0192
0193
0194
0195
0196
/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an
 * exception. Debug exceptions use NMI-style context tracking (ct_nmi_enter())
 * since they can interrupt almost any kernel context.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}
0206
0207
0208
0209
0210
0211
/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an
 * exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	/* Restore the lockdep hardirq state saved by arm64_enter_el1_dbg(). */
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}
0225
/*
 * With CONFIG_PREEMPT_DYNAMIC, whether we cond_resched() on IRQ exit is
 * selected at boot via a static key; otherwise it is fixed at build time by
 * CONFIG_PREEMPTION.
 */
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
#endif
0233
/* Conditionally reschedule on the return path of an EL1 interrupt. */
static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}
0267
/*
 * Run @handler for the interrupt described by @regs, switching to the IRQ
 * stack if we are currently on a task (thread) stack. irq_regs are saved and
 * restored around the call so nested users see the right pt_regs.
 */
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}
0280
0281 extern void (*handle_arch_irq)(struct pt_regs *);
0282 extern void (*handle_arch_fiq)(struct pt_regs *);
0283
/* Report a fatal, unexpected exception and panic. Does not return. */
static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	/* NMI-style accounting: this can fire from any context. */
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}
0298
/* Generate a panicking handler for a vector we never expect to take. */
#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}
0305
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Per-CPU flag: set while the erratum 1463225 SVC workaround is in flight. */
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Workaround for Cortex-A76 erratum 1463225: on affected CPUs, take a dummy
 * single-step exception from the kernel on the SVC path (with MDSCR_EL1.SS
 * and KDE forced on, and debug exceptions unmasked via "msr daifclr, #8")
 * before handling the syscall, when the task is being single-stepped.
 */
static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else
/* No-op stubs when the erratum workaround is not configured. */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif
0354
/* Exceptions taken from EL1 with SP_EL0 (EL1t) are always fatal. */
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)
0359
/* EL1 data/instruction abort: FAR_EL1 must be read before anything else. */
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
0370
/* EL1 PC alignment fault. */
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
0381
/* EL1 undefined instruction (or unhandled sys register access). */
static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
0390
/*
 * EL1 debug exception: handled with NMI-style accounting (see
 * arm64_enter_el1_dbg()), and short-circuited when it is the dummy step
 * generated by the Cortex-A76 erratum 1463225 workaround.
 */
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}
0400
/* EL1 pointer authentication failure (FPAC). */
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}
0409
/* Dispatch a synchronous exception taken from EL1 (using SP_EL1) by ESR EC. */
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}
0443
/* Handle a pseudo-NMI with full NMI entry/exit accounting. */
static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}
0451
/* Handle an ordinary EL1 IRQ/FIQ, with a preemption point on the way out. */
static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}
/*
 * Common EL1 interrupt entry. With pseudo-NMIs enabled, an interrupt taken
 * while regs show IRQs masked must be an NMI and gets NMI treatment.
 */
static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	/* Keep IRQs masked at the PSTATE level while handling. */
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}
0475
/* EL1h IRQ and FIQ vectors: route to the registered root handlers. */
asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}
0485
/* EL1h SError: treated as NMI-like, with the DAIF error context restored. */
asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}
0495
/* EL0 data abort. FAR_EL1 is read first, before it can be clobbered. */
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}
0505
/* EL0 instruction abort. */
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}
0523
/* EL0 FP/ASIMD access trap. */
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

/* EL0 SVE access trap. */
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

/* EL0 SME access trap. */
static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

/* EL0 FP/SIMD exception. */
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

/* EL0 trapped system instruction (and WFx, see the sync dispatcher). */
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
	exit_to_user_mode(regs);
}
0563
/* EL0 PC alignment fault. */
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/* A kernel-space PC means a bogus branch target: harden the BP. */
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

/* EL0 SP alignment fault; the faulting "address" is the user SP itself. */
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}
0584
/* EL0 undefined instruction. */
static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
	exit_to_user_mode(regs);
}

/* EL0 branch target exception (BTI). */
static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
	exit_to_user_mode(regs);
}

/* Catch-all for EL0 sync exceptions with no dedicated handler. */
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}
0608
/*
 * EL0 debug exception. The debug handler runs before DAIF is restored, i.e.
 * with debug exceptions still masked.
 */
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}
0619
/*
 * EL0 system call (SVC).
 *
 * NOTE(review): unlike the other el0_* handlers there is no
 * local_daif_restore() here; presumably the SVC path re-enables exceptions
 * itself inside do_el0_svc() — verify against the syscall entry code.
 */
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}
0627
/* EL0 pointer authentication failure (FPAC). */
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
	exit_to_user_mode(regs);
}
0635
/* Dispatch a synchronous exception taken from 64-bit EL0 by ESR EC. */
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
0691
/* Common EL0 interrupt entry for IRQ and FIQ. */
static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	/* Keep IRQs masked at the PSTATE level while handling. */
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	/*
	 * PC bit 55 set means the interrupted address selects the upper
	 * (TTBR1/kernel) address range, which a user PC never legitimately
	 * does — apply BP hardening.
	 */
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}
0708
/* EL0 IRQ/FIQ vectors (shared by the 64-bit and 32-bit entry points). */
static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}
0728
/* EL0 SError: NMI-like handling bracketed by DAIF error-context masking. */
static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
0746
0747 #ifdef CONFIG_COMPAT
/* 32-bit EL0 trapped CP15 access. */
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
	exit_to_user_mode(regs);
}

/* 32-bit EL0 system call; mirrors el0_svc() for the compat ABI. */
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}
0763
/* Dispatch a synchronous exception taken from 32-bit EL0 by ESR EC. */
asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
0807
/* 32-bit EL0 IRQ/FIQ/SError share the 64-bit common paths. */
asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else
/* Without CONFIG_COMPAT, any 32-bit EL0 exception is fatal. */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif
0828
#ifdef CONFIG_VMAP_STACK
/*
 * Entered from the entry assembly when the stack is found to be bad (e.g. a
 * VMAP'd kernel stack overflow). Reports the fault and panics; never returns.
 */
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif
0839
#ifdef CONFIG_ARM_SDE_INTERFACE
/* Firmware-invoked SDEI event handler; treated as NMI-like. */
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an architectural exception to get here, so the
	 * hardware hasn't set/cleared the PSTATE bits we may rely on.
	 *
	 * NOTE(review): firmware implementations differ on whether PSTATE.PAN
	 * is inherited or regenerated on SDEI dispatch (the original SDEI
	 * spec, ARM DEN 0054A, is ambiguous), so we must force PAN to the
	 * state the kernel expects: set it when the host uses hardware PAN,
	 * and explicitly clear it when the CPU has PAN but the host doesn't
	 * use it.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif