0001
0002
0003 #include <linux/context_tracking.h>
0004 #include <linux/err.h>
0005 #include <linux/compat.h>
0006 #include <linux/sched/debug.h> /* for show_regs */
0007
0008 #include <asm/kup.h>
0009 #include <asm/cputime.h>
0010 #include <asm/hw_irq.h>
0011 #include <asm/interrupt.h>
0012 #include <asm/kprobes.h>
0013 #include <asm/paca.h>
0014 #include <asm/ptrace.h>
0015 #include <asm/reg.h>
0016 #include <asm/signal.h>
0017 #include <asm/switch_to.h>
0018 #include <asm/syscall.h>
0019 #include <asm/time.h>
0020 #include <asm/tm.h>
0021 #include <asm/unistd.h>
0022
/*
 * Per-CPU storage for the kernel's DBCR0 value, saved while a user context
 * with debug events enabled (DBCR0[IDM]) is loaded on the CPU.
 * Only needed on 32-bit Book-E style debug (see booke_load_dbcr0()).
 */
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif
0026
#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

/*
 * Whether the interrupt exit path must run with MSR[EE]/RI hard-disabled
 * because it is not safe to re-enter (no restart sequence available).
 * On Book3S-64 this is selectable at boot via a static key; on all other
 * platforms exit must always hard-disable.
 */
static inline bool exit_must_hard_disable(void)
{
	return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
	return true;
}
#endif
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
/*
 * Prepare to return from an interrupt with local (soft) irqs to be enabled.
 * Must be called with local irqs soft-disabled.
 *
 * Returns false if a soft-masked interrupt became pending, in which case the
 * caller must re-enable and re-disable irqs to take the interrupt, redo any
 * exit work, and try again.
 *
 * @restartable: true when this exit can use the interrupt-return restart
 * sequence, in which case MSR[EE]/RI may be left enabled across the exit
 * (unless exit_must_hard_disable() forces hard-disabling anyway).
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
	/* Tell lockdep/tracing irqs are (about to be) on again. */
	trace_hardirqs_on();

	if (exit_must_hard_disable() || !restartable)
		__hard_EE_RI_disable();

#ifdef CONFIG_PPC64
	/* A pending soft-masked interrupt means we must not exit yet. */
	if (unlikely(lazy_irq_pending_nocheck())) {
		if (exit_must_hard_disable() || !restartable) {
			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
			/* Re-enable RI so the caller can run C code safely. */
			__hard_RI_enable();
		}
		trace_hardirqs_off();

		return false;
	}
#endif
	return true;
}
0073
/*
 * Load the user context's debug control register (DBCR0) before returning
 * to userspace, saving the kernel's value in global_dbcr0[] on 32-bit.
 * No-op unless the thread has internal debug mode (DBCR0[IDM]) set.
 */
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long dbcr0 = current->thread.debug.dbcr0;

	if (likely(!(dbcr0 & DBCR0_IDM)))
		return;

	/*
	 * Clear MSR[DE] so no debug interrupt can fire while DBCR0/DBSR
	 * are being updated below.
	 */
	mtmsr(mfmsr() & ~MSR_DE);
	if (IS_ENABLED(CONFIG_PPC32)) {
		isync();
		/* Save the kernel's DBCR0 so it can be restored on entry. */
		global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
	}
	mtspr(SPRN_DBCR0, dbcr0);
	/* Clear all stale debug event status bits. */
	mtspr(SPRN_DBSR, -1);
#endif
}
0095
/*
 * Debug check (Book3S-64 only) that the SRR0/1 or HSRR0/1 registers which
 * the low-level exit code will rely on still match regs->nip/msr, using the
 * paca srr_valid/hsrr_valid flags maintained by the entry code. scv exits
 * don't use SRRs, so they are skipped.
 */
static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long trap, srr0, srr1;
	static bool warned;
	u8 *validp;
	char *h;

	if (trap_is_scv(regs))
		return;

	trap = TRAP(regs);

	/* In HV mode, external interrupts are delivered via HSRRs (0xea0). */
	if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
		trap = 0xea0;

	switch (trap) {
	/* These interrupt vectors use HSRR0/1. */
	case 0x980:
	case INTERRUPT_H_DATA_STORAGE:
	case 0xe20:
	case 0xe40:
	case INTERRUPT_HMI:
	case 0xe80:
	case 0xea0:
	case INTERRUPT_H_FAC_UNAVAIL:
	case 0x1200:
	case 0x1500:
	case 0x1600:
	case 0x1800:
		validp = &local_paca->hsrr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_HSRR0);
		srr1 = mfspr(SPRN_HSRR1);
		h = "H";

		break;
	default:
		/* Everything else uses SRR0/1. */
		validp = &local_paca->srr_valid;
		if (!*validp)
			return;

		srr0 = mfspr(SPRN_SRR0);
		srr1 = mfspr(SPRN_SRR1);
		h = "";
		break;
	}

	if (srr0 == regs->nip && srr1 == regs->msr)
		return;

	/*
	 * A mismatch was seen, but an interrupt (e.g. NMI) may have come in
	 * between reading the valid flag and reading the SRRs, clobbering
	 * them legitimately. Re-check the valid flag after a compiler
	 * barrier: if it was cleared, the mismatch is explained and benign.
	 */
	barrier();

	if (!*validp)
		return;

	/* Genuine mismatch: report once and invalidate so exit re-loads SRRs. */
	if (!warned) {
		warned = true;
		printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
		printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
		show_regs(regs);
	}

	*validp = 0;
#endif
}
0177
/*
 * Common tail of the return-to-user path, shared by syscall and interrupt
 * exit. Runs with local irqs disabled. Handles pending TIF work (reschedule,
 * signals), restores FP/VEC/TM state, and transitions context tracking and
 * irq state for the return. @ret accumulates flags (e.g. _TIF_RESTOREALL)
 * consumed by the asm exit code; returns the updated value.
 */
static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
	unsigned long ti_flags;

again:
	ti_flags = read_thread_flags();
	while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
		local_irq_enable();
		if (ti_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			/*
			 * Signal delivery rewrites the register state, so all
			 * GPRs must be restored on exit rather than just the
			 * volatile subset.
			 */
			if (ti_flags & _TIF_SIGPENDING)
				ret |= _TIF_RESTOREALL;
			do_notify_resume(regs, ti_flags);
		}
		local_irq_disable();
		ti_flags = read_thread_flags();
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
		if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
				unlikely((ti_flags & _TIF_RESTORE_TM))) {
			restore_tm_state(regs);
		} else {
			unsigned long mathflags = MSR_FP;

			if (cpu_has_feature(CPU_FTR_VSX))
				mathflags |= MSR_VEC | MSR_VSX;
			else if (cpu_has_feature(CPU_FTR_ALTIVEC))
				mathflags |= MSR_VEC;

			/*
			 * Only restore FP/VEC state if it is not already
			 * live in the MSR; restore_math() is relatively
			 * expensive.
			 */
			if ((regs->msr & mathflags) != mathflags)
				restore_math(regs);
		}
	}

	check_return_regs_valid(regs);

	user_enter_irqoff();
	if (!prep_irq_for_enabled_exit(true)) {
		/*
		 * A soft-masked interrupt is pending: back out of the user
		 * context-tracking transition, take the interrupt by briefly
		 * enabling irqs, then redo all the exit work.
		 */
		user_exit_irqoff();
		local_irq_enable();
		local_irq_disable();
		goto again;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Record the MSR being returned with, for TM debugging. */
	local_paca->tm_scratch = regs->msr;
#endif

	booke_load_dbcr0();

	account_cpu_user_exit();

	/* Restore user access permissions (KUAP) last, just before return. */
	kuap_user_restore(regs);

	return ret;
}
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260 notrace unsigned long syscall_exit_prepare(unsigned long r3,
0261 struct pt_regs *regs,
0262 long scv)
0263 {
0264 unsigned long ti_flags;
0265 unsigned long ret = 0;
0266 bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
0267
0268 CT_WARN_ON(ct_state() == CONTEXT_USER);
0269
0270 kuap_assert_locked();
0271
0272 regs->result = r3;
0273
0274
0275 rseq_syscall(regs);
0276
0277 ti_flags = read_thread_flags();
0278
0279 if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
0280 if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
0281 r3 = -r3;
0282 regs->ccr |= 0x10000000;
0283 }
0284 }
0285
0286 if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
0287 if (ti_flags & _TIF_RESTOREALL)
0288 ret = _TIF_RESTOREALL;
0289 else
0290 regs->gpr[3] = r3;
0291 clear_bits(_TIF_PERSYSCALL_MASK, ¤t_thread_info()->flags);
0292 } else {
0293 regs->gpr[3] = r3;
0294 }
0295
0296 if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
0297 do_syscall_trace_leave(regs);
0298 ret |= _TIF_RESTOREALL;
0299 }
0300
0301 local_irq_disable();
0302 ret = interrupt_exit_user_prepare_main(ret, regs);
0303
0304 #ifdef CONFIG_PPC64
0305 regs->exit_result = ret;
0306 #endif
0307
0308 return ret;
0309 }
0310
0311 #ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
	/*
	 * Restart entry point for the syscall exit path, entered from the
	 * low-level restart sequence when an interrupt hit the non-reentrant
	 * window of the exit. Re-establish a known state (irqs hard-disabled,
	 * KUAP blocked, kernel context tracking) and redo the exit work
	 * accumulated in regs->exit_result.
	 *
	 * NOTE(review): presumably entered with RI on and EE state unknown —
	 * hence the unconditional hard disable; confirm against the asm
	 * restart code.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

	return regs->exit_result;
}
0338 #endif
0339
/*
 * C portion of the exit path for interrupts that were taken from user mode.
 * Sanity-checks the register/irq state, then performs the common
 * return-to-user preparation. The returned flags are consumed by the asm
 * exit code.
 */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
	unsigned long ret;

	BUG_ON(regs_is_unrecoverable(regs));
	/* Interrupts from user mode are always taken with irqs enabled. */
	BUG_ON(arch_irq_disabled_regs(regs));
	CT_WARN_ON(ct_state() == CONTEXT_USER);

	/*
	 * The entry code is expected to have locked user access (KUAP);
	 * assert rather than silently re-lock.
	 */
	kuap_assert_locked();

	local_irq_disable();

	ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
	/* Saved so interrupt_exit_user_restart() can resume with same flags. */
	regs->exit_result = ret;
#endif

	return ret;
}
0364
0365 void preempt_schedule_irq(void);
0366
0367 notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
0368 {
0369 unsigned long flags;
0370 unsigned long ret = 0;
0371 unsigned long kuap;
0372 bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
0373
0374 if (regs_is_unrecoverable(regs))
0375 unrecoverable_exception(regs);
0376
0377
0378
0379
0380 if (TRAP(regs) != INTERRUPT_PROGRAM)
0381 CT_WARN_ON(ct_state() == CONTEXT_USER);
0382
0383 kuap = kuap_get_and_assert_locked();
0384
0385 local_irq_save(flags);
0386
0387 if (!arch_irq_disabled_regs(regs)) {
0388
0389 WARN_ON_ONCE(!(regs->msr & MSR_EE));
0390 again:
0391 if (IS_ENABLED(CONFIG_PREEMPT)) {
0392
0393 if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
0394 if (preempt_count() == 0)
0395 preempt_schedule_irq();
0396 }
0397 }
0398
0399 check_return_regs_valid(regs);
0400
0401
0402
0403
0404
0405 if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
0406
0407
0408
0409
0410
0411
0412
0413
0414 hard_irq_disable();
0415 replay_soft_interrupts();
0416
0417 goto again;
0418 }
0419 #ifdef CONFIG_PPC64
0420
0421
0422
0423
0424
0425
0426
0427 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
0428
0429 } else {
0430 check_return_regs_valid(regs);
0431
0432 if (unlikely(stack_store))
0433 __hard_EE_RI_disable();
0434
0435
0436
0437
0438
0439
0440
0441
0442 if (regs->msr & MSR_EE)
0443 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
0444 #endif
0445 }
0446
0447 if (unlikely(stack_store)) {
0448 clear_bits(_TIF_EMULATE_STACK_STORE, ¤t_thread_info()->flags);
0449 ret = 1;
0450 }
0451
0452 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0453 local_paca->tm_scratch = regs->msr;
0454 #endif
0455
0456
0457
0458
0459
0460
0461 kuap_kernel_restore(regs, kuap);
0462
0463 return ret;
0464 }
0465
0466 #ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
	/*
	 * Restart entry point for the user interrupt exit path, entered from
	 * the low-level restart sequence when an interrupt hit the
	 * non-reentrant window of the exit. Re-establish a known state and
	 * redo the exit work, accumulating flags into regs->exit_result.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	trace_hardirqs_off();
	user_exit_irqoff();
	account_cpu_user_entry();

	BUG_ON(!user_mode(regs));

	regs->exit_result |= interrupt_exit_user_prepare(regs);

	return regs->exit_result;
}
0486
0487
0488
0489
0490
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
	/*
	 * Restart entry point for the kernel interrupt exit path; redo the
	 * whole exit after re-establishing a known hard-disabled state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
	set_kuap(AMR_KUAP_BLOCKED);
#endif

	/* Only trace irqs-off if the interrupted context had them on. */
	if (regs->softe == IRQS_ENABLED)
		trace_hardirqs_off();

	BUG_ON(user_mode(regs));

	return interrupt_exit_kernel_prepare(regs);
}
0507 #endif