/*
 * PowerPC process and thread management: lazy FP/VMX/VSX/SPE register
 * state handling, hardware breakpoint and debug register switching,
 * transactional memory reclaim and recheckpoint, context switch
 * (__switch_to), thread creation (copy_thread) and exec-time thread
 * setup (start_thread).
 */

0013 #include <linux/errno.h>
0014 #include <linux/sched.h>
0015 #include <linux/sched/debug.h>
0016 #include <linux/sched/task.h>
0017 #include <linux/sched/task_stack.h>
0018 #include <linux/kernel.h>
0019 #include <linux/mm.h>
0020 #include <linux/smp.h>
0021 #include <linux/stddef.h>
0022 #include <linux/unistd.h>
0023 #include <linux/ptrace.h>
0024 #include <linux/slab.h>
0025 #include <linux/user.h>
0026 #include <linux/elf.h>
0027 #include <linux/prctl.h>
0028 #include <linux/init_task.h>
0029 #include <linux/export.h>
0030 #include <linux/kallsyms.h>
0031 #include <linux/mqueue.h>
0032 #include <linux/hardirq.h>
0033 #include <linux/utsname.h>
0034 #include <linux/ftrace.h>
0035 #include <linux/kernel_stat.h>
0036 #include <linux/personality.h>
0037 #include <linux/hw_breakpoint.h>
0038 #include <linux/uaccess.h>
0039 #include <linux/pkeys.h>
0040 #include <linux/seq_buf.h>
0041
0042 #include <asm/interrupt.h>
0043 #include <asm/io.h>
0044 #include <asm/processor.h>
0045 #include <asm/mmu.h>
0046 #include <asm/machdep.h>
0047 #include <asm/time.h>
0048 #include <asm/runlatch.h>
0049 #include <asm/syscalls.h>
0050 #include <asm/switch_to.h>
0051 #include <asm/tm.h>
0052 #include <asm/debug.h>
0053 #ifdef CONFIG_PPC64
0054 #include <asm/firmware.h>
0055 #include <asm/hw_irq.h>
0056 #endif
0057 #include <asm/code-patching.h>
0058 #include <asm/exec.h>
0059 #include <asm/livepatch.h>
0060 #include <asm/cpu_has_feature.h>
0061 #include <asm/asm-prototypes.h>
0062 #include <asm/stacktrace.h>
0063 #include <asm/hw_breakpoint.h>
0064
0065 #include <linux/kprobes.h>
0066 #include <linux/kdebug.h>
0067
0068
0069 #ifdef TM_DEBUG_SW
0070 #define TM_DEBUG(x...) printk(KERN_INFO x)
0071 #else
0072 #define TM_DEBUG(x...) do { } while(0)
0073 #endif
0074
0075 extern unsigned long _get_SP(void);
0076
0077 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0078
0079
0080
0081
0082
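/*
 * True when the platform must not use TM suspend mode. Set once during
 * boot (__ro_after_init); tm_reclaim_task() warns if it ever has to
 * reclaim a transaction while this is set.
 */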
0083 bool tm_suspend_disabled __ro_after_init = false;
0084
0085 static void check_if_tm_restore_required(struct task_struct *tsk)
0086 {
0087
0088
0089
0090
0091
0092
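/*
 * If the current task is in the middle of a transaction, remember the
 * checkpointed MSR and set TIF_RESTORE_TM so the transactional state is
 * restored before returning to userspace.
 */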
0093 if (tsk == current && tsk->thread.regs &&
0094 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
0095 !test_thread_flag(TIF_RESTORE_TM)) {
0096 regs_set_return_msr(&tsk->thread.ckpt_regs,
0097 tsk->thread.regs->msr);
0098 set_thread_flag(TIF_RESTORE_TM);
0099 }
0100 }
0101
0102 #else
0103 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
0104 #endif
0105
0106 bool strict_msr_control;
0107 EXPORT_SYMBOL(strict_msr_control);
0108
0109 static int __init enable_strict_msr_control(char *str)
0110 {
0111 strict_msr_control = true;
0112 pr_info("Enabling strict facility control\n");
0113
0114 return 0;
0115 }
0116 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
0117
0118
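/*
 * Set the given facility bits in the MSR, additionally forcing MSR_VSX on
 * when MSR_FP is requested on a VSX-capable CPU, and return the new MSR.
 */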
0119 unsigned long notrace msr_check_and_set(unsigned long bits)
0120 {
0121 unsigned long oldmsr = mfmsr();
0122 unsigned long newmsr;
0123
0124 newmsr = oldmsr | bits;
0125
0126 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
0127 newmsr |= MSR_VSX;
0128
0129 if (oldmsr != newmsr)
0130 mtmsr_isync(newmsr);
0131
0132 return newmsr;
0133 }
0134 EXPORT_SYMBOL_GPL(msr_check_and_set);
0135
0136
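/*
 * Clear the given facility bits from the MSR, additionally dropping
 * MSR_VSX when MSR_FP is being cleared on a VSX-capable CPU.
 */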
0137 void notrace __msr_check_and_clear(unsigned long bits)
0138 {
0139 unsigned long oldmsr = mfmsr();
0140 unsigned long newmsr;
0141
0142 newmsr = oldmsr & ~bits;
0143
0144 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
0145 newmsr &= ~MSR_VSX;
0146
0147 if (oldmsr != newmsr)
0148 mtmsr_isync(newmsr);
0149 }
0150 EXPORT_SYMBOL(__msr_check_and_clear);
0151
0152 #ifdef CONFIG_PPC_FPU
0153 static void __giveup_fpu(struct task_struct *tsk)
0154 {
0155 unsigned long msr;
0156
0157 save_fpu(tsk);
0158 msr = tsk->thread.regs->msr;
0159 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
0160 if (cpu_has_feature(CPU_FTR_VSX))
0161 msr &= ~MSR_VSX;
0162 regs_set_return_msr(tsk->thread.regs, msr);
0163 }
0164
0165 void giveup_fpu(struct task_struct *tsk)
0166 {
0167 check_if_tm_restore_required(tsk);
0168
0169 msr_check_and_set(MSR_FP);
0170 __giveup_fpu(tsk);
0171 msr_check_and_clear(MSR_FP);
0172 }
0173 EXPORT_SYMBOL(giveup_fpu);
0174
0175
0176
0177
0178
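/*
 * Ensure the task's floating-point state is saved into its thread_struct
 * (for ptrace, signal delivery, etc.) rather than left live in the CPU
 * registers. Only valid for the current task.
 */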
0179 void flush_fp_to_thread(struct task_struct *tsk)
0180 {
0181 if (tsk->thread.regs) {
0182
0183
0184
0185
0186
0187
0188
0189
0190 preempt_disable();
0191 if (tsk->thread.regs->msr & MSR_FP) {
0192
0193
0194
0195
0196
0197
0198
0199 BUG_ON(tsk != current);
0200 giveup_fpu(tsk);
0201 }
0202 preempt_enable();
0203 }
0204 }
0205 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
0206
0207 void enable_kernel_fp(void)
0208 {
0209 unsigned long cpumsr;
0210
0211 WARN_ON(preemptible());
0212
0213 cpumsr = msr_check_and_set(MSR_FP);
0214
0215 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
0216 check_if_tm_restore_required(current);
0217
0218
0219
0220
0221
0222
0223
0224 if (!MSR_TM_ACTIVE(cpumsr) &&
0225 MSR_TM_ACTIVE(current->thread.regs->msr))
0226 return;
0227 __giveup_fpu(current);
0228 }
0229 }
0230 EXPORT_SYMBOL(enable_kernel_fp);
0231 #else
0232 static inline void __giveup_fpu(struct task_struct *tsk) { }
0233 #endif
0234
0235 #ifdef CONFIG_ALTIVEC
0236 static void __giveup_altivec(struct task_struct *tsk)
0237 {
0238 unsigned long msr;
0239
0240 save_altivec(tsk);
0241 msr = tsk->thread.regs->msr;
0242 msr &= ~MSR_VEC;
0243 if (cpu_has_feature(CPU_FTR_VSX))
0244 msr &= ~MSR_VSX;
0245 regs_set_return_msr(tsk->thread.regs, msr);
0246 }
0247
0248 void giveup_altivec(struct task_struct *tsk)
0249 {
0250 check_if_tm_restore_required(tsk);
0251
0252 msr_check_and_set(MSR_VEC);
0253 __giveup_altivec(tsk);
0254 msr_check_and_clear(MSR_VEC);
0255 }
0256 EXPORT_SYMBOL(giveup_altivec);
0257
0258 void enable_kernel_altivec(void)
0259 {
0260 unsigned long cpumsr;
0261
0262 WARN_ON(preemptible());
0263
0264 cpumsr = msr_check_and_set(MSR_VEC);
0265
0266 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
0267 check_if_tm_restore_required(current);
0268
0269
0270
0271
0272
0273
0274
0275 if (!MSR_TM_ACTIVE(cpumsr) &&
0276 MSR_TM_ACTIVE(current->thread.regs->msr))
0277 return;
0278 __giveup_altivec(current);
0279 }
0280 }
0281 EXPORT_SYMBOL(enable_kernel_altivec);
0282
0283
0284
0285
0286
0287 void flush_altivec_to_thread(struct task_struct *tsk)
0288 {
0289 if (tsk->thread.regs) {
0290 preempt_disable();
0291 if (tsk->thread.regs->msr & MSR_VEC) {
0292 BUG_ON(tsk != current);
0293 giveup_altivec(tsk);
0294 }
0295 preempt_enable();
0296 }
0297 }
0298 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
0299 #endif
0300
0301 #ifdef CONFIG_VSX
0302 static void __giveup_vsx(struct task_struct *tsk)
0303 {
0304 unsigned long msr = tsk->thread.regs->msr;
0305
0306
0307
0308
0309
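/* MSR_VSX should never be set without MSR_FP and MSR_VEC also being set. */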
0310 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
0311
0312
0313 if (msr & MSR_FP)
0314 __giveup_fpu(tsk);
0315 if (msr & MSR_VEC)
0316 __giveup_altivec(tsk);
0317 }
0318
0319 static void giveup_vsx(struct task_struct *tsk)
0320 {
0321 check_if_tm_restore_required(tsk);
0322
0323 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
0324 __giveup_vsx(tsk);
0325 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
0326 }
0327
0328 void enable_kernel_vsx(void)
0329 {
0330 unsigned long cpumsr;
0331
0332 WARN_ON(preemptible());
0333
0334 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
0335
0336 if (current->thread.regs &&
0337 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
0338 check_if_tm_restore_required(current);
0339
0340
0341
0342
0343
0344
0345
0346 if (!MSR_TM_ACTIVE(cpumsr) &&
0347 MSR_TM_ACTIVE(current->thread.regs->msr))
0348 return;
0349 __giveup_vsx(current);
0350 }
0351 }
0352 EXPORT_SYMBOL(enable_kernel_vsx);
0353
0354 void flush_vsx_to_thread(struct task_struct *tsk)
0355 {
0356 if (tsk->thread.regs) {
0357 preempt_disable();
0358 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
0359 BUG_ON(tsk != current);
0360 giveup_vsx(tsk);
0361 }
0362 preempt_enable();
0363 }
0364 }
0365 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
0366 #endif
0367
0368 #ifdef CONFIG_SPE
0369 void giveup_spe(struct task_struct *tsk)
0370 {
0371 check_if_tm_restore_required(tsk);
0372
0373 msr_check_and_set(MSR_SPE);
0374 __giveup_spe(tsk);
0375 msr_check_and_clear(MSR_SPE);
0376 }
0377 EXPORT_SYMBOL(giveup_spe);
0378
0379 void enable_kernel_spe(void)
0380 {
0381 WARN_ON(preemptible());
0382
0383 msr_check_and_set(MSR_SPE);
0384
0385 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
0386 check_if_tm_restore_required(current);
0387 __giveup_spe(current);
0388 }
0389 }
0390 EXPORT_SYMBOL(enable_kernel_spe);
0391
0392 void flush_spe_to_thread(struct task_struct *tsk)
0393 {
0394 if (tsk->thread.regs) {
0395 preempt_disable();
0396 if (tsk->thread.regs->msr & MSR_SPE) {
0397 BUG_ON(tsk != current);
0398 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
0399 giveup_spe(tsk);
0400 }
0401 preempt_enable();
0402 }
0403 }
0404 #endif
0405
0406 static unsigned long msr_all_available;
0407
0408 static int __init init_msr_all_available(void)
0409 {
0410 if (IS_ENABLED(CONFIG_PPC_FPU))
0411 msr_all_available |= MSR_FP;
0412 if (cpu_has_feature(CPU_FTR_ALTIVEC))
0413 msr_all_available |= MSR_VEC;
0414 if (cpu_has_feature(CPU_FTR_VSX))
0415 msr_all_available |= MSR_VSX;
0416 if (cpu_has_feature(CPU_FTR_SPE))
0417 msr_all_available |= MSR_SPE;
0418
0419 return 0;
0420 }
0421 early_initcall(init_msr_all_available);
0422
0423 void giveup_all(struct task_struct *tsk)
0424 {
0425 unsigned long usermsr;
0426
0427 if (!tsk->thread.regs)
0428 return;
0429
0430 check_if_tm_restore_required(tsk);
0431
0432 usermsr = tsk->thread.regs->msr;
0433
0434 if ((usermsr & msr_all_available) == 0)
0435 return;
0436
0437 msr_check_and_set(msr_all_available);
0438
0439 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
0440
0441 if (usermsr & MSR_FP)
0442 __giveup_fpu(tsk);
0443 if (usermsr & MSR_VEC)
0444 __giveup_altivec(tsk);
0445 if (usermsr & MSR_SPE)
0446 __giveup_spe(tsk);
0447
0448 msr_check_and_clear(msr_all_available);
0449 }
0450 EXPORT_SYMBOL(giveup_all);
0451
0452 #ifdef CONFIG_PPC_BOOK3S_64
0453 #ifdef CONFIG_PPC_FPU
0454 static bool should_restore_fp(void)
0455 {
0456 if (current->thread.load_fp) {
0457 current->thread.load_fp++;
0458 return true;
0459 }
0460 return false;
0461 }
0462
0463 static void do_restore_fp(void)
0464 {
0465 load_fp_state(&current->thread.fp_state);
0466 }
0467 #else
0468 static bool should_restore_fp(void) { return false; }
0469 static void do_restore_fp(void) { }
0470 #endif
0471
0472 #ifdef CONFIG_ALTIVEC
0473 static bool should_restore_altivec(void)
0474 {
0475 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
0476 current->thread.load_vec++;
0477 return true;
0478 }
0479 return false;
0480 }
0481
0482 static void do_restore_altivec(void)
0483 {
0484 load_vr_state(&current->thread.vr_state);
0485 current->thread.used_vr = 1;
0486 }
0487 #else
0488 static bool should_restore_altivec(void) { return false; }
0489 static void do_restore_altivec(void) { }
0490 #endif
0491
0492 static bool should_restore_vsx(void)
0493 {
0494 if (cpu_has_feature(CPU_FTR_VSX))
0495 return true;
0496 return false;
0497 }
0498 #ifdef CONFIG_VSX
0499 static void do_restore_vsx(void)
0500 {
0501 current->thread.used_vsr = 1;
0502 }
0503 #else
0504 static void do_restore_vsx(void) { }
0505 #endif
0506
0507
0508
0509
0510
0511
0512
0513
0514
0515
0516
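/*
 * Re-enable and reload whichever of FP/VEC/VSX the task has been using
 * (per the load_fp/load_vec counters) but which are currently disabled in
 * regs->msr. VSX is only enabled once both FP and VEC are enabled, and the
 * task's FP exception mode bits are folded back into the returned MSR.
 */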
0517 void notrace restore_math(struct pt_regs *regs)
0518 {
0519 unsigned long msr;
0520 unsigned long new_msr = 0;
0521
0522 msr = regs->msr;
0523
0524
0525
0526
0527
0528
0529 if ((!(msr & MSR_FP)) && should_restore_fp())
0530 new_msr |= MSR_FP;
0531
0532 if ((!(msr & MSR_VEC)) && should_restore_altivec())
0533 new_msr |= MSR_VEC;
0534
0535 if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
0536 if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
0537 new_msr |= MSR_VSX;
0538 }
0539
0540 if (new_msr) {
0541 unsigned long fpexc_mode = 0;
0542
0543 msr_check_and_set(new_msr);
0544
0545 if (new_msr & MSR_FP) {
0546 do_restore_fp();
0547
0548
0549 fpexc_mode = current->thread.fpexc_mode;
0550 }
0551
0552 if (new_msr & MSR_VEC)
0553 do_restore_altivec();
0554
0555 if (new_msr & MSR_VSX)
0556 do_restore_vsx();
0557
0558 msr_check_and_clear(new_msr);
0559
0560 regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
0561 }
0562 }
0563 #endif
0564
0565 static void save_all(struct task_struct *tsk)
0566 {
0567 unsigned long usermsr;
0568
0569 if (!tsk->thread.regs)
0570 return;
0571
0572 usermsr = tsk->thread.regs->msr;
0573
0574 if ((usermsr & msr_all_available) == 0)
0575 return;
0576
0577 msr_check_and_set(msr_all_available);
0578
0579 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
0580
0581 if (usermsr & MSR_FP)
0582 save_fpu(tsk);
0583
0584 if (usermsr & MSR_VEC)
0585 save_altivec(tsk);
0586
0587 if (usermsr & MSR_SPE)
0588 __giveup_spe(tsk);
0589
0590 msr_check_and_clear(msr_all_available);
0591 }
0592
0593 void flush_all_to_thread(struct task_struct *tsk)
0594 {
0595 if (tsk->thread.regs) {
0596 preempt_disable();
0597 BUG_ON(tsk != current);
0598 #ifdef CONFIG_SPE
0599 if (tsk->thread.regs->msr & MSR_SPE)
0600 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
0601 #endif
0602 save_all(tsk);
0603
0604 preempt_enable();
0605 }
0606 }
0607 EXPORT_SYMBOL(flush_all_to_thread);
0608
0609 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
0610 void do_send_trap(struct pt_regs *regs, unsigned long address,
0611 unsigned long error_code, int breakpt)
0612 {
0613 current->thread.trap_nr = TRAP_HWBKPT;
0614 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
0615 11, SIGSEGV) == NOTIFY_STOP)
0616 return;
0617
0618
0619 force_sig_ptrace_errno_trap(breakpt,
0620 (void __user *)address);
0621 }
0622 #else
0623
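/*
 * Used by do_break() when the generic hw_breakpoint layer is not built in:
 * work out which watchpoint(s) the faulting access matched and disable
 * them, so the task gets a single SIGTRAP for the hit.
 */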
0624 static void do_break_handler(struct pt_regs *regs)
0625 {
0626 struct arch_hw_breakpoint null_brk = {0};
0627 struct arch_hw_breakpoint *info;
0628 ppc_inst_t instr = ppc_inst(0);
0629 int type = 0;
0630 int size = 0;
0631 unsigned long ea;
0632 int i;
0633
0634
0635
0636
0637
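/*
 * With only one watchpoint slot there is nothing to disambiguate: disable
 * it without decoding the faulting instruction.
 */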
0638 if (nr_wp_slots() == 1) {
0639 __set_breakpoint(0, &null_brk);
0640 current->thread.hw_brk[0] = null_brk;
0641 current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
0642 return;
0643 }
0644
0645
0646 wp_get_instr_detail(regs, &instr, &type, &size, &ea);
0647
0648 for (i = 0; i < nr_wp_slots(); i++) {
0649 info = &current->thread.hw_brk[i];
0650 if (!info->address)
0651 continue;
0652
0653 if (wp_check_constraints(regs, instr, ea, type, size, info)) {
0654 __set_breakpoint(i, &null_brk);
0655 current->thread.hw_brk[i] = null_brk;
0656 current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
0657 }
0658 }
0659 }
0660
0661 DEFINE_INTERRUPT_HANDLER(do_break)
0662 {
0663 current->thread.trap_nr = TRAP_HWBKPT;
0664 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
0665 11, SIGSEGV) == NOTIFY_STOP)
0666 return;
0667
0668 if (debugger_break_match(regs))
0669 return;
0670
0671
0672
0673
0674
0675
0676
0677
0678 if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
0679 do_break_handler(regs);
0680
0681
0682 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
0683 }
0684 #endif
0685
0686 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
0687
0688 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
0689
0690
0691
0692 static void set_debug_reg_defaults(struct thread_struct *thread)
0693 {
0694 thread->debug.iac1 = thread->debug.iac2 = 0;
0695 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
0696 thread->debug.iac3 = thread->debug.iac4 = 0;
0697 #endif
0698 thread->debug.dac1 = thread->debug.dac2 = 0;
0699 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
0700 thread->debug.dvc1 = thread->debug.dvc2 = 0;
0701 #endif
0702 thread->debug.dbcr0 = 0;
0703 #ifdef CONFIG_BOOKE
0704
0705
0706
0707 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
0708 DBCR1_IAC3US | DBCR1_IAC4US;
0709
0710
0711
0712
0713 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
0714 #else
0715 thread->debug.dbcr1 = 0;
0716 #endif
0717 }
0718
0719 static void prime_debug_regs(struct debug_reg *debug)
0720 {
0721
0722
0723
0724
0725
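/* Disable debug interrupts (MSR_DE) while the debug SPRs are rewritten. */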
0726 mtmsr(mfmsr() & ~MSR_DE);
0727
0728 mtspr(SPRN_IAC1, debug->iac1);
0729 mtspr(SPRN_IAC2, debug->iac2);
0730 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
0731 mtspr(SPRN_IAC3, debug->iac3);
0732 mtspr(SPRN_IAC4, debug->iac4);
0733 #endif
0734 mtspr(SPRN_DAC1, debug->dac1);
0735 mtspr(SPRN_DAC2, debug->dac2);
0736 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
0737 mtspr(SPRN_DVC1, debug->dvc1);
0738 mtspr(SPRN_DVC2, debug->dvc2);
0739 #endif
0740 mtspr(SPRN_DBCR0, debug->dbcr0);
0741 mtspr(SPRN_DBCR1, debug->dbcr1);
0742 #ifdef CONFIG_BOOKE
0743 mtspr(SPRN_DBCR2, debug->dbcr2);
0744 #endif
0745 }
0746
0747
0748
0749
0750
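/*
 * On context switch, reload the BookE debug registers from the incoming
 * thread if either the outgoing or the incoming thread has them in use
 * (DBCR0_IDM set).
 */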
0751 void switch_booke_debug_regs(struct debug_reg *new_debug)
0752 {
0753 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
0754 || (new_debug->dbcr0 & DBCR0_IDM))
0755 prime_debug_regs(new_debug);
0756 }
0757 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
0758 #else
0759 #ifndef CONFIG_HAVE_HW_BREAKPOINT
0760 static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
0761 {
0762 preempt_disable();
0763 __set_breakpoint(i, brk);
0764 preempt_enable();
0765 }
0766
0767 static void set_debug_reg_defaults(struct thread_struct *thread)
0768 {
0769 int i;
0770 struct arch_hw_breakpoint null_brk = {0};
0771
0772 for (i = 0; i < nr_wp_slots(); i++) {
0773 thread->hw_brk[i] = null_brk;
0774 if (ppc_breakpoint_available())
0775 set_breakpoint(i, &thread->hw_brk[i]);
0776 }
0777 }
0778
0779 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
0780 struct arch_hw_breakpoint *b)
0781 {
0782 if (a->address != b->address)
0783 return false;
0784 if (a->type != b->type)
0785 return false;
0786 if (a->len != b->len)
0787 return false;
0788
0789 return true;
0790 }
0791
0792 static void switch_hw_breakpoint(struct task_struct *new)
0793 {
0794 int i;
0795
0796 for (i = 0; i < nr_wp_slots(); i++) {
0797 if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
0798 &new->thread.hw_brk[i])))
0799 continue;
0800
0801 __set_breakpoint(i, &new->thread.hw_brk[i]);
0802 }
0803 }
0804 #endif
0805 #endif
0806
0807 static inline int set_dabr(struct arch_hw_breakpoint *brk)
0808 {
0809 unsigned long dabr, dabrx;
0810
0811 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
0812 dabrx = ((brk->type >> 3) & 0x7);
0813
0814 if (ppc_md.set_dabr)
0815 return ppc_md.set_dabr(dabr, dabrx);
0816
0817 if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
0818 mtspr(SPRN_DAC1, dabr);
0819 if (IS_ENABLED(CONFIG_PPC_47x))
0820 isync();
0821 return 0;
0822 } else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
0823 mtspr(SPRN_DABR, dabr);
0824 if (cpu_has_feature(CPU_FTR_DABRX))
0825 mtspr(SPRN_DABRX, dabrx);
0826 return 0;
0827 } else {
0828 return -EINVAL;
0829 }
0830 }
0831
0832 static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
0833 {
0834 unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
0835 LCTRL1_CRWF_RW;
0836 unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
0837 unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
0838 unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
0839
0840 if (start_addr == 0)
0841 lctrl2 |= LCTRL2_LW0LA_F;
0842 else if (end_addr == 0)
0843 lctrl2 |= LCTRL2_LW0LA_E;
0844 else
0845 lctrl2 |= LCTRL2_LW0LA_EandF;
0846
0847 mtspr(SPRN_LCTRL2, 0);
0848
0849 if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
0850 return 0;
0851
0852 if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
0853 lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
0854 if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
0855 lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
0856
0857 mtspr(SPRN_CMPE, start_addr - 1);
0858 mtspr(SPRN_CMPF, end_addr);
0859 mtspr(SPRN_LCTRL1, lctrl1);
0860 mtspr(SPRN_LCTRL2, lctrl2);
0861
0862 return 0;
0863 }
0864
0865 void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
0866 {
0867 memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
0868
0869 if (dawr_enabled())
0870
0871 set_dawr(nr, brk);
0872 else if (IS_ENABLED(CONFIG_PPC_8xx))
0873 set_breakpoint_8xx(brk);
0874 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
0875
0876 set_dabr(brk);
0877 else
0878
0879 WARN_ON_ONCE(1);
0880 }
0881
0882
0883 bool ppc_breakpoint_available(void)
0884 {
0885 if (dawr_enabled())
0886 return true;
0887 if (cpu_has_feature(CPU_FTR_ARCH_207S))
0888 return false;
0889
0890 return true;
0891 }
0892 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
0893
0894 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0895
0896 static inline bool tm_enabled(struct task_struct *tsk)
0897 {
0898 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
0899 }
0900
0901 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
0902 {
0903
0904
0905
0906
0907
0908
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918 if (!MSR_TM_SUSPENDED(mfmsr()))
0919 return;
0920
0921 giveup_all(container_of(thr, struct task_struct, thread));
0922
0923 tm_reclaim(thr, cause);
0924
0925
0926
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
0937 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
0938 memcpy(&thr->ckfp_state, &thr->fp_state,
0939 sizeof(struct thread_fp_state));
0940 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
0941 memcpy(&thr->ckvr_state, &thr->vr_state,
0942 sizeof(struct thread_vr_state));
0943 }
0944
0945 void tm_reclaim_current(uint8_t cause)
0946 {
0947 tm_enable();
0948 tm_reclaim_thread(&current->thread, cause);
0949 }
0950
0951 static inline void tm_reclaim_task(struct task_struct *tsk)
0952 {
0953
0954
0955
0956
0957
0958
0959
0960
0961
0962
0963 struct thread_struct *thr = &tsk->thread;
0964
0965 if (!thr->regs)
0966 return;
0967
0968 if (!MSR_TM_ACTIVE(thr->regs->msr))
0969 goto out_and_saveregs;
0970
0971 WARN_ON(tm_suspend_disabled);
0972
0973 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
0974 "ccr=%lx, msr=%lx, trap=%lx)\n",
0975 tsk->pid, thr->regs->nip,
0976 thr->regs->ccr, thr->regs->msr,
0977 thr->regs->trap);
0978
0979 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
0980
0981 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
0982 tsk->pid);
0983
0984 out_and_saveregs:
0985
0986
0987
0988
0989
0990 tm_save_sprs(thr);
0991 }
0992
0993 extern void __tm_recheckpoint(struct thread_struct *thread);
0994
0995 void tm_recheckpoint(struct thread_struct *thread)
0996 {
0997 unsigned long flags;
0998
0999 if (!(thread->regs->msr & MSR_TM))
1000 return;
1001
1002
1003
1004
1005
1006 local_irq_save(flags);
1007 hard_irq_disable();
1008
1009
1010
1011
1012 tm_restore_sprs(thread);
1013
1014 __tm_recheckpoint(thread);
1015
1016 local_irq_restore(flags);
1017 }
1018
1019 static inline void tm_recheckpoint_new_task(struct task_struct *new)
1020 {
1021 if (!cpu_has_feature(CPU_FTR_TM))
1022 return;
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032 if (!tm_enabled(new))
1033 return;
1034
1035 if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
1036 tm_restore_sprs(&new->thread);
1037 return;
1038 }
1039
1040 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1041 new->pid, new->thread.regs->msr);
1042
1043 tm_recheckpoint(&new->thread);
1044
1045
1046
1047
1048
1049
1050 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1051
1052 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1053 "(kernel msr 0x%lx)\n",
1054 new->pid, mfmsr());
1055 }
1056
1057 static inline void __switch_to_tm(struct task_struct *prev,
1058 struct task_struct *new)
1059 {
1060 if (cpu_has_feature(CPU_FTR_TM)) {
1061 if (tm_enabled(prev) || tm_enabled(new))
1062 tm_enable();
1063
1064 if (tm_enabled(prev)) {
1065 prev->thread.load_tm++;
1066 tm_reclaim_task(prev);
1067 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1068 prev->thread.regs->msr &= ~MSR_TM;
1069 }
1070
1071 tm_recheckpoint_new_task(new);
1072 }
1073 }
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
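/*
 * Clear TIF_RESTORE_TM and, if a transaction is still active, turn back on
 * in regs->msr whichever of FP/VEC/VSX were enabled in the checkpointed
 * MSR but are currently off, bumping load_fp/load_vec so restore_math()
 * reloads the corresponding register state.
 */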
1089 void restore_tm_state(struct pt_regs *regs)
1090 {
1091 unsigned long msr_diff;
1092
1093
1094
1095
1096
1097
1098
1099 clear_thread_flag(TIF_RESTORE_TM);
1100 if (!MSR_TM_ACTIVE(regs->msr))
1101 return;
1102
1103 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1104 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1105
1106
1107 if (msr_diff & MSR_FP)
1108 current->thread.load_fp = 1;
1109 #ifdef CONFIG_ALTIVEC
1110 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1111 current->thread.load_vec = 1;
1112 #endif
1113 restore_math(regs);
1114
1115 regs_set_return_msr(regs, regs->msr | msr_diff);
1116 }
1117
1118 #else
1119 #define tm_recheckpoint_new_task(new)
1120 #define __switch_to_tm(prev, new)
1121 void tm_reclaim_current(uint8_t cause) {}
1122 #endif
1123
1124 static inline void save_sprs(struct thread_struct *t)
1125 {
1126 #ifdef CONFIG_ALTIVEC
1127 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1128 t->vrsave = mfspr(SPRN_VRSAVE);
1129 #endif
1130 #ifdef CONFIG_SPE
1131 if (cpu_has_feature(CPU_FTR_SPE))
1132 t->spefscr = mfspr(SPRN_SPEFSCR);
1133 #endif
1134 #ifdef CONFIG_PPC_BOOK3S_64
1135 if (cpu_has_feature(CPU_FTR_DSCR))
1136 t->dscr = mfspr(SPRN_DSCR);
1137
1138 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1139 t->bescr = mfspr(SPRN_BESCR);
1140 t->ebbhr = mfspr(SPRN_EBBHR);
1141 t->ebbrr = mfspr(SPRN_EBBRR);
1142
1143 t->fscr = mfspr(SPRN_FSCR);
1144
1145
1146
1147
1148
1149
1150
1151 t->tar = mfspr(SPRN_TAR);
1152 }
1153 #endif
1154 }
1155
1156 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1157 void kvmppc_save_user_regs(void)
1158 {
1159 unsigned long usermsr;
1160
1161 if (!current->thread.regs)
1162 return;
1163
1164 usermsr = current->thread.regs->msr;
1165
1166 if (usermsr & MSR_FP)
1167 save_fpu(current);
1168
1169 if (usermsr & MSR_VEC)
1170 save_altivec(current);
1171
1172 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1173 if (usermsr & MSR_TM) {
1174 current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
1175 current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
1176 current->thread.tm_texasr = mfspr(SPRN_TEXASR);
1177 current->thread.regs->msr &= ~MSR_TM;
1178 }
1179 #endif
1180 }
1181 EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
1182
1183 void kvmppc_save_current_sprs(void)
1184 {
1185 save_sprs(&current->thread);
1186 }
1187 EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
1188 #endif
1189
1190 static inline void restore_sprs(struct thread_struct *old_thread,
1191 struct thread_struct *new_thread)
1192 {
1193 #ifdef CONFIG_ALTIVEC
1194 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1195 old_thread->vrsave != new_thread->vrsave)
1196 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1197 #endif
1198 #ifdef CONFIG_SPE
1199 if (cpu_has_feature(CPU_FTR_SPE) &&
1200 old_thread->spefscr != new_thread->spefscr)
1201 mtspr(SPRN_SPEFSCR, new_thread->spefscr);
1202 #endif
1203 #ifdef CONFIG_PPC_BOOK3S_64
1204 if (cpu_has_feature(CPU_FTR_DSCR)) {
1205 u64 dscr = get_paca()->dscr_default;
1206 if (new_thread->dscr_inherit)
1207 dscr = new_thread->dscr;
1208
1209 if (old_thread->dscr != dscr)
1210 mtspr(SPRN_DSCR, dscr);
1211 }
1212
1213 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1214 if (old_thread->bescr != new_thread->bescr)
1215 mtspr(SPRN_BESCR, new_thread->bescr);
1216 if (old_thread->ebbhr != new_thread->ebbhr)
1217 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1218 if (old_thread->ebbrr != new_thread->ebbrr)
1219 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1220
1221 if (old_thread->fscr != new_thread->fscr)
1222 mtspr(SPRN_FSCR, new_thread->fscr);
1223
1224 if (old_thread->tar != new_thread->tar)
1225 mtspr(SPRN_TAR, new_thread->tar);
1226 }
1227
1228 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1229 old_thread->tidr != new_thread->tidr)
1230 mtspr(SPRN_TIDR, new_thread->tidr);
1231 #endif
1232
1233 }
1234
1235 struct task_struct *__switch_to(struct task_struct *prev,
1236 struct task_struct *new)
1237 {
1238 struct thread_struct *new_thread, *old_thread;
1239 struct task_struct *last;
1240 #ifdef CONFIG_PPC_64S_HASH_MMU
1241 struct ppc64_tlb_batch *batch;
1242 #endif
1243
1244 new_thread = &new->thread;
1245 old_thread = &current->thread;
1246
1247 WARN_ON(!irqs_disabled());
1248
1249 #ifdef CONFIG_PPC_64S_HASH_MMU
1250 batch = this_cpu_ptr(&ppc64_tlb_batch);
1251 if (batch->active) {
1252 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1253 if (batch->index)
1254 __flush_tlb_pending(batch);
1255 batch->active = 0;
1256 }
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267 if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
1268 atomic_read(&new->mm->context.vas_windows)))
1269 asm volatile(PPC_CP_ABORT);
1270 #endif
1271
1272 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1273 switch_booke_debug_regs(&new->thread.debug);
1274 #else
1275
1276
1277
1278
1279 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1280 switch_hw_breakpoint(new);
1281 #endif
1282 #endif
1283
1284
1285
1286
1287
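/*
 * Save the outgoing thread's SPRs before giveup_all() and __switch_to_tm(),
 * since TM reclaim/recheckpoint can change several of them.
 */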
1288 save_sprs(&prev->thread);
1289
1290
1291 giveup_all(prev);
1292
1293 __switch_to_tm(prev, new);
1294
1295 if (!radix_enabled()) {
1296
1297
1298
1299
1300
1301 hard_irq_disable();
1302 }
1303
1304
1305
1306
1307
1308
1309
1310
1311 restore_sprs(old_thread, new_thread);
1312
1313 set_return_regs_changed();
1314
1315 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1316 kuap_assert_locked();
1317
1318 last = _switch(old_thread, new_thread);
1319
1320
1321
1322
1323
1324
1325
1326
1327 #ifdef CONFIG_PPC_BOOK3S_64
1328 #ifdef CONFIG_PPC_64S_HASH_MMU
1329
1330
1331
1332
1333
1334
1335 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1336 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1337 batch = this_cpu_ptr(&ppc64_tlb_batch);
1338 batch->active = 1;
1339 }
1340 #endif
1341
1342
1343
1344
1345
1346
1347 if (current->thread.regs)
1348 restore_math(current->thread.regs);
1349 #endif
1350
1351 return last;
1352 }
1353
1354 #define NR_INSN_TO_PRINT 16
1355
1356 static void show_instructions(struct pt_regs *regs)
1357 {
1358 int i;
1359 unsigned long nip = regs->nip;
1360 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1361
1362 printk("Instruction dump:");
1363
1364
1365
1366
1367
1368 if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
1369 pc = (unsigned long)phys_to_virt(pc);
1370 nip = (unsigned long)phys_to_virt(regs->nip);
1371 }
1372
1373 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1374 int instr;
1375
1376 if (!(i % 8))
1377 pr_cont("\n");
1378
1379 if (!__kernel_text_address(pc) ||
1380 get_kernel_nofault(instr, (const void *)pc)) {
1381 pr_cont("XXXXXXXX ");
1382 } else {
1383 if (nip == pc)
1384 pr_cont("<%08x> ", instr);
1385 else
1386 pr_cont("%08x ", instr);
1387 }
1388
1389 pc += sizeof(int);
1390 }
1391
1392 pr_cont("\n");
1393 }
1394
1395 void show_user_instructions(struct pt_regs *regs)
1396 {
1397 unsigned long pc;
1398 int n = NR_INSN_TO_PRINT;
1399 struct seq_buf s;
1400 char buf[96];
1401
1402 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1403
1404 seq_buf_init(&s, buf, sizeof(buf));
1405
1406 while (n) {
1407 int i;
1408
1409 seq_buf_clear(&s);
1410
1411 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1412 int instr;
1413
1414 if (copy_from_user_nofault(&instr, (void __user *)pc,
1415 sizeof(instr))) {
1416 seq_buf_printf(&s, "XXXXXXXX ");
1417 continue;
1418 }
1419 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1420 }
1421
1422 if (!seq_buf_has_overflowed(&s))
1423 pr_info("%s[%d]: code: %s\n", current->comm,
1424 current->pid, s.buffer);
1425 }
1426 }
1427
1428 struct regbit {
1429 unsigned long bit;
1430 const char *name;
1431 };
1432
1433 static struct regbit msr_bits[] = {
1434 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1435 {MSR_SF, "SF"},
1436 {MSR_HV, "HV"},
1437 #endif
1438 {MSR_VEC, "VEC"},
1439 {MSR_VSX, "VSX"},
1440 #ifdef CONFIG_BOOKE
1441 {MSR_CE, "CE"},
1442 #endif
1443 {MSR_EE, "EE"},
1444 {MSR_PR, "PR"},
1445 {MSR_FP, "FP"},
1446 {MSR_ME, "ME"},
1447 #ifdef CONFIG_BOOKE
1448 {MSR_DE, "DE"},
1449 #else
1450 {MSR_SE, "SE"},
1451 {MSR_BE, "BE"},
1452 #endif
1453 {MSR_IR, "IR"},
1454 {MSR_DR, "DR"},
1455 {MSR_PMM, "PMM"},
1456 #ifndef CONFIG_BOOKE
1457 {MSR_RI, "RI"},
1458 {MSR_LE, "LE"},
1459 #endif
1460 {0, NULL}
1461 };
1462
1463 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1464 {
1465 const char *s = "";
1466
1467 for (; bits->bit; ++bits)
1468 if (val & bits->bit) {
1469 pr_cont("%s%s", s, bits->name);
1470 s = sep;
1471 }
1472 }
1473
1474 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1475 static struct regbit msr_tm_bits[] = {
1476 {MSR_TS_T, "T"},
1477 {MSR_TS_S, "S"},
1478 {MSR_TM, "E"},
1479 {0, NULL}
1480 };
1481
1482 static void print_tm_bits(unsigned long val)
1483 {
1484
1485
1486
1487
1488
1489
1490
1491 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1492 pr_cont(",TM[");
1493 print_bits(val, msr_tm_bits, "");
1494 pr_cont("]");
1495 }
1496 }
1497 #else
1498 static void print_tm_bits(unsigned long val) {}
1499 #endif
1500
1501 static void print_msr_bits(unsigned long val)
1502 {
1503 pr_cont("<");
1504 print_bits(val, msr_bits, ",");
1505 print_tm_bits(val);
1506 pr_cont(">");
1507 }
1508
1509 #ifdef CONFIG_PPC64
1510 #define REG "%016lx"
1511 #define REGS_PER_LINE 4
1512 #else
1513 #define REG "%08lx"
1514 #define REGS_PER_LINE 8
1515 #endif
1516
1517 static void __show_regs(struct pt_regs *regs)
1518 {
1519 int i, trap;
1520
1521 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1522 regs->nip, regs->link, regs->ctr);
1523 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1524 regs, regs->trap, print_tainted(), init_utsname()->release);
1525 printk("MSR: "REG" ", regs->msr);
1526 print_msr_bits(regs->msr);
1527 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1528 trap = TRAP(regs);
1529 if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
1530 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1531 if (trap == INTERRUPT_MACHINE_CHECK ||
1532 trap == INTERRUPT_DATA_STORAGE ||
1533 trap == INTERRUPT_ALIGNMENT) {
1534 if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
1535 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
1536 else
1537 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1538 }
1539
1540 #ifdef CONFIG_PPC64
1541 pr_cont("IRQMASK: %lx ", regs->softe);
1542 #endif
1543 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1544 if (MSR_TM_ACTIVE(regs->msr))
1545 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1546 #endif
1547
1548 for (i = 0; i < 32; i++) {
1549 if ((i % REGS_PER_LINE) == 0)
1550 pr_cont("\nGPR%02d: ", i);
1551 pr_cont(REG " ", regs->gpr[i]);
1552 }
1553 pr_cont("\n");
1554
1555
1556
1557
1558 if (IS_ENABLED(CONFIG_KALLSYMS)) {
1559 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1560 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1561 }
1562 }
1563
1564 void show_regs(struct pt_regs *regs)
1565 {
1566 show_regs_print_info(KERN_DEFAULT);
1567 __show_regs(regs);
1568 show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
1569 if (!user_mode(regs))
1570 show_instructions(regs);
1571 }
1572
1573 void flush_thread(void)
1574 {
1575 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1576 flush_ptrace_hw_breakpoint(current);
1577 #else
1578 set_debug_reg_defaults(&current->thread);
1579 #endif
1580 }
1581
1582 void arch_setup_new_exec(void)
1583 {
1584
1585 #ifdef CONFIG_PPC_BOOK3S_64
1586 if (!radix_enabled())
1587 hash__setup_new_exec();
1588 #endif
1589
1590
1591
1592
1593 if (!current->thread.regs) {
1594 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1595 current->thread.regs = regs - 1;
1596 }
1597
1598 #ifdef CONFIG_PPC_MEM_KEYS
1599 current->thread.regs->amr = default_amr;
1600 current->thread.regs->iamr = default_iamr;
1601 #endif
1602 }
1603
1604 #ifdef CONFIG_PPC64
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
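/*
 * Assign the current task a TIDR value derived from its PID and write it
 * to SPRN_TIDR. A task may only set its own TIDR, and once assigned it is
 * never changed.
 */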
1638 int set_thread_tidr(struct task_struct *t)
1639 {
1640 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1641 return -EINVAL;
1642
1643 if (t != current)
1644 return -EINVAL;
1645
1646 if (t->thread.tidr)
1647 return 0;
1648
1649 t->thread.tidr = (u16)task_pid_nr(t);
1650 mtspr(SPRN_TIDR, t->thread.tidr);
1651
1652 return 0;
1653 }
1654 EXPORT_SYMBOL_GPL(set_thread_tidr);
1655
1656 #endif
1657
1658 void
1659 release_thread(struct task_struct *t)
1660 {
1661 }
1662
1663
1664
1665
1666
1667 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1668 {
1669 flush_all_to_thread(src);
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680 __switch_to_tm(src, src);
1681
1682 *dst = *src;
1683
1684 clear_task_ebb(dst);
1685
1686 return 0;
1687 }
1688
1689 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1690 {
1691 #ifdef CONFIG_PPC_64S_HASH_MMU
1692 unsigned long sp_vsid;
1693 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1694
1695 if (radix_enabled())
1696 return;
1697
1698 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1699 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1700 << SLB_VSID_SHIFT_1T;
1701 else
1702 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1703 << SLB_VSID_SHIFT;
1704 sp_vsid |= SLB_VSID_KERNEL | llp;
1705 p->thread.ksp_vsid = sp_vsid;
1706 #endif
1707 }
1708
1709
1710
1711
1712
1713
1714
1715
1716 int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
1717 {
1718 unsigned long clone_flags = args->flags;
1719 unsigned long usp = args->stack;
1720 unsigned long tls = args->tls;
1721 struct pt_regs *childregs, *kregs;
1722 extern void ret_from_fork(void);
1723 extern void ret_from_fork_scv(void);
1724 extern void ret_from_kernel_thread(void);
1725 void (*f)(void);
1726 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1727 struct thread_info *ti = task_thread_info(p);
1728 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1729 int i;
1730 #endif
1731
1732 klp_init_thread_info(p);
1733
1734
1735 sp -= sizeof(struct pt_regs);
1736 childregs = (struct pt_regs *) sp;
1737 if (unlikely(args->fn)) {
1738
1739 memset(childregs, 0, sizeof(struct pt_regs));
1740 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1741
1742 if (args->fn)
1743 childregs->gpr[14] = ppc_function_entry((void *)args->fn);
1744 #ifdef CONFIG_PPC64
1745 clear_tsk_thread_flag(p, TIF_32BIT);
1746 childregs->softe = IRQS_ENABLED;
1747 #endif
1748 childregs->gpr[15] = (unsigned long)args->fn_arg;
1749 p->thread.regs = NULL;
1750 ti->flags |= _TIF_RESTOREALL;
1751 f = ret_from_kernel_thread;
1752 } else {
1753
1754 struct pt_regs *regs = current_pt_regs();
1755 *childregs = *regs;
1756 if (usp)
1757 childregs->gpr[1] = usp;
1758 p->thread.regs = childregs;
1759
1760 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1761 childregs->gpr[3] = 0;
1762 if (clone_flags & CLONE_SETTLS) {
1763 if (!is_32bit_task())
1764 childregs->gpr[13] = tls;
1765 else
1766 childregs->gpr[2] = tls;
1767 }
1768
1769 if (trap_is_scv(regs))
1770 f = ret_from_fork_scv;
1771 else
1772 f = ret_from_fork;
1773 }
1774 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1775 sp -= STACK_FRAME_OVERHEAD;
1776
1777
1778
1779
1780
1781
1782
1783
1784
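/*
 * Build the child's initial kernel stack: a zeroed back-chain word, a
 * pt_regs area (kregs) whose NIP will be set to the ret_from_fork /
 * ret_from_kernel_thread entry chosen above, and a minimal frame below it
 * that _switch() unwinds the first time the child runs.
 */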
1785 ((unsigned long *)sp)[0] = 0;
1786 sp -= sizeof(struct pt_regs);
1787 kregs = (struct pt_regs *) sp;
1788 sp -= STACK_FRAME_OVERHEAD;
1789 p->thread.ksp = sp;
1790 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1791 for (i = 0; i < nr_wp_slots(); i++)
1792 p->thread.ptrace_bps[i] = NULL;
1793 #endif
1794
1795 #ifdef CONFIG_PPC_FPU_REGS
1796 p->thread.fp_save_area = NULL;
1797 #endif
1798 #ifdef CONFIG_ALTIVEC
1799 p->thread.vr_save_area = NULL;
1800 #endif
1801 #if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
1802 p->thread.kuap = KUAP_NONE;
1803 #endif
1804 #if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
1805 p->thread.pid = MMU_NO_CONTEXT;
1806 #endif
1807
1808 setup_ksp_vsid(p, sp);
1809
1810 #ifdef CONFIG_PPC64
1811 if (cpu_has_feature(CPU_FTR_DSCR)) {
1812 p->thread.dscr_inherit = current->thread.dscr_inherit;
1813 p->thread.dscr = mfspr(SPRN_DSCR);
1814 }
1815 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1816 childregs->ppr = DEFAULT_PPR;
1817
1818 p->thread.tidr = 0;
1819 #endif
1820
1821
1822
1823 #ifdef CONFIG_PPC_PKEY
1824 if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
1825 kregs->amr = AMR_KUAP_BLOCKED;
1826
1827 if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
1828 kregs->iamr = AMR_KUEP_BLOCKED;
1829 #endif
1830 kregs->nip = ppc_function_entry(f);
1831 return 0;
1832 }
1833
1834 void preload_new_slb_context(unsigned long start, unsigned long sp);
1835
1836
1837
1838
1839 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1840 {
1841 #ifdef CONFIG_PPC64
1842 unsigned long load_addr = regs->gpr[2];
1843
1844 if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
1845 preload_new_slb_context(start, sp);
1846 #endif
1847
1848 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1849
1850
1851
1852
1853
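/*
 * exec of a new image: if the CPU is still holding a suspended transaction
 * from the old program, reclaim it now so the new thread starts clean.
 */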
1854 if (MSR_TM_SUSPENDED(mfmsr()))
1855 tm_reclaim_current(0);
1856 #endif
1857
1858 memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
1859 regs->ctr = 0;
1860 regs->link = 0;
1861 regs->xer = 0;
1862 regs->ccr = 0;
1863 regs->gpr[1] = sp;
1864
1865 #ifdef CONFIG_PPC32
1866 regs->mq = 0;
1867 regs->nip = start;
1868 regs->msr = MSR_USER;
1869 #else
1870 if (!is_32bit_task()) {
1871 unsigned long entry;
1872
1873 if (is_elf2_task()) {
1874
1875 entry = start;
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885 regs->gpr[12] = start;
1886
1887 set_thread_flag(TIF_RESTOREALL);
1888 } else {
1889 unsigned long toc;
1890
1891
1892
1893
1894
1895
1896
1897 __get_user(entry, (unsigned long __user *)start);
1898 __get_user(toc, (unsigned long __user *)start+1);
1899
1900
1901
1902
1903 if (load_addr != 0) {
1904 entry += load_addr;
1905 toc += load_addr;
1906 }
1907 regs->gpr[2] = toc;
1908 }
1909 regs_set_return_ip(regs, entry);
1910 regs_set_return_msr(regs, MSR_USER64);
1911 } else {
1912 regs->gpr[2] = 0;
1913 regs_set_return_ip(regs, start);
1914 regs_set_return_msr(regs, MSR_USER32);
1915 }
1916
1917 #endif
1918 #ifdef CONFIG_VSX
1919 current->thread.used_vsr = 0;
1920 #endif
1921 current->thread.load_slb = 0;
1922 current->thread.load_fp = 0;
1923 #ifdef CONFIG_PPC_FPU_REGS
1924 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1925 current->thread.fp_save_area = NULL;
1926 #endif
1927 #ifdef CONFIG_ALTIVEC
1928 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1929 current->thread.vr_state.vscr.u[3] = 0x00010000;
1930 current->thread.vr_save_area = NULL;
1931 current->thread.vrsave = 0;
1932 current->thread.used_vr = 0;
1933 current->thread.load_vec = 0;
1934 #endif
1935 #ifdef CONFIG_SPE
1936 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1937 current->thread.acc = 0;
1938 current->thread.spefscr = 0;
1939 current->thread.used_spe = 0;
1940 #endif
1941 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1942 current->thread.tm_tfhar = 0;
1943 current->thread.tm_texasr = 0;
1944 current->thread.tm_tfiar = 0;
1945 current->thread.load_tm = 0;
1946 #endif
1947 }
1948 EXPORT_SYMBOL(start_thread);
1949
1950 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1951 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1952
1953 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1954 {
1955 struct pt_regs *regs = tsk->thread.regs;
1956
1957
1958
1959
1960
1961 if (val & PR_FP_EXC_SW_ENABLE) {
1962 if (cpu_has_feature(CPU_FTR_SPE)) {
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 #ifdef CONFIG_SPE
1976 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1977 tsk->thread.fpexc_mode = val &
1978 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1979 #endif
1980 return 0;
1981 } else {
1982 return -EINVAL;
1983 }
1984 }
1985
1986
1987
1988
1989
1990
1991 if (val > PR_FP_EXC_PRECISE)
1992 return -EINVAL;
1993 tsk->thread.fpexc_mode = __pack_fe01(val);
1994 if (regs != NULL && (regs->msr & MSR_FP) != 0) {
1995 regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
1996 | tsk->thread.fpexc_mode);
1997 }
1998 return 0;
1999 }
2000
2001 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
2002 {
2003 unsigned int val = 0;
2004
2005 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
2006 if (cpu_has_feature(CPU_FTR_SPE)) {
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019 #ifdef CONFIG_SPE
2020 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
2021 val = tsk->thread.fpexc_mode;
2022 #endif
2023 } else
2024 return -EINVAL;
2025 } else {
2026 val = __unpack_fe01(tsk->thread.fpexc_mode);
2027 }
2028 return put_user(val, (unsigned int __user *) adr);
2029 }
2030
2031 int set_endian(struct task_struct *tsk, unsigned int val)
2032 {
2033 struct pt_regs *regs = tsk->thread.regs;
2034
2035 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
2036 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
2037 return -EINVAL;
2038
2039 if (regs == NULL)
2040 return -EINVAL;
2041
2042 if (val == PR_ENDIAN_BIG)
2043 regs_set_return_msr(regs, regs->msr & ~MSR_LE);
2044 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
2045 regs_set_return_msr(regs, regs->msr | MSR_LE);
2046 else
2047 return -EINVAL;
2048
2049 return 0;
2050 }
2051
2052 int get_endian(struct task_struct *tsk, unsigned long adr)
2053 {
2054 struct pt_regs *regs = tsk->thread.regs;
2055 unsigned int val;
2056
2057 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2058 !cpu_has_feature(CPU_FTR_REAL_LE))
2059 return -EINVAL;
2060
2061 if (regs == NULL)
2062 return -EINVAL;
2063
2064 if (regs->msr & MSR_LE) {
2065 if (cpu_has_feature(CPU_FTR_REAL_LE))
2066 val = PR_ENDIAN_LITTLE;
2067 else
2068 val = PR_ENDIAN_PPC_LITTLE;
2069 } else
2070 val = PR_ENDIAN_BIG;
2071
2072 return put_user(val, (unsigned int __user *)adr);
2073 }
2074
2075 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
2076 {
2077 tsk->thread.align_ctl = val;
2078 return 0;
2079 }
2080
2081 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
2082 {
2083 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
2084 }
2085
2086 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2087 unsigned long nbytes)
2088 {
2089 unsigned long stack_page;
2090 unsigned long cpu = task_cpu(p);
2091
2092 stack_page = (unsigned long)hardirq_ctx[cpu];
2093 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2094 return 1;
2095
2096 stack_page = (unsigned long)softirq_ctx[cpu];
2097 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2098 return 1;
2099
2100 return 0;
2101 }
2102
2103 static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
2104 unsigned long nbytes)
2105 {
2106 #ifdef CONFIG_PPC64
2107 unsigned long stack_page;
2108 unsigned long cpu = task_cpu(p);
2109
2110 if (!paca_ptrs)
2111 return 0;
2112
2113 stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
2114 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2115 return 1;
2116
2117 # ifdef CONFIG_PPC_BOOK3S_64
2118 stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
2119 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2120 return 1;
2121
2122 stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
2123 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2124 return 1;
2125 # endif
2126 #endif
2127
2128 return 0;
2129 }
2130
2131
2132 int validate_sp(unsigned long sp, struct task_struct *p,
2133 unsigned long nbytes)
2134 {
2135 unsigned long stack_page = (unsigned long)task_stack_page(p);
2136
2137 if (sp < THREAD_SIZE)
2138 return 0;
2139
2140 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2141 return 1;
2142
2143 if (valid_irq_stack(sp, p, nbytes))
2144 return 1;
2145
2146 return valid_emergency_stack(sp, p, nbytes);
2147 }
2148
2149 EXPORT_SYMBOL(validate_sp);
2150
2151 static unsigned long ___get_wchan(struct task_struct *p)
2152 {
2153 unsigned long ip, sp;
2154 int count = 0;
2155
2156 sp = p->thread.ksp;
2157 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2158 return 0;
2159
2160 do {
2161 sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
2162 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2163 task_is_running(p))
2164 return 0;
2165 if (count > 0) {
2166 ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
2167 if (!in_sched_functions(ip))
2168 return ip;
2169 }
2170 } while (count++ < 16);
2171 return 0;
2172 }
2173
2174 unsigned long __get_wchan(struct task_struct *p)
2175 {
2176 unsigned long ret;
2177
2178 if (!try_get_task_stack(p))
2179 return 0;
2180
2181 ret = ___get_wchan(p);
2182
2183 put_task_stack(p);
2184
2185 return ret;
2186 }
2187
2188 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2189
2190 void __no_sanitize_address show_stack(struct task_struct *tsk,
2191 unsigned long *stack,
2192 const char *loglvl)
2193 {
2194 unsigned long sp, ip, lr, newsp;
2195 int count = 0;
2196 int firstframe = 1;
2197 unsigned long ret_addr;
2198 int ftrace_idx = 0;
2199
2200 if (tsk == NULL)
2201 tsk = current;
2202
2203 if (!try_get_task_stack(tsk))
2204 return;
2205
2206 sp = (unsigned long) stack;
2207 if (sp == 0) {
2208 if (tsk == current)
2209 sp = current_stack_frame();
2210 else
2211 sp = tsk->thread.ksp;
2212 }
2213
2214 lr = 0;
2215 printk("%sCall Trace:\n", loglvl);
2216 do {
2217 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2218 break;
2219
2220 stack = (unsigned long *) sp;
2221 newsp = stack[0];
2222 ip = stack[STACK_FRAME_LR_SAVE];
2223 if (!firstframe || ip != lr) {
2224 printk("%s["REG"] ["REG"] %pS",
2225 loglvl, sp, ip, (void *)ip);
2226 ret_addr = ftrace_graph_ret_addr(current,
2227 &ftrace_idx, ip, stack);
2228 if (ret_addr != ip)
2229 pr_cont(" (%pS)", (void *)ret_addr);
2230 if (firstframe)
2231 pr_cont(" (unreliable)");
2232 pr_cont("\n");
2233 }
2234 firstframe = 0;
2235
2236
2237
2238
2239
2240 if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
2241 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2242 struct pt_regs *regs = (struct pt_regs *)
2243 (sp + STACK_FRAME_OVERHEAD);
2244
2245 lr = regs->link;
2246 printk("%s--- interrupt: %lx at %pS\n",
2247 loglvl, regs->trap, (void *)regs->nip);
2248 __show_regs(regs);
2249 printk("%s--- interrupt: %lx\n",
2250 loglvl, regs->trap);
2251
2252 firstframe = 1;
2253 }
2254
2255 sp = newsp;
2256 } while (count++ < kstack_depth_to_print);
2257
2258 put_task_stack(tsk);
2259 }
2260
2261 #ifdef CONFIG_PPC64
2262
2263 void notrace __ppc64_runlatch_on(void)
2264 {
2265 struct thread_info *ti = current_thread_info();
2266
2267 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2268
2269
2270
2271
2272
2273 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2274 } else {
2275 unsigned long ctrl;
2276
2277
2278
2279
2280
2281 ctrl = mfspr(SPRN_CTRLF);
2282 ctrl |= CTRL_RUNLATCH;
2283 mtspr(SPRN_CTRLT, ctrl);
2284 }
2285
2286 ti->local_flags |= _TLF_RUNLATCH;
2287 }
2288
2289
2290 void notrace __ppc64_runlatch_off(void)
2291 {
2292 struct thread_info *ti = current_thread_info();
2293
2294 ti->local_flags &= ~_TLF_RUNLATCH;
2295
2296 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2297 mtspr(SPRN_CTRLT, 0);
2298 } else {
2299 unsigned long ctrl;
2300
2301 ctrl = mfspr(SPRN_CTRLF);
2302 ctrl &= ~CTRL_RUNLATCH;
2303 mtspr(SPRN_CTRLT, ctrl);
2304 }
2305 }
2306 #endif
2307
2308 unsigned long arch_align_stack(unsigned long sp)
2309 {
2310 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2311 sp -= get_random_int() & ~PAGE_MASK;
2312 return sp & ~0xf;
2313 }