0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/errno.h>
0013 #include <linux/export.h>
0014 #include <linux/sched.h>
0015 #include <linux/sched/debug.h>
0016 #include <linux/sched/task.h>
0017 #include <linux/sched/task_stack.h>
0018 #include <linux/kernel.h>
0019 #include <linux/mm.h>
0020 #include <linux/fs.h>
0021 #include <linux/smp.h>
0022 #include <linux/stddef.h>
0023 #include <linux/ptrace.h>
0024 #include <linux/slab.h>
0025 #include <linux/user.h>
0026 #include <linux/delay.h>
0027 #include <linux/compat.h>
0028 #include <linux/tick.h>
0029 #include <linux/init.h>
0030 #include <linux/cpu.h>
0031 #include <linux/perf_event.h>
0032 #include <linux/elfcore.h>
0033 #include <linux/sysrq.h>
0034 #include <linux/nmi.h>
0035 #include <linux/context_tracking.h>
0036 #include <linux/signal.h>
0037
0038 #include <linux/uaccess.h>
0039 #include <asm/page.h>
0040 #include <asm/pgalloc.h>
0041 #include <asm/processor.h>
0042 #include <asm/pstate.h>
0043 #include <asm/elf.h>
0044 #include <asm/fpumacro.h>
0045 #include <asm/head.h>
0046 #include <asm/cpudata.h>
0047 #include <asm/mmu_context.h>
0048 #include <asm/unistd.h>
0049 #include <asm/hypervisor.h>
0050 #include <asm/syscalls.h>
0051 #include <asm/irq_regs.h>
0052 #include <asm/smp.h>
0053 #include <asm/pcr.h>
0054
0055 #include "kstack.h"
0056
0057
/* Per-arch idle routine, entered from the generic idle loop. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		raw_local_irq_enable();
	} else {
		unsigned long pstate;

		raw_local_irq_enable();

		/* sun4v: clear PSTATE.IE around the hypervisor yield call
		 * so we are not interrupted between the need_resched()
		 * check and actually going to sleep.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
			sun4v_cpu_yield();
			/* If we were resumed by a poke rather than an
			 * interrupt, let the scheduler know.
			 */
			scheduler_poke();
		}

		/* Set PSTATE.IE again before returning to the idle loop. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
}
0095
#ifdef CONFIG_HOTPLUG_CPU
/* Final code run on a CPU being hot-unplugged; does not return normally. */
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif
0103
#ifdef CONFIG_COMPAT
/* Dump the locals and ins of the current register window of a 32-bit
 * (compat) user task, read from its stack pointer (%o6 == u_regs[14]).
 */
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;

	/* Spill all live windows so the on-stack copy is up to date. */
	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned int)regs->u_regs[14]);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		/* Unreadable user stack: print nothing rather than garbage. */
		return;
	}

	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif
0128
/* Dump the register window addressed by the trapping frame's stack
 * pointer.  Kernel traps and 64-bit user tasks are handled here;
 * 32-bit user tasks are delegated to show_regwindow32().
 */
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		/* Spill live windows so the stack image is current. */
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			/* User trap: copy the window in from userspace. */
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				return;
			}
			rwk = &r_w;
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		/* Kernel address: also print it symbolically. */
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}
0162
/* Print the full trap-time register state: trap registers, globals,
 * outs, the current register window, and a stack backtrace.
 */
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}
0186
/* Per-CPU register/PMU snapshot area shared by the backtrace and sysrq
 * PMU dumpers below; all access is serialized by global_cpu_snapshot_lock.
 */
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);
0189
/* Capture this CPU's trap-time register state into its slot of
 * global_cpu_snapshot[] for the cross-CPU backtrace dumper.
 */
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	/* Spill all register windows so stack frames are in memory. */
	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		/* Kernel trap: record the return addresses (%i7) of up to
		 * two stack frames, but only while the frame pointers stay
		 * within a valid kernel stack.
		 */
		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	/* Written last: pollers treat non-NULL thread as "slot complete". */
	rp->thread = tp;
}
0222
0223
0224
0225
0226
0227
0228 static void __global_reg_poll(struct global_reg_snapshot *gp)
0229 {
0230 int limit = 0;
0231
0232 while (!gp->thread && ++limit < 100) {
0233 barrier();
0234 udelay(1);
0235 }
0236 }
0237
/* NMI/sysrq backtrace: gather and print trap-time register state for
 * every CPU in @mask, optionally excluding the calling CPU.
 */
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	/* Not in interrupt context: fall back to our saved trap regs. */
	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	this_cpu = raw_smp_processor_id();

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		__global_reg_self(tp, regs, this_cpu);

	/* Ask the remote CPUs to fill in their snapshot slots. */
	smp_fetch_global_regs();

	for_each_cpu(cpu, mask) {
		struct global_reg_snapshot *gp;

		if (exclude_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;

		/* Bounded wait for the remote CPU to finish its slot. */
		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			/* Kernel addresses: print symbolically. */
			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}

		/* Printing many CPUs can take a while; keep the NMI
		 * watchdog quiet.
		 */
		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
0294
0295 #ifdef CONFIG_MAGIC_SYSRQ
0296
/* Sysrq 'y' handler: dump registers of all CPUs. */
static void sysrq_handle_globreg(int key)
{
	trigger_all_cpu_backtrace();
}

static const struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};
0307
/* Record this CPU's performance-counter control (PCR) and count (PIC)
 * registers into its global_cpu_snapshot[] slot.
 */
static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	/* No PMU access methods registered on this platform. */
	if (!pcr_ops)
		return;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	/* Niagara-4 and later sun4v chips have four counter pairs;
	 * everything else here is treated as having one.
	 */
	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}
0328
0329 static void __global_pmu_poll(struct global_pmu_snapshot *pp)
0330 {
0331 int limit = 0;
0332
0333 while (!pp->pcr[0] && ++limit < 100) {
0334 barrier();
0335 udelay(1);
0336 }
0337 }
0338
/* Gather and print the PMU registers of every online CPU. */
static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	/* Ask the remote CPUs to fill in their snapshot slots. */
	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		/* Bounded wait for the remote CPU to finish its slot. */
		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);

		/* Keep the NMI watchdog quiet during the long print loop. */
		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
0371
/* Sysrq 'x' handler: dump PMU registers of all CPUs. */
static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static const struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};
0382
/* Register the sparc-specific sysrq keys ('y' regs, 'x' PMU). */
static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);
0393
0394 #endif
0395
0396
/* Task-exit hook: drop this thread's reference on the shared utrap
 * table.  utraps[0] holds the reference count (see the increment in
 * copy_thread()); free the table when we are the last user.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_info *t = task_thread_info(tsk);

	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}
}
0408
/* Reset per-thread architectural state for exec. */
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	/* Discard any user windows buffered on the kernel side. */
	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}
0423
0424
/* Duplicate the parent's topmost user stack frame onto the child's new
 * stack so the child has a valid register window to return through.
 * @csp/@psp are the child/parent stack pointers as held in %o6 (i.e.
 * without STACK_BIAS applied).  Returns the child's resulting stack
 * pointer in the same convention, or 0 on any user-access fault.
 */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		/* Read the parent's saved frame pointer (%i6). */
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* The child frame must be 16-byte aligned. */
	csp &= ~15UL;

	/* Copy the region from the parent's %sp up to its saved %fp. */
	distance = fp - psp;
	rval = (csp - distance);
	if (raw_copy_in_user((void __user *)rval, (void __user *)psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		/* Re-point the copied frame's saved %fp at the child stack. */
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			/* Return in %o6 convention (bias removed). */
			rval = rval - STACK_BIAS;
	}

	return rval;
}
0464
0465
0466 static inline void shift_window_buffer(int first_win, int last_win,
0467 struct thread_info *t)
0468 {
0469 int i;
0470
0471 for (i = first_win; i < last_win; i++) {
0472 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
0473 memcpy(&t->reg_window[i], &t->reg_window[i+1],
0474 sizeof(struct reg_window));
0475 }
0476 }
0477
/* Best-effort flush of the user register windows buffered on the
 * kernel side out to the user stack.  Windows that copy successfully
 * are removed from the buffer; ones that fault (e.g. stack page not
 * resident) are kept for a later attempt.
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			/* 64-bit frames live at sp + STACK_BIAS and are
			 * full sized; 32-bit frames are smaller.
			 */
			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				/* Success: drop this window from the buffer. */
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
0505
/* Raise SIGBUS for a misaligned user stack pointer. */
static void stack_unaligned(unsigned long sp)
{
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp);
}
0510
0511 static const char uwfault32[] = KERN_INFO \
0512 "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
0513 static const char uwfault64[] = KERN_INFO \
0514 "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
0515
/* Write all kernel-buffered user register windows out to the user
 * stack, faulting stack pages in as needed.  Unlike
 * synchronize_user_stack(), a failed write here is fatal: the task is
 * sent SIGSEGV and the unwritten windows stay buffered.
 */
void fault_in_user_windows(struct pt_regs *regs)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp, orig_sp;

			orig_sp = sp = t->rwbuf_stkptrs[window];

			/* 64-bit frames live at sp + STACK_BIAS and are
			 * full sized; 32-bit frames are smaller.
			 */
			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			/* Stack pointers must be 8-byte aligned: SIGBUS. */
			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize))) {
				if (show_unhandled_signals)
					printk_ratelimited(is_compat_task() ?
							   uwfault32 : uwfault64,
							   current->comm, current->pid,
							   sp, orig_sp,
							   regs->tpc,
							   regs->u_regs[UREG_I7]);
				goto barf;
			}
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	/* Keep the failing window and everything below it buffered. */
	set_thread_wsaved(window + 1);
	force_sig(SIGSEGV);
}
0561
0562
0563
0564
0565
0566
/* Set up the kernel stack, trap frame, and register state for a newly
 * cloned task @p.  Returns 0 on success or -EFAULT if a separate child
 * stack was requested but could not be populated.
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Reserve a stack frame plus a trap frame at the very top of the
	 * child's kernel stack.
	 */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(args->fn)) {
		/* Kernel thread: zeroed trap frame, with the function and
		 * its argument passed in %g1/%g2.
		 */
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->kregs->u_regs[UREG_G1] = (unsigned long) args->fn;
		t->kregs->u_regs[UREG_G2] = (unsigned long) args->fn_arg;
		return 0;
	}

	/* User fork/clone: the child inherits a copy of the parent's
	 * trap-time stack frame and registers.
	 */
	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	if (sp != regs->u_regs[UREG_FP]) {
		/* Caller supplied a separate child stack: replicate the
		 * parent's top frame onto it.
		 */
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;	/* New reference on the shared utrap table. */

	/* Return values seen by the child (%i0 = parent pid, %i1 = 1)... */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* ...while the parent gets 0 in %i1. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = tls;

	return 0;
}
0630
0631
0632
0633
0634
/* Architecture hook for dup_task_struct().  On ADI-capable CPUs,
 * refresh the current thread's TIF_MCDPER flag from the hardware state
 * before the struct copy below carries the flags to the child
 * (NOTE(review): assumes @src is the current task — verify at callers).
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		/* Raw-encoded privileged register read into %g1;
		 * presumably "rd %mcdper, %g1", emitted as .word because
		 * the assembler lacks a mnemonic — TODO confirm encoding.
		 */
		__asm__ __volatile__(
			".word 0x83438000\n\t"
			"mov %%g1, %0\n\t"
			: "=r" (tmp_mcdper)
			:
			: "g1");
		if (tmp_mcdper)
			set_thread_flag(TIF_MCDPER);
		else
			clear_thread_flag(TIF_MCDPER);
	}

	*dst = *src;
	return 0;
}
0655
0656 unsigned long __get_wchan(struct task_struct *task)
0657 {
0658 unsigned long pc, fp, bias = 0;
0659 struct thread_info *tp;
0660 struct reg_window *rw;
0661 unsigned long ret = 0;
0662 int count = 0;
0663
0664 tp = task_thread_info(task);
0665 bias = STACK_BIAS;
0666 fp = task_thread_info(task)->ksp + bias;
0667
0668 do {
0669 if (!kstack_valid(tp, fp))
0670 break;
0671 rw = (struct reg_window *) fp;
0672 pc = rw->ins[7];
0673 if (!in_sched_functions(pc)) {
0674 ret = pc;
0675 goto out;
0676 }
0677 fp = rw->ins[6] + bias;
0678 } while (++count < 16);
0679
0680 out:
0681 return ret;
0682 }