#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

#include <asm/mach-loongson64/cpucfg-emul.h>

#include "access-helper.h"

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_gsexc(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup))
{
	board_be_handler = handler;
}
EXPORT_SYMBOL_GPL(mips_set_be_handler);

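/*
 * Scan the stack word by word and print anything that looks like a
 * kernel text address.  Used when a proper unwind is not possible,
 * e.g. for user mode or when raw_show_trace is set.
 */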
static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
			       bool user)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
	printk("%s\n", loglvl);
#endif
	while (!kstack_end(sp)) {
		if (__get_addr(&addr, sp++, user)) {
			printk("%s (Bad stack address)", loglvl);
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp, loglvl, user);
		return;
	}
	printk("%sCall Trace:\n", loglvl);
	do {
		print_ip_sym(loglvl, pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

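/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */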
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	int i;
	unsigned long *sp = (unsigned long *)regs->regs[29];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s        ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.cp0_status = KSU_KERNEL;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs, loglvl, false);
}

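/*
 * Dump the instructions around the faulting pc.  A set low bit on the
 * pc means microMIPS/MIPS16 code, so dump 16-bit halfwords instead of
 * 32-bit words.
 */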
static void show_code(void *pc, bool user)
{
	long i;
	unsigned short *pc16 = NULL;

	printk("Code:");

	if ((unsigned long)pc & 1)
		pc16 = (u16 *)((unsigned long)pc & ~1);

	for(i = -3 ; i < 6 ; i++) {
		if (pc16) {
			u16 insn16;

			if (__get_inst16(&insn16, pc16 + i, user))
				goto bad_address;

			pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
		} else {
			u32 insn32;

			if (__get_inst32(&insn32, (u32 *)pc + i, user))
				goto bad_address;

			pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
		}
	}
	pr_cont("\n");
	return;

bad_address:
	pr_cont(" (Bad address in epc)\n\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	if (MIPS_ISA_REV < 6) {
		printk("Hi    : %0*lx\n", field, regs->hi);
		printk("Lo    : %0*lx\n", field, regs->lo);
	}

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->cp0_epc, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	make_task_dead(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

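/*
 * Given an address, look for it in the kernel and then the module
 * bus-error (__dbe_table) exception tables.
 */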
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table,
			   __stop___dbe_table - __start___dbe_table, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS);

out:
	exception_exit(prev_state);
}

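/*
 * ll/sc, rdhwr, sync emulation
 */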
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000

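/*
 * The ll_bit is cleared by r*_switch.S
 */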
unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

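/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on machines without hardware LL/SC, which is what lets us
 * emulate them here on the reserved instruction path.
 */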
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}

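/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */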
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* Read UserLocal register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

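/*
 * Loongson-3 CSR instruction emulation: the LWC2 opcode space is reused
 * for the CPUCFG instruction, which we synthesize here for cores that
 * lack it in hardware.
 */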
#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION

#define LWC2             0xc8000000
#define RS               BASE
#define CSR_OPCODE2      0x00000118
#define CSR_OPCODE2_MASK 0x000007ff
#define CSR_FUNC_MASK    RT
#define CSR_FUNC_CPUCFG  0x8

static int simulate_loongson3_cpucfg(struct pt_regs *regs,
				     unsigned int opcode)
{
	int op = opcode & OPCODE;
	int op2 = opcode & CSR_OPCODE2_MASK;
	int csr_func = (opcode & CSR_FUNC_MASK) >> 16;

	if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
		int rd = (opcode & RD) >> 11;
		int rs = (opcode & RS) >> 21;
		__u64 sel = regs->regs[rs];

		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

		/* Do not emulate on unsupported core models. */
		preempt_disable();
		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
			preempt_enable();
			return -1;
		}
		regs->regs[rd] = loongson3_cpucfg_read_synthesized(
			&current_cpu_data, sel);
		preempt_enable();
		return 0;
	}

	/* Not ours.  */
	return -1;
}
#endif

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
	exception_exit(prev_state);
}

#ifdef CONFIG_MIPS_FP_SUPPORT

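/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  Only one si_code can be relayed,
 * so pick the most significant exception in the conventional
 * Invalid > DivideByZero > Overflow > Underflow > Inexact order.
 */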
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcr31 & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

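/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */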
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.	 */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

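/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold amount of floating point operations,
 * restrict the task to CPUs with hardware FPUs so that it stops taking
 * emulation traps.
 */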
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_mask;
			cpumask_and(&tmask, &current->cpus_mask,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif
}

#else

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	return -1;
}

#endif

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig_fault(SIGFPE,
				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
				(void __user *) regs->cp0_epc);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction in the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			force_sig_fault(SIGTRAP, si_code, NULL);
		} else {
			force_sig(SIGTRAP);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	bool user = user_mode(regs);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_inst16(&instr[0], (u16 *)epc, user))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * trigger it.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	bool user = user_mode(regs);
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
		    __get_inst16(&instr[1], (u16 *)(epc + 2), user))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);

#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
		if (status < 0)
			status = simulate_loongson3_cpucfg(regs, opcode);
#endif
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status);
	}

out:
	exception_exit(prev_state);
}

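/*
 * No lock; only written during early bootup by CPU 0.
 */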
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL);

	return NOTIFY_OK;
}

#ifdef CONFIG_MIPS_FP_SUPPORT

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;
	bool first_fp;

	/* Initialize context if it hasn't been used already */
	first_fp = init_fp_ctx(current);

	if (first_fp) {
		preempt_disable();
		err = own_fpu_inatomic(1);
		if (msa && !err) {
			enable_msa();
			/*
			 * with MSA enabled, userspace can see MSACSR
			 * and MSA regs, but the values in them are from
			 * other task before current task, restore them
			 * from saved fp/msa context
			 */
			write_msa_csr(current->thread.fpu.msacsr);
			/*
			 * own_fpu_inatomic(1) just restore low 64bit,
			 * fix the high 64bit
			 */
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		return err;
	}

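	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context.  If it does have live MSA vector
	 * context then we must restore the full vector context, even if we
	 * are currently only executing a scalar FP instruction: restoring
	 * only the scalar FP registers now would zero the most significant
	 * bits of the aliased vector registers, and we could not tell later
	 * whether those bits were stale or live.
	 */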
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

#else

static int enable_restore_fp_context(int msa)
{
	return SIGILL;
}

#endif

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned int cpid;
	int status;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status);
		}

		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  If an FPU is present, or this is not
		 * a MIPS IV/V/64/r2/r6 core, a CE=3 exception can only mean
		 * a genuinely unusable coprocessor, so send SIGILL here;
		 * otherwise fall through and treat it as a COP1X (FPU)
		 * exception to be emulated.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL);
			break;
		}
		fallthrough;
	case 1: {
		void __user *fault_addr;
		unsigned long fcr31;
		int err, sig;

		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;
	}
#else
	case 1:
	case 3:
		force_sig(SIGILL);
		break;
#endif

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL);
	exception_exit(prev_state);
}

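/*
 * Called with interrupts disabled.
 */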
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * apparently owns the watch registers, and we should just
	 * ignore the exception.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	show_code((void *)regs->cp0_epc, user_mode(regs));

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
		       subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

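/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */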
static inline __init void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * On CM3 systems L1 & L2 parity checking must be enabled or
		 * disabled together, so force the two command line flags to
		 * agree: if either is disabled, disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 parity support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 parity checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 parity checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON32:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}

	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
{
	u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
			LOONGSON_DIAG1_EXCCODE_SHIFT;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	switch (exccode) {
	case 0x08:
		/* Undocumented exception, will trigger on certain
		 * also-undocumented instructions accessible from userspace.
		 * Processor state is not otherwise corrupted, but currently
		 * we don't know how to proceed. Maybe there is some
		 * undocumented control flag to enable the instructions?
		 */
		force_sig(SIGILL);
		break;

	default:
		/* None of the other exceptions, documented or not, have
		 * further details given; none are encountered in the wild
		 * either. Panic in case some of them turn out to be fatal.
		 */
		show_regs(regs);
		panic("Unhandled Loongson exception - GSCause = %08x", diag1);
	}

	exception_exit(prev_state);
}

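/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */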
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

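/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */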
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
	memblock_reserve(addr, size);
}

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern const u8 except_vec_vi[], except_vec_vi_lui[];
		extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
		extern const u8 rollback_except_vec_vi[];
		const u8 *vec_start = using_rollback_handler() ?
				      rollback_except_vec_vi : except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = except_vec_vi_lui - vec_start + 2;
		const int ori_offset = except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = except_vec_vi_lui - vec_start;
		const int ori_offset = except_vec_vi_ori - vec_start;
#endif
		const int handler_len = except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably already disabled and the exception
			 * can't be handled anyway.
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_KERNEL_CUMASK;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
	back_to_back_c0_hazard();
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}

static void configure_exception_vector(void)
{
	if (cpu_has_mips_r2_r6) {
		unsigned long sr = set_c0_status(ST0_BEV);

		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
	}
	if (cpu_has_veic || cpu_has_vint) {
		/* Set the vector spacing via the IntCtl.VS field */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (cpu_has_mmid)
		cpu_data[cpu].asid_cache = 0;
	else if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, const void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

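/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */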
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i, vec_size;
	phys_addr_t ebase_pa;

	check_wait();

	if (!cpu_has_mips_r2_r6) {
		ebase = CAC_BASE;
		vec_size = 0x400;
	} else {
		if (cpu_has_veic || cpu_has_vint)
			vec_size = 0x200 + VECTORSPACING*64;
		else
			vec_size = PAGE_SIZE;

		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
		if (!ebase_pa)
			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
			      __func__, vec_size, 1 << fls(vec_size));

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of physical,
		 * in which case we should be able to convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be rearranged
		 * and to become uncached during cache error handling.
		 */
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
		else
			ebase = (unsigned long)phys_to_virt(ebase_pa);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);
	memblock_set_bottom_up(false);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but do
	 * it different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON64)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	if (cpu_has_ftlbparex)
		set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_gsexcex)
		set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + vec_size);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU after resume. */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);