#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

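/*
 * Loongson-2EF cores have a software-visible micro ITLB, and Loongson64
 * cores have both a micro ITLB and a micro DTLB.  The hardware does not
 * keep these coherent with the main TLB, so they must be flushed through
 * the CP0 Diag register whenever main TLB entries change.
 */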
static inline void flush_micro_tlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2EF:
                write_c0_diag(LOONGSON_DIAG_ITLB);
                break;
        case CPU_LOONGSON64:
                write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
                break;
        default:
                break;
        }
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_micro_tlb();
}

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry, ftlbhighset;

        local_irq_save(flags);

        old_ctx = read_c0_entryhi();
        htw_stop();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = num_wired_entries();

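        /*
         * Blast 'em all away.  If the CPU supports the tlbinvf instruction
         * and no entries are wired, invalidate the whole VTLB in one shot
         * and then each FTLB set in turn; otherwise overwrite every entry
         * with a unique dummy translation, one index at a time.
         */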
        if (cpu_has_tlbinv && !entry) {
                if (current_cpu_data.tlbsizevtlb) {
                        write_c0_index(0);
                        mtc0_tlbw_hazard();
                        tlbinvf();
                }
                ftlbhighset = current_cpu_data.tlbsizevtlb +
                        current_cpu_data.tlbsizeftlbsets;
                for (entry = current_cpu_data.tlbsizevtlb;
                     entry < ftlbhighset;
                     entry++) {
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlbinvf();
                }
        } else {
                while (entry < current_cpu_data.tlbsize) {
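                        /* Make sure all entries differ. */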
                        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                        entry++;
                }
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        htw_start();
        flush_micro_tlb();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                local_irq_save(flags);
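                /*
                 * Each TLB entry maps an even/odd pair of pages, so round
                 * the range out to pair boundaries and count entries, not
                 * pages.  Only flush page by page if that is cheaper than
                 * dropping the whole context.
                 */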
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= (current_cpu_data.tlbsizeftlbsets ?
                             current_cpu_data.tlbsize / 8 :
                             current_cpu_data.tlbsize / 2)) {
                        unsigned long old_entryhi, old_mmid;
                        int newpid = cpu_asid(cpu, mm);

                        old_entryhi = read_c0_entryhi();
                        if (cpu_has_mmid) {
                                old_mmid = read_c0_memorymapid();
                                write_c0_memorymapid(newpid);
                        }

                        htw_stop();
                        while (start < end) {
                                int idx;

                                if (cpu_has_mmid)
                                        write_c0_entryhi(start);
                                else
                                        write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(old_entryhi);
                        if (cpu_has_mmid)
                                write_c0_memorymapid(old_mmid);
                        htw_start();
                } else {
                        /* Too big to flush page by page; get a fresh ASID. */
                        drop_mmu_context(mm);
                }
                flush_micro_tlb();
                local_irq_restore(flags);
        }
}

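/*
 * Flush a range of kernel-space (global) mappings.  Kernel entries carry
 * the global bit, so the current ASID is irrelevant here.
 */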
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= (current_cpu_data.tlbsizeftlbsets ?
                     current_cpu_data.tlbsize / 8 :
                     current_cpu_data.tlbsize / 2)) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);
                htw_stop();

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
                htw_start();
        } else {
                local_flush_tlb_all();
        }
        flush_micro_tlb();
        local_irq_restore(flags);
}

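/*
 * Flush the mapping of a single user page in @vma's address space.
 */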
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long old_mmid;
                unsigned long flags, old_entryhi;
                int idx;

                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                old_entryhi = read_c0_entryhi();
                htw_stop();
                if (cpu_has_mmid) {
                        old_mmid = read_c0_memorymapid();
                        write_c0_entryhi(page);
                        write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
                } else {
                        write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
                }
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(old_entryhi);
                if (cpu_has_mmid)
                        write_c0_memorymapid(old_mmid);
                htw_start();
                flush_micro_tlb_vm(vma);
                local_irq_restore(flags);
        }
}

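/*
 * This one is used for pages with the global bit set, so the ASID is
 * irrelevant and is deliberately not set up; EntryHi is only saved and
 * restored around the probe.
 */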
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        oldpid = read_c0_entryhi();
        htw_stop();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        htw_start();
        flush_micro_tlb();
        local_irq_restore(flags);
}

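/*
 * Update the TLB with the new pte(s) for @address: if a matching entry is
 * already present it is overwritten in place, otherwise a random slot is
 * used.
 */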
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

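        /*
         * The fault may be for an mm other than the one currently active
         * on this CPU (e.g. a debugger poking a tracee's address space);
         * in that case this CPU's TLB has nothing to update.
         */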
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        htw_stop();
        address &= (PAGE_MASK << 1);
        if (cpu_has_mmid) {
                write_c0_entryhi(address);
        } else {
                pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
                write_c0_entryhi(address | pid);
        }
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        p4dp = p4d_offset(pgdp, address);
        pudp = pud_offset(p4dp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
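        /* This could be a huge page.  */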
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
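                /*
                 * EntryLo carries the PFN starting at bit 6, i.e.
                 * paddr >> 6, so the odd half of the huge page sits at
                 * lo + (HPAGE_SIZE >> 7).
                 */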
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

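                /*
                 * With 64-bit physical addresses on a 32-bit CPU the pte
                 * spans two words: pte_high holds the bits destined for
                 * EntryLo, and with XPA the extra physical-address bits
                 * live in pte_low.
                 */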
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
                write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
                if (cpu_has_xpa)
                        writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
                ptep++;
                write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
                if (cpu_has_xpa)
                        writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#endif
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        htw_start();
        flush_micro_tlb_vm(vma);
        local_irq_restore(flags);
}

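/*
 * Install a permanent "wired" translation.  Wired entries sit below the
 * CP0 Wired index and are never victims of tlb_write_random().
 */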
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
        panic("Broken for XPA kernels");
#else
        unsigned int old_mmid;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        if (cpu_has_mmid) {
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(MMID_KERNEL_WIRED);
        }

        old_ctx = read_c0_entryhi();
        htw_stop();
        old_pagemask = read_c0_pagemask();
        wired = num_wired_entries();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        if (cpu_has_mmid)
                write_c0_memorymapid(old_mmid);
        tlbw_use_hazard();
        htw_start();
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

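/*
 * Probe for huge-page support by writing PM_HUGE_MASK to the PageMask
 * register and reading it back: bits the MMU does not implement read
 * back as zero.
 */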
int has_transparent_hugepage(void)
{
        static unsigned int mask = -1;

        if (mask == -1) {
                unsigned long flags;

                local_irq_save(flags);
                write_c0_pagemask(PM_HUGE_MASK);
                back_to_back_c0_hazard();
                mask = read_c0_pagemask();
                write_c0_pagemask(PM_DEFAULT_MASK);
                local_irq_restore(flags);
        }
        return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

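/*
 * Temporary TLB entries are allocated downwards from the top of the TLB.
 * They are used early in boot for mappings that should not permanently
 * consume a wired slot.
 */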
int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);

        htw_stop();
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = num_wired_entries();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
        htw_start();
out:
        local_irq_restore(flags);
        return ret;
}

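/*
 * "ntlb=N" on the kernel command line restricts the TLB to N entries by
 * wiring off the rest; the restriction is applied in tlb_init() below.
 */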
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

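/*
 * Configure the TLB from scratch; called at boot and again when a CPU is
 * powered back up (see the CPU PM notifier below), since power-gating may
 * have lost MMU state.
 */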
static void r4k_tlb_configure(void)
{
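        /*
         * The mm code assumes fixed-size pages throughout, so PageMask
         * must stay at PM_DEFAULT_MASK; all we can do here is check that
         * the MMU really supports the configured page size.
         */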
        write_c0_pagemask(PM_DEFAULT_MASK);
        back_to_back_c0_hazard();
        if (read_c0_pagemask() != PM_DEFAULT_MASK)
                panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000 ||
            current_cpu_type() == CPU_R16000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
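                /*
                 * Enable the no-read and no-exec (RI/XI) page protection
                 * bits, and on 64-bit also enable large physical
                 * addresses.
                 */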
#ifdef CONFIG_64BIT
                set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
                set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
        }

        temp_tlb_entry = current_cpu_data.tlbsize - 1;
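
        /* From this point on, any TLB entries set up by the firmware are gone. */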
        local_flush_tlb_all();
}

void tlb_init(void)
{
        r4k_tlb_configure();

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}

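/*
 * Reprogram the TLB when a CPU comes back from a low-power state in which
 * MMU state was lost (or when entry to that state failed part-way).
 */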
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
                               void *v)
{
        switch (cmd) {
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                r4k_tlb_configure();
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
        .notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
        return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);