0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #include <linux/kernel.h>
0026 #include <linux/export.h>
0027 #include <linux/mm.h>
0028 #include <linux/init.h>
0029 #include <linux/highmem.h>
0030 #include <linux/pagemap.h>
0031 #include <linux/preempt.h>
0032 #include <linux/spinlock.h>
0033 #include <linux/memblock.h>
0034 #include <linux/of_fdt.h>
0035 #include <linux/hugetlb.h>
0036
0037 #include <asm/pgalloc.h>
0038 #include <asm/tlbflush.h>
0039 #include <asm/tlb.h>
0040 #include <asm/code-patching.h>
0041 #include <asm/cputhreads.h>
0042 #include <asm/hugetlb.h>
0043 #include <asm/paca.h>
0044
0045 #include <mm/mmu_decl.h>
0046
0047
0048
0049
0050
0051
0052 #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
0053 #ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * Software page-size table for FSL Book3E: maps each Linux MMU page
 * size index to its shift and the hardware TSIZE encoding (.enc)
 * programmed into the MAS registers.
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift = 12,
		.enc = BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift = 21,
		.enc = BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift = 22,
		.enc = BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift = 24,
		.enc = BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift = 26,
		.enc = BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift = 28,
		.enc = BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift = 30,
		.enc = BOOK3E_PAGESZ_1GB,
	},
};
0084 #elif defined(CONFIG_PPC_8xx)
/*
 * Software page-size table for the 8xx. Only shifts are listed here;
 * no .enc field is set (the 8xx variant of mmu_psize_defs carries no
 * Book3E TSIZE encoding).
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift = 12,
	},
	[MMU_PAGE_16K] = {
		.shift = 14,
	},
	[MMU_PAGE_512K] = {
		.shift = 19,
	},
	[MMU_PAGE_8M] = {
		.shift = 23,
	},
};
0099 #else
/*
 * Software page-size table for generic (non-FSL) Book3E.  .enc is the
 * hardware TSIZE encoding; .ind, where set, is the indirect page-size
 * shift usable when this size hosts indirect (page-table) TLB entries
 * (it may be overwritten at boot by setup_page_sizes()).
 */
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift = 12,
		.ind = 20,
		.enc = BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift = 14,
		.enc = BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift = 16,
		.ind = 28,
		.enc = BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift = 20,
		.enc = BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift = 24,
		.ind = 36,
		.enc = BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift = 28,
		.enc = BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift = 30,
		.enc = BOOK3E_PAGESZ_1GB,
	},
};
0133 #endif
0134
/* Translate a Linux MMU page-size index into the hardware TSIZE encoding */
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
0139 #else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E/!8xx — the flush helpers ignore it */
	return 0;
}
0145 #endif
0146
0147
0148
0149
0150
0151 #ifdef CONFIG_PPC64
0152
/* Page size used for PTE pages, flushed via tlb_flush_pgtable() */
int mmu_pte_psize;
/* Page size used for the virtual memory map (set in early_init_mmu_global()) */
int mmu_vmemmap_psize;
/* Hardware tablewalk mode: one of the PPC_HTW_* values */
int book3e_htw_mode;
/* Top of the bolted linear mapping, used by the memory-limit setup */
unsigned long linear_map_top;

/*
 * Extra exception-frame space used by the level-aware TLB miss handlers;
 * set to EX_TLB_SIZE for the bolted and e6500 variants (see
 * setup_mmu_htw() and early_init_mmu_global()), zero otherwise.
 */
int extlb_level_exc;
0166
0167 #endif
0168
0169 #ifdef CONFIG_PPC_FSL_BOOK3E
0170
/*
 * Per-CPU index of the next TLBCAM slot to use.
 * NOTE(review): not referenced in this file — presumably consumed by the
 * FSL hugetlb/TLBCAM code; confirm against arch/powerpc hugetlb support.
 */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
0173 #endif
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187 #ifndef CONFIG_PPC_8xx
0188
0189
0190
/*
 * Flush all TLB entries for @mm's context on the local CPU only.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	/* MMU_NO_CONTEXT: no PID allocated yet, so nothing can be cached */
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);
0202
/*
 * Flush one page's TLB entry on the local CPU.
 *
 * @mm:     address space; NULL means use PID 0
 * @vmaddr: virtual address to invalidate
 * @tsize:  hardware page-size encoding passed to _tlbil_va()
 * @ind:    non-zero to target indirect (page-table) entries
 */
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}
0214
0215 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
0216 {
0217 __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
0218 mmu_get_tsize(mmu_virtual_psize), 0);
0219 }
0220 EXPORT_SYMBOL(local_flush_tlb_page);
0221 #endif
0222
0223
0224
0225
0226 #ifdef CONFIG_SMP
0227
/*
 * Serialises broadcast tlbivax on cores that require it
 * (MMU_FTR_LOCK_BCAST_INVAL; see __flush_tlb_page()).
 */
static DEFINE_RAW_SPINLOCK(tlbivax_lock);

/* Arguments marshalled across to the TLB-flush IPI handlers below */
struct tlb_flush_param {
	unsigned long addr;	/* virtual address to invalidate */
	unsigned int pid;	/* context id owning the mapping */
	unsigned int tsize;	/* hardware page-size encoding */
	unsigned int ind;	/* non-zero for indirect (page-table) entries */
};
0236
0237 static void do_flush_tlb_mm_ipi(void *param)
0238 {
0239 struct tlb_flush_param *p = param;
0240
0241 _tlbil_pid(p ? p->pid : 0);
0242 }
0243
/*
 * IPI handler: invalidate a single TLB entry on this CPU.  @param is
 * never NULL here — both callers in this file pass a stack-allocated
 * struct tlb_flush_param.
 */
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/*
 * Flush all TLB entries for @mm's context on every CPU that has run it.
 * Remote CPUs are reached by IPI (do_flush_tlb_mm_ipi); the local CPU
 * always flushes directly at the end.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };

		/* smp_call_function_many() skips the calling CPU */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
0287
/*
 * Flush one page's TLB entry on every CPU that has run @mm.
 *
 * Uses broadcast tlbivax when the hardware supports it (optionally
 * serialised by tlbivax_lock on parts that need it), otherwise IPIs
 * the other CPUs in the mm's cpumask.  The local CPU is flushed
 * directly with _tlbil_va() in the non-broadcast paths.
 *
 * @tsize is the hardware page-size encoding; @ind selects indirect
 * (page-table) entries when non-zero.  @mm must not be NULL.
 */
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function must only be called for user contexts: a NULL mm
	 * is a caller bug (local_flush_tlb_page()-style NULL handling does
	 * not apply here).
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			/* broadcast reached the local CPU too: skip _tlbil_va */
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};

			/* smp_call_function_many() skips the calling CPU */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
0332
0333 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
0334 {
0335 #ifdef CONFIG_HUGETLB_PAGE
0336 if (vma && is_vm_hugetlb_page(vma))
0337 flush_hugetlb_page(vma, vmaddr);
0338 #endif
0339
0340 __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
0341 mmu_get_tsize(mmu_virtual_psize), 0);
0342 }
0343 EXPORT_SYMBOL(flush_tlb_page);
0344
0345 #endif
0346
0347 #ifdef CONFIG_PPC_47x
/*
 * 47x early MMU setup: if the flattened device tree marks this as a
 * cooperative partition, broadcast tlbivax must not be used, so clear
 * the feature and fall back to the IPI-based flush path.
 */
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif
}
0356 #endif
0357
0358
0359
0360
0361 #ifndef CONFIG_PPC_8xx
/*
 * Flush kernel TLB entries in the given range.  No range tracking is
 * done: PID 0 is flushed in its entirety, on every CPU when SMP
 * (do_flush_tlb_mm_ipi with a NULL param flushes PID 0 remotely).
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
0374 #endif
0375
0376
0377
0378
0379
0380
0381
0382 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
0383 unsigned long end)
0384
0385 {
0386 if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
0387 flush_tlb_page(vma, start);
0388 else
0389 flush_tlb_mm(vma->vm_mm);
0390 }
0391 EXPORT_SYMBOL(flush_tlb_range);
0392
0393 void tlb_flush(struct mmu_gather *tlb)
0394 {
0395 flush_tlb_mm(tlb->mm);
0396 }
0397
0398
0399
0400
0401
0402
0403 #ifdef CONFIG_PPC64
0404
0405
0406
0407
0408
/*
 * Called when a page-table page is freed, to knock out any TLB entries
 * caching translations of the PTEs it contained.
 *
 * With hardware tablewalk, the TLB may hold indirect (IND) entries
 * covering the PMD range, so every indirect-entry-sized step across
 * the range is invalidated.  Without HW tablewalk, the software miss
 * handlers cache PTEs through the virtual linear page table, so the
 * corresponding VPTE address is flushed instead.
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		/* step = size covered by one indirect entry (mmu_pte_psize) */
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		/*
		 * Build the address in VPTE space that maps this
		 * address's PTE page and flush that single entry.
		 * NOTE(review): the 0x1000...ul constant selects the VPTE
		 * region used by the SW TLB miss handler — confirm against
		 * the miss-handler assembly if this mapping changes.
		 */
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
0436
0437 static void __init setup_page_sizes(void)
0438 {
0439 unsigned int tlb0cfg;
0440 unsigned int tlb0ps;
0441 unsigned int eptcfg;
0442 int i, psize;
0443
0444 #ifdef CONFIG_PPC_FSL_BOOK3E
0445 unsigned int mmucfg = mfspr(SPRN_MMUCFG);
0446 int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
0447
0448 if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
0449 unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
0450 unsigned int min_pg, max_pg;
0451
0452 min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
0453 max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
0454
0455 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0456 struct mmu_psize_def *def;
0457 unsigned int shift;
0458
0459 def = &mmu_psize_defs[psize];
0460 shift = def->shift;
0461
0462 if (shift == 0 || shift & 1)
0463 continue;
0464
0465
0466 shift = (shift - 10) >> 1;
0467
0468 if ((shift >= min_pg) && (shift <= max_pg))
0469 def->flags |= MMU_PAGE_SIZE_DIRECT;
0470 }
0471
0472 goto out;
0473 }
0474
0475 if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
0476 u32 tlb1cfg, tlb1ps;
0477
0478 tlb0cfg = mfspr(SPRN_TLB0CFG);
0479 tlb1cfg = mfspr(SPRN_TLB1CFG);
0480 tlb1ps = mfspr(SPRN_TLB1PS);
0481 eptcfg = mfspr(SPRN_EPTCFG);
0482
0483 if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
0484 book3e_htw_mode = PPC_HTW_E6500;
0485
0486
0487
0488
0489
0490
0491 if (eptcfg != 2)
0492 book3e_htw_mode = PPC_HTW_NONE;
0493
0494 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0495 struct mmu_psize_def *def = &mmu_psize_defs[psize];
0496
0497 if (!def->shift)
0498 continue;
0499
0500 if (tlb1ps & (1U << (def->shift - 10))) {
0501 def->flags |= MMU_PAGE_SIZE_DIRECT;
0502
0503 if (book3e_htw_mode && psize == MMU_PAGE_2M)
0504 def->flags |= MMU_PAGE_SIZE_INDIRECT;
0505 }
0506 }
0507
0508 goto out;
0509 }
0510 #endif
0511
0512 tlb0cfg = mfspr(SPRN_TLB0CFG);
0513 tlb0ps = mfspr(SPRN_TLB0PS);
0514 eptcfg = mfspr(SPRN_EPTCFG);
0515
0516
0517 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0518 struct mmu_psize_def *def = &mmu_psize_defs[psize];
0519
0520 if (tlb0ps & (1U << (def->shift - 10)))
0521 def->flags |= MMU_PAGE_SIZE_DIRECT;
0522 }
0523
0524
0525 if ((tlb0cfg & TLBnCFG_IND) == 0 ||
0526 (tlb0cfg & TLBnCFG_PT) == 0)
0527 goto out;
0528
0529 book3e_htw_mode = PPC_HTW_IBM;
0530
0531
0532
0533
0534
0535
0536 for (i = 0; i < 3; i++) {
0537 unsigned int ps, sps;
0538
0539 sps = eptcfg & 0x1f;
0540 eptcfg >>= 5;
0541 ps = eptcfg & 0x1f;
0542 eptcfg >>= 5;
0543 if (!ps || !sps)
0544 continue;
0545 for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
0546 struct mmu_psize_def *def = &mmu_psize_defs[psize];
0547
0548 if (ps == (def->shift - 10))
0549 def->flags |= MMU_PAGE_SIZE_INDIRECT;
0550 if (sps == (def->shift - 10))
0551 def->ind = ps + 10;
0552 }
0553 }
0554
0555 out:
0556
0557 pr_info("MMU: Supported page sizes\n");
0558 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0559 struct mmu_psize_def *def = &mmu_psize_defs[psize];
0560 const char *__page_type_names[] = {
0561 "unsupported",
0562 "direct",
0563 "indirect",
0564 "direct & indirect"
0565 };
0566 if (def->flags == 0) {
0567 def->shift = 0;
0568 continue;
0569 }
0570 pr_info(" %8ld KB as %s\n", 1ul << (def->shift - 10),
0571 __page_type_names[def->flags & 0x3]);
0572 }
0573 }
0574
/*
 * Install the TLB miss handlers matching the detected hardware
 * tablewalk mode, by patching the data (0x1c0) and instruction (0x1e0)
 * TLB miss exception vectors.  PPC_HTW_NONE leaves the default
 * handlers in place.
 */
static void __init setup_mmu_htw(void)
{
	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		/* e6500 handlers need the extra exception-frame space */
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
0598
0599
0600
0601
/*
 * Per-CPU MMU setup: program the MAS4 defaults for the chosen
 * tablewalk mode and, on FSL parts, create the bolted linear mapping
 * in TLBCAM entries.  Called on the boot CPU from early_init_mmu()
 * and on each secondary from early_init_mmu_secondary().
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core: if other threads of
		 * this core are already enabled (get_tensr() has more than
		 * one bit set), the map already exists.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false, true);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}
0658
/*
 * One-time, boot-CPU-only MMU setup: choose the vmemmap page size,
 * probe supported page sizes, configure hardware tablewalk (or the
 * bolted software handlers), and record the top of the linear mapping.
 */
static void __init early_init_mmu_global(void)
{
	/*
	 * Freescale booke only supports 4K pages in TLB0, so use that;
	 * otherwise assume 16M is available for the vmemmap.
	 * NOTE(review): ideally decided from the probed page sizes.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support and patch the miss handlers */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/* no HW tablewalk: use the bolted software handlers */
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}
0701
/*
 * Cap usable memory at the top of the bolted linear mapping.
 */
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't take faults on the linear map.
		 * Unlike memblock_set_current_limit, which only limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux — needed because
		 * highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}
0719
0720
/* Boot-CPU MMU initialisation entry point (64-bit Book3E) */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}
0727
/* Secondary CPUs only need the per-CPU part of the MMU setup */
void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}
0732
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * On non-FSL Embedded 64-bit, the RMA is capped at 1G to match
	 * the bolted TLB entry size used by the early init code.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes some RAM may not be mappable; limit
	 * ppc64_rma_size to what map_mem_in_cams() reports as mappable
	 * for this first memblock, still capped at 1G.
	 *
	 * Either way, crop to the first memblock's size to avoid going
	 * past the total available memory.
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations to the RMA */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
0772 #else
/* 32-bit variant: only the 47x needs any early MMU setup here */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
0779 #endif