0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * This file contains the routines for TLB flushing.
0004  * On machines where the MMU does not use a hash table to store virtual to
0005  * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
0006  * this does -not- include the 603, however, which shares the implementation
0007  * with hash based processors)
0008  *
0009  *  -- BenH
0010  *
0011  * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
0012  *                     IBM Corp.
0013  *
0014  *  Derived from arch/ppc/mm/init.c:
0015  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
0016  *
0017  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
0018  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
0019  *    Copyright (C) 1996 Paul Mackerras
0020  *
0021  *  Derived from "arch/i386/mm/init.c"
0022  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
0023  */
0024 
0025 #include <linux/kernel.h>
0026 #include <linux/export.h>
0027 #include <linux/mm.h>
0028 #include <linux/init.h>
0029 #include <linux/highmem.h>
0030 #include <linux/pagemap.h>
0031 #include <linux/preempt.h>
0032 #include <linux/spinlock.h>
0033 #include <linux/memblock.h>
0034 #include <linux/of_fdt.h>
0035 #include <linux/hugetlb.h>
0036 
0037 #include <asm/pgalloc.h>
0038 #include <asm/tlbflush.h>
0039 #include <asm/tlb.h>
0040 #include <asm/code-patching.h>
0041 #include <asm/cputhreads.h>
0042 #include <asm/hugetlb.h>
0043 #include <asm/paca.h>
0044 
0045 #include <mm/mmu_decl.h>
0046 
0047 /*
0048  * This struct lists the SW-supported page sizes.  The hardware MMU may support
0049  * other sizes not listed here.   The .ind field is only used on MMUs that have
0050  * indirect page table entries.
0051  */
0052 #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
0053 #ifdef CONFIG_PPC_FSL_BOOK3E
0054 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
0055     [MMU_PAGE_4K] = {
0056         .shift  = 12,
0057         .enc    = BOOK3E_PAGESZ_4K,
0058     },
0059     [MMU_PAGE_2M] = {
0060         .shift  = 21,
0061         .enc    = BOOK3E_PAGESZ_2M,
0062     },
0063     [MMU_PAGE_4M] = {
0064         .shift  = 22,
0065         .enc    = BOOK3E_PAGESZ_4M,
0066     },
0067     [MMU_PAGE_16M] = {
0068         .shift  = 24,
0069         .enc    = BOOK3E_PAGESZ_16M,
0070     },
0071     [MMU_PAGE_64M] = {
0072         .shift  = 26,
0073         .enc    = BOOK3E_PAGESZ_64M,
0074     },
0075     [MMU_PAGE_256M] = {
0076         .shift  = 28,
0077         .enc    = BOOK3E_PAGESZ_256M,
0078     },
0079     [MMU_PAGE_1G] = {
0080         .shift  = 30,
0081         .enc    = BOOK3E_PAGESZ_1GB,
0082     },
0083 };
0084 #elif defined(CONFIG_PPC_8xx)
0085 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
0086     [MMU_PAGE_4K] = {
0087         .shift  = 12,
0088     },
0089     [MMU_PAGE_16K] = {
0090         .shift  = 14,
0091     },
0092     [MMU_PAGE_512K] = {
0093         .shift  = 19,
0094     },
0095     [MMU_PAGE_8M] = {
0096         .shift  = 23,
0097     },
0098 };
0099 #else
0100 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
0101     [MMU_PAGE_4K] = {
0102         .shift  = 12,
0103         .ind    = 20,
0104         .enc    = BOOK3E_PAGESZ_4K,
0105     },
0106     [MMU_PAGE_16K] = {
0107         .shift  = 14,
0108         .enc    = BOOK3E_PAGESZ_16K,
0109     },
0110     [MMU_PAGE_64K] = {
0111         .shift  = 16,
0112         .ind    = 28,
0113         .enc    = BOOK3E_PAGESZ_64K,
0114     },
0115     [MMU_PAGE_1M] = {
0116         .shift  = 20,
0117         .enc    = BOOK3E_PAGESZ_1M,
0118     },
0119     [MMU_PAGE_16M] = {
0120         .shift  = 24,
0121         .ind    = 36,
0122         .enc    = BOOK3E_PAGESZ_16M,
0123     },
0124     [MMU_PAGE_256M] = {
0125         .shift  = 28,
0126         .enc    = BOOK3E_PAGESZ_256M,
0127     },
0128     [MMU_PAGE_1G] = {
0129         .shift  = 30,
0130         .enc    = BOOK3E_PAGESZ_1GB,
0131     },
0132 };
0133 #endif /* CONFIG_PPC_FSL_BOOK3E */
0134 
0135 static inline int mmu_get_tsize(int psize)
0136 {
0137     return mmu_psize_defs[psize].enc;
0138 }
0139 #else
0140 static inline int mmu_get_tsize(int psize)
0141 {
0142     /* This isn't used on !Book3E for now */
0143     return 0;
0144 }
0145 #endif /* CONFIG_PPC_BOOK3E_MMU || CONFIG_PPC_8xx */
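
     /*
      * mmu_get_tsize() simply returns the hardware encoding from the tables
      * above; e.g. on Book3E, mmu_get_tsize(MMU_PAGE_4K) yields
      * BOOK3E_PAGESZ_4K, the value later handed to _tlbil_va() and
      * _tlbivax_bcast() as their "tsize" argument.
      */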
0146 
0147 /* The variables below are currently only used on 64-bit Book3E,
0148  * though this will probably be made common with other nohash
0149  * implementations at some point.
0150  */
0151 #ifdef CONFIG_PPC64
0152 
0153 int mmu_pte_psize;      /* Page size used for PTE pages */
0154 int mmu_vmemmap_psize;      /* Page size used for the virtual mem map */
0155 int book3e_htw_mode;        /* HW tablewalk?  Value is PPC_HTW_* */
0156 unsigned long linear_map_top;   /* Top of linear mapping */
0157 
0158 
0159 /*
0160  * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
0161  * exceptions.  This is used for bolted and e6500 TLB miss handlers which
0162  * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
0163  * this is set to zero.
0164  */
0165 int extlb_level_exc;
0166 
0167 #endif /* CONFIG_PPC64 */
0168 
0169 #ifdef CONFIG_PPC_FSL_BOOK3E
0170 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
0171 DEFINE_PER_CPU(int, next_tlbcam_idx);
0172 EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
0173 #endif
0174 
0175 /*
0176  * Base TLB flushing operations:
0177  *
0178  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
0179  *  - flush_tlb_page(vma, vmaddr) flushes one page
0180  *  - flush_tlb_range(vma, start, end) flushes a range of pages
0181  *  - flush_tlb_kernel_range(start, end) flushes kernel pages
0182  *
0183  *  - local_* variants of page and mm only apply to the current
0184  *    processor
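      *
      *  For instance, invalidating a single user page after its PTE has
      *  been cleared is flush_tlb_page(vma, addr), while dropping every
      *  translation for an exiting address space is flush_tlb_mm(mm).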
0185  */
0186 
0187 #ifndef CONFIG_PPC_8xx
0188 /*
0189  * These are the base non-SMP variants of page and mm flushing
0190  */
0191 void local_flush_tlb_mm(struct mm_struct *mm)
0192 {
0193     unsigned int pid;
0194 
0195     preempt_disable();
0196     pid = mm->context.id;
0197     if (pid != MMU_NO_CONTEXT)
0198         _tlbil_pid(pid);
0199     preempt_enable();
0200 }
0201 EXPORT_SYMBOL(local_flush_tlb_mm);
0202 
0203 void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
0204                 int tsize, int ind)
0205 {
0206     unsigned int pid;
0207 
0208     preempt_disable();
0209     pid = mm ? mm->context.id : 0;
0210     if (pid != MMU_NO_CONTEXT)
0211         _tlbil_va(vmaddr, pid, tsize, ind);
0212     preempt_enable();
0213 }
0214 
0215 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
0216 {
0217     __local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
0218                    mmu_get_tsize(mmu_virtual_psize), 0);
0219 }
0220 EXPORT_SYMBOL(local_flush_tlb_page);
0221 #endif
0222 
0223 /*
0224  * And here are the SMP non-local implementations
0225  */
0226 #ifdef CONFIG_SMP
0227 
0228 static DEFINE_RAW_SPINLOCK(tlbivax_lock);
0229 
0230 struct tlb_flush_param {
0231     unsigned long addr;
0232     unsigned int pid;
0233     unsigned int tsize;
0234     unsigned int ind;
0235 };
0236 
0237 static void do_flush_tlb_mm_ipi(void *param)
0238 {
0239     struct tlb_flush_param *p = param;
0240 
0241     _tlbil_pid(p ? p->pid : 0);
0242 }
0243 
0244 static void do_flush_tlb_page_ipi(void *param)
0245 {
0246     struct tlb_flush_param *p = param;
0247 
0248     _tlbil_va(p->addr, p->pid, p->tsize, p->ind);
0249 }
0250 
0251 
0252 /* Note on invalidations and PID:
0253  *
0254  * We snapshot the PID with preempt disabled. At this point, it can still
0255  * change either because:
0256  * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
0257  * - we are invalidating some target that isn't currently running here
0258  *   and is concurrently acquiring a new PID on another CPU
0259  * - some other CPU is re-acquiring a lost PID for this mm
0260  * etc...
0261  *
0262  * However, this shouldn't be a problem as we only guarantee
0263  * invalidation of TLB entries present prior to this call, so we
0264  * don't care about the PID changing, and invalidating a stale PID
0265  * is generally harmless.
0266  */
0267 
0268 void flush_tlb_mm(struct mm_struct *mm)
0269 {
0270     unsigned int pid;
0271 
0272     preempt_disable();
0273     pid = mm->context.id;
0274     if (unlikely(pid == MMU_NO_CONTEXT))
0275         goto no_context;
0276     if (!mm_is_core_local(mm)) {
0277         struct tlb_flush_param p = { .pid = pid };
0278         /* Ignores smp_processor_id() even if set. */
0279         smp_call_function_many(mm_cpumask(mm),
0280                        do_flush_tlb_mm_ipi, &p, 1);
0281     }
0282     _tlbil_pid(pid);
0283  no_context:
0284     preempt_enable();
0285 }
0286 EXPORT_SYMBOL(flush_tlb_mm);
0287 
0288 void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
0289               int tsize, int ind)
0290 {
0291     struct cpumask *cpu_mask;
0292     unsigned int pid;
0293 
0294     /*
0295      * This function as well as __local_flush_tlb_page() must only be called
0296      * for user contexts.
0297      */
0298     if (WARN_ON(!mm))
0299         return;
0300 
0301     preempt_disable();
0302     pid = mm->context.id;
0303     if (unlikely(pid == MMU_NO_CONTEXT))
0304         goto bail;
0305     cpu_mask = mm_cpumask(mm);
0306     if (!mm_is_core_local(mm)) {
0307         /* If broadcast tlbivax is supported, use it */
0308         if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
0309             int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
0310             if (lock)
0311                 raw_spin_lock(&tlbivax_lock);
0312             _tlbivax_bcast(vmaddr, pid, tsize, ind);
0313             if (lock)
0314                 raw_spin_unlock(&tlbivax_lock);
0315             goto bail;
0316         } else {
0317             struct tlb_flush_param p = {
0318                 .pid = pid,
0319                 .addr = vmaddr,
0320                 .tsize = tsize,
0321                 .ind = ind,
0322             };
0323             /* Ignores smp_processor_id() even if set in cpu_mask */
0324             smp_call_function_many(cpu_mask,
0325                            do_flush_tlb_page_ipi, &p, 1);
0326         }
0327     }
0328     _tlbil_va(vmaddr, pid, tsize, ind);
0329  bail:
0330     preempt_enable();
0331 }
0332 
0333 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
0334 {
0335 #ifdef CONFIG_HUGETLB_PAGE
0336     if (vma && is_vm_hugetlb_page(vma))
0337         flush_hugetlb_page(vma, vmaddr);
0338 #endif
0339 
0340     __flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
0341              mmu_get_tsize(mmu_virtual_psize), 0);
0342 }
0343 EXPORT_SYMBOL(flush_tlb_page);
0344 
0345 #endif /* CONFIG_SMP */
0346 
0347 #ifdef CONFIG_PPC_47x
0348 void __init early_init_mmu_47x(void)
0349 {
0350 #ifdef CONFIG_SMP
0351     unsigned long root = of_get_flat_dt_root();
0352     if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
0353         mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
0354 #endif /* CONFIG_SMP */
0355 }
0356 #endif /* CONFIG_PPC_47x */
0357 
0358 /*
0359  * Flush kernel TLB entries in the given range
0360  */
0361 #ifndef CONFIG_PPC_8xx
0362 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
0363 {
0364 #ifdef CONFIG_SMP
0365     preempt_disable();
0366     smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
0367     _tlbil_pid(0);
0368     preempt_enable();
0369 #else
0370     _tlbil_pid(0);
0371 #endif
0372 }
0373 EXPORT_SYMBOL(flush_tlb_kernel_range);
0374 #endif
0375 
0376 /*
0377  * Currently, for range flushing, we just do a full mm flush. This should
0378  * be optimized based on a threshold on the size of the range, since
0379  * some implementations can stack multiple tlbivax instructions before a
0380  * tlbsync, but for now we keep it that way.
0381  */
0382 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
0383              unsigned long end)
0384 
0385 {
0386     if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
0387         flush_tlb_page(vma, start);
0388     else
0389         flush_tlb_mm(vma->vm_mm);
0390 }
0391 EXPORT_SYMBOL(flush_tlb_range);
0392 
0393 void tlb_flush(struct mmu_gather *tlb)
0394 {
0395     flush_tlb_mm(tlb->mm);
0396 }
0397 
0398 /*
0399  * Below are functions specific to the 64-bit variant of Book3E, though
0400  * that may change in the future.
0401  */
0402 
0403 #ifdef CONFIG_PPC64
0404 
0405 /*
0406  * Handling of virtual linear page table or indirect TLB entry flushing
0407  * when PTE pages are freed.
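      *
      * With HW tablewalk enabled, the indirect TLB entries covering the
      * PMD-sized range around the freed page table are invalidated one
      * page-table-sized step at a time; without HW tablewalk, the single
      * TLB entry mapping the freed PTE page inside the virtual linear page
      * table region is invalidated instead.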
0408  */
0409 void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
0410 {
0411     int tsize = mmu_psize_defs[mmu_pte_psize].enc;
0412 
0413     if (book3e_htw_mode != PPC_HTW_NONE) {
0414         unsigned long start = address & PMD_MASK;
0415         unsigned long end = address + PMD_SIZE;
0416         unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
0417 
0418         /* This isn't optimal; ideally we would factor the preempt &
0419          * CPU mask mucking around out of the loop, or even the IPI,
0420          * but it will do for now.
0421          */
0422         while (start < end) {
0423             __flush_tlb_page(tlb->mm, start, tsize, 1);
0424             start += size;
0425         }
0426     } else {
0427         unsigned long rmask = 0xf000000000000000ul;
0428         unsigned long rid = (address & rmask) | 0x1000000000000000ul;
0429         unsigned long vpte = address & ~rmask;
0430 
0431         vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
0432         vpte |= rid;
0433         __flush_tlb_page(tlb->mm, vpte, tsize, 0);
0434     }
0435 }
0436 
0437 static void __init setup_page_sizes(void)
0438 {
0439     unsigned int tlb0cfg;
0440     unsigned int tlb0ps;
0441     unsigned int eptcfg;
0442     int i, psize;
0443 
0444 #ifdef CONFIG_PPC_FSL_BOOK3E
0445     unsigned int mmucfg = mfspr(SPRN_MMUCFG);
0446     int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
0447 
0448     if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
0449         unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
0450         unsigned int min_pg, max_pg;
0451 
0452         min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
0453         max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
0454 
0455         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0456             struct mmu_psize_def *def;
0457             unsigned int shift;
0458 
0459             def = &mmu_psize_defs[psize];
0460             shift = def->shift;
0461 
0462             if (shift == 0 || shift & 1)
0463                 continue;
0464 
0465             /* adjust to be in terms of 4^shift KB */
0466             shift = (shift - 10) >> 1;
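                 /* e.g. a 16M page: shift 24 -> (24 - 10) >> 1 = 7, i.e. 4^7 KB */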
0467 
0468             if ((shift >= min_pg) && (shift <= max_pg))
0469                 def->flags |= MMU_PAGE_SIZE_DIRECT;
0470         }
0471 
0472         goto out;
0473     }
0474 
0475     if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
0476         u32 tlb1cfg, tlb1ps;
0477 
0478         tlb0cfg = mfspr(SPRN_TLB0CFG);
0479         tlb1cfg = mfspr(SPRN_TLB1CFG);
0480         tlb1ps = mfspr(SPRN_TLB1PS);
0481         eptcfg = mfspr(SPRN_EPTCFG);
0482 
0483         if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
0484             book3e_htw_mode = PPC_HTW_E6500;
0485 
0486         /*
0487          * We expect 4K subpage size and unrestricted indirect size.
0488          * The lack of a restriction on indirect size is a Freescale
0489          * extension, indicated by PSn = 0 but SPSn != 0.
0490          */
0491         if (eptcfg != 2)
0492             book3e_htw_mode = PPC_HTW_NONE;
0493 
0494         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0495             struct mmu_psize_def *def = &mmu_psize_defs[psize];
0496 
0497             if (!def->shift)
0498                 continue;
0499 
0500             if (tlb1ps & (1U << (def->shift - 10))) {
0501                 def->flags |= MMU_PAGE_SIZE_DIRECT;
0502 
0503                 if (book3e_htw_mode && psize == MMU_PAGE_2M)
0504                     def->flags |= MMU_PAGE_SIZE_INDIRECT;
0505             }
0506         }
0507 
0508         goto out;
0509     }
0510 #endif
0511 
0512     tlb0cfg = mfspr(SPRN_TLB0CFG);
0513     tlb0ps = mfspr(SPRN_TLB0PS);
0514     eptcfg = mfspr(SPRN_EPTCFG);
0515 
0516     /* Look for supported direct sizes */
0517     for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0518         struct mmu_psize_def *def = &mmu_psize_defs[psize];
0519 
0520         if (tlb0ps & (1U << (def->shift - 10)))
0521             def->flags |= MMU_PAGE_SIZE_DIRECT;
0522     }
0523 
0524     /* Indirect page sizes supported? */
0525     if ((tlb0cfg & TLBnCFG_IND) == 0 ||
0526         (tlb0cfg & TLBnCFG_PT) == 0)
0527         goto out;
0528 
0529     book3e_htw_mode = PPC_HTW_IBM;
0530 
0531     /* Now, we only deal with one IND page size for each
0532      * direct size. Hopefully all implementations today are
0533      * unambiguous, but we might want to be careful in the
0534      * future.
0535      */
0536     for (i = 0; i < 3; i++) {
0537         unsigned int ps, sps;
0538 
0539         sps = eptcfg & 0x1f;
0540         eptcfg >>= 5;
0541         ps = eptcfg & 0x1f;
0542         eptcfg >>= 5;
0543         if (!ps || !sps)
0544             continue;
0545         for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
0546             struct mmu_psize_def *def = &mmu_psize_defs[psize];
0547 
0548             if (ps == (def->shift - 10))
0549                 def->flags |= MMU_PAGE_SIZE_INDIRECT;
0550             if (sps == (def->shift - 10))
0551                 def->ind = ps + 10;
0552         }
0553     }
0554 
0555 out:
0556     /* Cleanup array and print summary */
0557     pr_info("MMU: Supported page sizes\n");
0558     for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
0559         struct mmu_psize_def *def = &mmu_psize_defs[psize];
0560         const char *__page_type_names[] = {
0561             "unsupported",
0562             "direct",
0563             "indirect",
0564             "direct & indirect"
0565         };
0566         if (def->flags == 0) {
0567             def->shift = 0;
0568             continue;
0569         }
0570         pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
0571             __page_type_names[def->flags & 0x3]);
0572     }
0573 }
0574 
0575 static void __init setup_mmu_htw(void)
0576 {
0577     /*
0578      * If we want to use HW tablewalk, enable it by patching the TLB miss
0579      * handlers to branch to the one dedicated to it.
0580      */
0581 
0582     switch (book3e_htw_mode) {
0583     case PPC_HTW_IBM:
0584         patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
0585         patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
0586         break;
0587 #ifdef CONFIG_PPC_FSL_BOOK3E
0588     case PPC_HTW_E6500:
0589         extlb_level_exc = EX_TLB_SIZE;
0590         patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
0591         patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
0592         break;
0593 #endif
0594     }
0595     pr_info("MMU: Book3E HW tablewalk %s\n",
0596         book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
0597 }
0598 
0599 /*
0600  * Early initialization of the MMU TLB code
0601  */
0602 static void early_init_this_mmu(void)
0603 {
0604     unsigned int mas4;
0605 
0606     /* Set MAS4 based on page table setting */
0607 
0608     mas4 = 0x4 << MAS4_WIMGED_SHIFT;
0609     switch (book3e_htw_mode) {
0610     case PPC_HTW_E6500:
0611         mas4 |= MAS4_INDD;
0612         mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
0613         mas4 |= MAS4_TLBSELD(1);
0614         mmu_pte_psize = MMU_PAGE_2M;
0615         break;
0616 
0617     case PPC_HTW_IBM:
0618         mas4 |= MAS4_INDD;
0619         mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
0620         mmu_pte_psize = MMU_PAGE_1M;
0621         break;
0622 
0623     case PPC_HTW_NONE:
0624         mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
0625         mmu_pte_psize = mmu_virtual_psize;
0626         break;
0627     }
0628     mtspr(SPRN_MAS4, mas4);
0629 
0630 #ifdef CONFIG_PPC_FSL_BOOK3E
0631     if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
0632         unsigned int num_cams;
0633         bool map = true;
0634 
0635         /* use a quarter of the TLBCAM for bolted linear map */
0636         num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
0637 
0638         /*
0639          * Only do the mapping once per core, or else the
0640          * transient mapping would cause problems.
0641          */
0642 #ifdef CONFIG_SMP
0643         if (hweight32(get_tensr()) > 1)
0644             map = false;
0645 #endif
0646 
0647         if (map)
0648             linear_map_top = map_mem_in_cams(linear_map_top,
0649                              num_cams, false, true);
0650     }
0651 #endif
0652 
0653     /* A sync won't hurt us after mucking around with
0654      * the MMU configuration
0655      */
0656     mb();
0657 }
0658 
0659 static void __init early_init_mmu_global(void)
0660 {
0661     /* XXX This should be decided at runtime based on supported
0662      * page sizes in the TLB, but for now let's assume 16M is
0663      * always there and a good fit (which it probably is)
0664      *
0665      * Freescale booke only supports 4K pages in TLB0, so use that.
0666      */
0667     if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
0668         mmu_vmemmap_psize = MMU_PAGE_4K;
0669     else
0670         mmu_vmemmap_psize = MMU_PAGE_16M;
0671 
0672     /* XXX This code only checks for TLB 0 capabilities and doesn't
0673      *     check what page size combos are supported by the HW. It
0674      *     also doesn't handle the case where a separate array holds
0675      *     the IND entries from the array loaded by the PT.
0676      */
0677     /* Look for supported page sizes */
0678     setup_page_sizes();
0679 
0680     /* Look for HW tablewalk support */
0681     setup_mmu_htw();
0682 
0683 #ifdef CONFIG_PPC_FSL_BOOK3E
0684     if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
0685         if (book3e_htw_mode == PPC_HTW_NONE) {
0686             extlb_level_exc = EX_TLB_SIZE;
0687             patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
0688             patch_exception(0x1e0,
0689                 exc_instruction_tlb_miss_bolted_book3e);
0690         }
0691     }
0692 #endif
0693 
0694     /* Set the global containing the top of the linear mapping
0695      * for use by the TLB miss code
0696      */
0697     linear_map_top = memblock_end_of_DRAM();
0698 
0699     ioremap_bot = IOREMAP_BASE;
0700 }
0701 
0702 static void __init early_mmu_set_memory_limit(void)
0703 {
0704 #ifdef CONFIG_PPC_FSL_BOOK3E
0705     if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
0706         /*
0707          * Limit memory so we don't have linear faults.
0708          * Unlike memblock_set_current_limit, which limits
0709          * memory available during early boot, this permanently
0710          * reduces the memory available to Linux.  We need to
0711          * do this because highmem is not supported on 64-bit.
0712          */
0713         memblock_enforce_memory_limit(linear_map_top);
0714     }
0715 #endif
0716 
0717     memblock_set_current_limit(linear_map_top);
0718 }
0719 
0720 /* boot cpu only */
0721 void __init early_init_mmu(void)
0722 {
0723     early_init_mmu_global();
0724     early_init_this_mmu();
0725     early_mmu_set_memory_limit();
0726 }
0727 
0728 void early_init_mmu_secondary(void)
0729 {
0730     early_init_this_mmu();
0731 }
0732 
0733 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
0734                 phys_addr_t first_memblock_size)
0735 {
0736     /* On non-FSL Embedded 64-bit, we adjust the RMA size to match
0737      * the bolted TLB entry. We know for now that only 1G
0738      * entries are supported though that may eventually
0739      * change.
0740      *
0741      * On FSL Embedded 64-bit, usually all RAM is bolted, but with
0742      * unusual memory sizes it's possible for some RAM to not be mapped
0743      * (such RAM is not used at all by Linux, since we don't support
0744      * highmem on 64-bit).  We limit ppc64_rma_size to what would be
0745      * mappable if this memblock is the only one.  Additional memblocks
0746      * can only increase, not decrease, the amount that ends up getting
0747      * mapped.  We still limit max to 1G even if we'll eventually map
0748      * more.  This is due to what the early init code is set up to do.
0749      *
0750      * We crop it to the size of the first MEMBLOCK to
0751      * avoid going over total available memory just in case...
0752      */
0753 #ifdef CONFIG_PPC_FSL_BOOK3E
0754     if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
0755         unsigned long linear_sz;
0756         unsigned int num_cams;
0757 
0758         /* use a quarter of the TLBCAM for bolted linear map */
0759         num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
0760 
0761         linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
0762                         true, true);
0763 
0764         ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
0765     } else
0766 #endif
0767         ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
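             /* e.g. a 2 GB first memblock is capped to 0x40000000 (1 GB) here */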
0768 
0769     /* Finally limit subsequent allocations */
0770     memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
0771 }
0772 #else /* ! CONFIG_PPC64 */
0773 void __init early_init_mmu(void)
0774 {
0775 #ifdef CONFIG_PPC_47x
0776     early_init_mmu_47x();
0777 #endif
0778 }
0779 #endif /* CONFIG_PPC64 */