// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

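/*
 * Amount of low RAM, starting at physical address 0, covered by the block
 * mappings set up in mmu_mapin_ram(). Used by v_block_mapped() and
 * p_block_mapped() below to translate such addresses by simple arithmetic.
 */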
static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
    unsigned long p = PHYS_IMMR_BASE;

    if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
        return p + va - VIRT_IMMR_BASE;
    if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
        return __pa(va);
    return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap
 * Return 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
    unsigned long p = PHYS_IMMR_BASE;

    if (pa >= p && pa < p + IMMR_SIZE)
        return VIRT_IMMR_BASE + pa - p;
    if (pa < block_mapped_ram)
        return (unsigned long)__va(pa);
    return 0;
}

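/*
 * Early allocator for the huge page directory backing an 8M kernel mapping.
 * The page table memory comes from memblock because this runs before the
 * slab allocator is available.
 */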
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
    if (hpd_val(*pmdp) == 0) {
        pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

        if (!ptep)
            return NULL;

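        /*
         * Install the same huge page table in both consecutive entries
         * covered by the 8M mapping.
         */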
        hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
        hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
    }
    return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

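/*
 * Map a single 512k or 8M kernel huge page at @va, pointing at physical
 * address @pa with protection @prot. When @new is true the page table is
 * allocated (early boot only, hence the slab_is_available() check);
 * otherwise an existing entry is rewritten, e.g. to change protections.
 */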
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
                                             pgprot_t prot, int psize, bool new)
{
    pmd_t *pmdp = pmd_off_k(va);
    pte_t *ptep;

    if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
        return -EINVAL;

    if (new) {
        if (WARN_ON(slab_is_available()))
            return -EINVAL;

        if (psize == MMU_PAGE_512K)
            ptep = early_pte_alloc_kernel(pmdp, va);
        else
            ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
    } else {
        if (psize == MMU_PAGE_512K)
            ptep = pte_offset_kernel(pmdp, va);
        else
            ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
    }

    if (WARN_ON(!ptep))
        return -ENOMEM;

    /* The PTE should never be already present */
    if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
        return -EINVAL;

    set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

    return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
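    /* Nothing to do here: the block mappings are set up by mmu_mapin_ram(). */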
}

static bool immr_is_mapped __initdata;

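/*
 * Map the IMMR area (the 8xx internal registers) with a single 512k
 * non-cached guarded page so that it is accessible early in boot.
 * Safe to call several times; only the first call installs the mapping.
 */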
void __init mmu_mapin_immr(void)
{
    if (immr_is_mapped)
        return;

    immr_is_mapped = true;

    __early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
                                PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

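/*
 * Map (or remap) the physical range [offset, top) at PAGE_OFFSET + offset:
 * 512k pages up to the first 8M boundary, 8M pages for the bulk of the
 * range, then 512k pages for the remainder. With new == false, existing
 * entries are rewritten (typically to change protections) and the TLB
 * is flushed.
 */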
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
                                pgprot_t prot, bool new)
{
    unsigned long v = PAGE_OFFSET + offset;
    unsigned long p = offset;

    WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

    for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
        __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
    for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
        __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
    for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
        __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

    if (!new)
        flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
}

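/*
 * Install the initial block mappings of low memory. Kernel text and init
 * text are mapped with PAGE_KERNEL_TEXT, the rest with PAGE_KERNEL; where
 * the boundaries fall depends on STRICT_KERNEL_RWX, DEBUG_PAGEALLOC and
 * KFENCE. Returns the top of the block-mapped range, which is also
 * recorded in block_mapped_ram.
 */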
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
    unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
    unsigned long sinittext = __pa(_sinittext);
    bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
    unsigned long boundary = strict_boundary ? sinittext : etext8;
    unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

    WARN_ON(top < einittext8);

    mmu_mapin_immr();

    mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
    if (debug_pagealloc_enabled_or_kfence()) {
        top = boundary;
    } else {
        mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
        mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
    }

    if (top > SZ_32M)
        memblock_set_current_limit(top);

    block_mapped_ram = top;

    return top;
}

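/*
 * Remap the init text area with PAGE_KERNEL so that it is no longer
 * executable, then update the pinned TLB entries through mmu_pin_tlb().
 */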
void mmu_mark_initmem_nx(void)
{
    unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
    unsigned long sinittext = __pa(_sinittext);
    unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
    unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

    if (!debug_pagealloc_enabled_or_kfence())
        mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

    mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
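/*
 * With STRICT_KERNEL_RWX, remap everything below _sinittext as read-only
 * executable (PAGE_KERNEL_ROX) and, when CONFIG_PIN_TLB_DATA is set,
 * refresh the pinned TLB entries accordingly.
 */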
void mmu_mark_rodata_ro(void)
{
    unsigned long sinittext = __pa(_sinittext);

    mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
    if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
        mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
{
    /* We don't currently support the first MEMBLOCK not mapping
     * physical address 0 on these processors
     */
    BUG_ON(first_memblock_base != 0);

    /* 8xx can only access 32MB at the moment */
    memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

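/*
 * Stubs for the generic huge vmap interface: the 8xx does not use huge
 * pages at the PUD/PMD level, so there is never anything to clear here.
 */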
int pud_clear_huge(pud_t *pud)
{
    return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
    return 0;
}