// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 4xx series of chips.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/bootx.h>
#include <asm/machdep.h>
#include <asm/setup.h>

#include <mm/mmu_decl.h>

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
    /*
     * The Zone Protection Register (ZPR) defines how protection will
     * be applied to every page which is a member of a given zone. At
     * present, we utilize only two of the 4xx's zones.
     * The zone index bits (of ZSEL) in the PTE are used for software
     * indicators, except the LSB.  For user access, zone 1 is used;
     * for kernel access, zone 0 is used.  We set all but zone 1
     * to zero, allowing only kernel access as indicated in the PTE.
     * For zone 1, we set a 01 binary (a value of 10 will not work)
     * to allow user access as indicated in the PTE.  This also allows
     * kernel access as indicated in the PTE.
     */

    mtspr(SPRN_ZPR, 0x10000000);
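
    /*
     * Illustrative breakdown (not in the original file): ZPR packs
     * sixteen 2-bit protection fields, zone 0 in the two most
     * significant bits.  0x10000000 thus encodes
     *
     *   zone 0 = 0b00  ->  kernel access only, as permitted by the PTE
     *   zone 1 = 0b01  ->  user and kernel access, as permitted by the PTE
     *   zones 2-15     ->  0b00 (unused here)
     *
     * i.e. zone 1's 0b01 lands at bit 28: 1 << 28 == 0x10000000.
     */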

    flush_instruction_cache();

    /*
     * Set up the real-mode cache parameters for the exception vector
     * handlers (which are run in real-mode).
     */

    mtspr(SPRN_DCWR, 0x00000000);   /* All caching is write-back */

    /*
     * Cache instruction and data space where the exception
     * vectors and the kernel live in real-mode.
     */

    mtspr(SPRN_DCCR, 0xFFFF0000);   /* 2GByte of data space at 0x0. */
    mtspr(SPRN_ICCR, 0xFFFF0000);   /* 2GByte of instr. space at 0x0. */
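
    /*
     * Illustrative note (not in the original file): each DCCR/ICCR bit
     * selects cacheability for one 128MB region of the physical address
     * space, most-significant bit first, so setting the upper 16 bits
     * (0xFFFF0000) makes the lower 16 * 128MB = 2GB, starting at
     * physical 0x0, cacheable.
     */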
}

#define LARGE_PAGE_SIZE_16M (1<<24)     /* 16MB */
#define LARGE_PAGE_SIZE_4M  (1<<22)     /* 4MB */

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
    unsigned long v, s, mapped;
    phys_addr_t p;

    v = KERNELBASE;
    p = 0;
    s = total_lowmem;

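    /*
     * Explanatory note (not in the original file): KFENCE,
     * DEBUG_PAGEALLOC and STRICT_KERNEL_RWX all need to change
     * protections at normal-page granularity, which 4MB/16MB block
     * mappings cannot provide, so map nothing here and let the caller
     * fall back to normal-sized pages.
     */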
    if (IS_ENABLED(CONFIG_KFENCE))
        return 0;

    if (debug_pagealloc_enabled())
        return 0;

    if (strict_kernel_rwx_enabled())
        return 0;

    while (s >= LARGE_PAGE_SIZE_16M) {
        pmd_t *pmdp;
        unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;

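        /*
         * Explanatory note (not in the original file): each PMD entry
         * covers 4MB here, so one 16MB large page spans four
         * consecutive PMD slots; every slot gets a copy of the same
         * large-page descriptor.
         */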
        pmdp = pmd_off_k(v);
        *pmdp++ = __pmd(val);
        *pmdp++ = __pmd(val);
        *pmdp++ = __pmd(val);
        *pmdp++ = __pmd(val);

        v += LARGE_PAGE_SIZE_16M;
        p += LARGE_PAGE_SIZE_16M;
        s -= LARGE_PAGE_SIZE_16M;
    }

    while (s >= LARGE_PAGE_SIZE_4M) {
        pmd_t *pmdp;
        unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;

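        /*
         * Explanatory note (not in the original file): a 4MB large
         * page matches the 4MB PMD granularity exactly, so a single
         * entry suffices.
         */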
        pmdp = pmd_off_k(v);
        *pmdp = __pmd(val);

        v += LARGE_PAGE_SIZE_4M;
        p += LARGE_PAGE_SIZE_4M;
        s -= LARGE_PAGE_SIZE_4M;
    }

    mapped = total_lowmem - s;

    /* If the size of RAM is not an exact power of two, we may not
     * have covered RAM in its entirety with 16MiB and 4MiB pages.
     * Consequently, restrict the top end of RAM currently allocatable
     * so that memblock calls to allocate PTEs for "tail" coverage
     * with normal-sized pages (or other reasons) do not attempt to
     * allocate outside the allowed range.
     */
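    /*
     * Worked example (illustrative, not in the original file): with
     * 23MB of lowmem the loops above map one 16MB page and one 4MB
     * page, leaving a 3MB tail, so mapped = 20MB; that tail is later
     * covered with normal-sized pages whose page tables must be
     * allocated below the limit set here.
     */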
    memblock_set_current_limit(mapped);

    return mapped;
}
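
/*
 * Explanatory note (not in the original file): the value returned above
 * tells the generic mapin_ram() code how much of lowmem is already
 * covered; the remainder is mapped with normal-sized pages.
 */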

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                       phys_addr_t first_memblock_size)
{
    /* We don't currently support the first MEMBLOCK not mapping 0
     * physical on those processors
     */
    BUG_ON(first_memblock_base != 0);

    /* 40x can only access 16MB at the moment (see head_40x.S) */
    memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
}