// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
0007 #include <linux/module.h>
0008 #include <linux/kernel.h>
0009 #include <linux/errno.h>
0010 #include <linux/init.h>
0011 #include <linux/mman.h>
0012 #include <linux/nodemask.h>
0013 #include <linux/memblock.h>
0014 #include <linux/fs.h>
0015 #include <linux/vmalloc.h>
0016 #include <linux/sizes.h>
0017
0018 #include <asm/cp15.h>
0019 #include <asm/cputype.h>
0020 #include <asm/cachetype.h>
0021 #include <asm/sections.h>
0022 #include <asm/setup.h>
0023 #include <asm/smp_plat.h>
0024 #include <asm/tlb.h>
0025 #include <asm/highmem.h>
0026 #include <asm/system_info.h>
0027 #include <asm/traps.h>
0028 #include <asm/procinfo.h>
0029 #include <asm/memory.h>
0030 #include <asm/pgalloc.h>
0031 #include <asm/kasan_def.h>
0032
0033 #include <asm/mach/arch.h>
0034 #include <asm/mach/map.h>
0035 #include <asm/mach/pci.h>
0036 #include <asm/fixmap.h>
0037
0038 #include "fault.h"
0039 #include "mm.h"
0040 #include "tcm.h"
0041
0042 extern unsigned long __atags_pointer;
0043
/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
0048 struct page *empty_zero_page;
0049 EXPORT_SYMBOL(empty_zero_page);
0050
/*
 * The pmd table for the upper-most set of pages.
 */
0054 pmd_t *top_pmd;
0055
0056 pmdval_t user_pmd_table = _PAGE_USER_TABLE;
0057
0058 #define CPOLICY_UNCACHED 0
0059 #define CPOLICY_BUFFERED 1
0060 #define CPOLICY_WRITETHROUGH 2
0061 #define CPOLICY_WRITEBACK 3
0062 #define CPOLICY_WRITEALLOC 4
0063
0064 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
0065 static unsigned int ecc_mask __initdata = 0;
0066 pgprot_t pgprot_user;
0067 pgprot_t pgprot_kernel;
0068
0069 EXPORT_SYMBOL(pgprot_user);
0070 EXPORT_SYMBOL(pgprot_kernel);
0071
0072 struct cachepolicy {
0073 const char policy[16];
0074 unsigned int cr_mask;
0075 pmdval_t pmd;
0076 pteval_t pte;
0077 };
0078
0079 static struct cachepolicy cache_policies[] __initdata = {
0080 {
0081 .policy = "uncached",
0082 .cr_mask = CR_W|CR_C,
0083 .pmd = PMD_SECT_UNCACHED,
0084 .pte = L_PTE_MT_UNCACHED,
0085 }, {
0086 .policy = "buffered",
0087 .cr_mask = CR_C,
0088 .pmd = PMD_SECT_BUFFERED,
0089 .pte = L_PTE_MT_BUFFERABLE,
0090 }, {
0091 .policy = "writethrough",
0092 .cr_mask = 0,
0093 .pmd = PMD_SECT_WT,
0094 .pte = L_PTE_MT_WRITETHROUGH,
0095 }, {
0096 .policy = "writeback",
0097 .cr_mask = 0,
0098 .pmd = PMD_SECT_WB,
0099 .pte = L_PTE_MT_WRITEBACK,
0100 }, {
0101 .policy = "writealloc",
0102 .cr_mask = 0,
0103 .pmd = PMD_SECT_WBWA,
0104 .pte = L_PTE_MT_WRITEALLOC,
0105 }
0106 };
0107
0108 #ifdef CONFIG_CPU_CP15
0109 static unsigned long initial_pmd_value __initdata = 0;
0110
/*
 * Initialise the cache_policy variable with the initial state specified
 * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
 * the C code sets the page tables up with the same policy as the head
 * assembly code, which avoids an illegal state where the TLBs can get
 * confused.  See the comments in early_cachepolicy() for more information.
 */
0118 void __init init_default_cache_policy(unsigned long pmd)
0119 {
0120 int i;
0121
0122 initial_pmd_value = pmd;
0123
0124 pmd &= PMD_SECT_CACHE_MASK;
0125
0126 for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
0127 if (cache_policies[i].pmd == pmd) {
0128 cachepolicy = i;
0129 break;
0130 }
0131
0132 if (i == ARRAY_SIZE(cache_policies))
0133 pr_err("ERROR: could not find cache policy\n");
0134 }
0135
/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be on and the cache off.)
 */
0141 static int __init early_cachepolicy(char *p)
0142 {
0143 int i, selected = -1;
0144
0145 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
0146 int len = strlen(cache_policies[i].policy);
0147
0148 if (memcmp(p, cache_policies[i].policy, len) == 0) {
0149 selected = i;
0150 break;
0151 }
0152 }
0153
0154 if (selected == -1)
0155 pr_err("ERROR: unknown or unsupported cache policy\n");
0156
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
0164 if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
0165 pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
0166 cache_policies[cachepolicy].policy);
0167 return 0;
0168 }
0169
0170 if (selected != cachepolicy) {
0171 unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
0172 cachepolicy = selected;
0173 flush_cache_all();
0174 set_cr(cr);
0175 }
0176 return 0;
0177 }
0178 early_param("cachepolicy", early_cachepolicy);
0179
0180 static int __init early_nocache(char *__unused)
0181 {
0182 char *p = "buffered";
0183 pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
0184 early_cachepolicy(p);
0185 return 0;
0186 }
0187 early_param("nocache", early_nocache);
0188
0189 static int __init early_nowrite(char *__unused)
0190 {
0191 char *p = "uncached";
0192 pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
0193 early_cachepolicy(p);
0194 return 0;
0195 }
0196 early_param("nowb", early_nowrite);
0197
0198 #ifndef CONFIG_ARM_LPAE
0199 static int __init early_ecc(char *p)
0200 {
0201 if (memcmp(p, "on", 2) == 0)
0202 ecc_mask = PMD_PROTECTION;
0203 else if (memcmp(p, "off", 3) == 0)
0204 ecc_mask = 0;
0205 return 0;
0206 }
0207 early_param("ecc", early_ecc);
0208 #endif
0209
0210 #else
0211
0212 static int __init early_cachepolicy(char *p)
0213 {
0214 pr_warn("cachepolicy kernel parameter not supported without cp15\n");
0215 return 0;
0216 }
0217 early_param("cachepolicy", early_cachepolicy);
0218
0219 static int __init noalign_setup(char *__unused)
0220 {
0221 pr_warn("noalign kernel parameter not supported without cp15\n");
0222 return 1;
0223 }
0224 __setup("noalign", noalign_setup);
0225
0226 #endif
0227
0228 #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
0229 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
0230 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
0231
0232 static struct mem_type mem_types[] __ro_after_init = {
0233 [MT_DEVICE] = {
0234 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
0235 L_PTE_SHARED,
0236 .prot_l1 = PMD_TYPE_TABLE,
0237 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
0238 .domain = DOMAIN_IO,
0239 },
0240 [MT_DEVICE_NONSHARED] = {
0241 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
0242 .prot_l1 = PMD_TYPE_TABLE,
0243 .prot_sect = PROT_SECT_DEVICE,
0244 .domain = DOMAIN_IO,
0245 },
0246 [MT_DEVICE_CACHED] = {
0247 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
0248 .prot_l1 = PMD_TYPE_TABLE,
0249 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
0250 .domain = DOMAIN_IO,
0251 },
0252 [MT_DEVICE_WC] = {
0253 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
0254 .prot_l1 = PMD_TYPE_TABLE,
0255 .prot_sect = PROT_SECT_DEVICE,
0256 .domain = DOMAIN_IO,
0257 },
0258 [MT_UNCACHED] = {
0259 .prot_pte = PROT_PTE_DEVICE,
0260 .prot_l1 = PMD_TYPE_TABLE,
0261 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
0262 .domain = DOMAIN_IO,
0263 },
0264 [MT_CACHECLEAN] = {
0265 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
0266 .domain = DOMAIN_KERNEL,
0267 },
0268 #ifndef CONFIG_ARM_LPAE
0269 [MT_MINICLEAN] = {
0270 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
0271 .domain = DOMAIN_KERNEL,
0272 },
0273 #endif
0274 [MT_LOW_VECTORS] = {
0275 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0276 L_PTE_RDONLY,
0277 .prot_l1 = PMD_TYPE_TABLE,
0278 .domain = DOMAIN_VECTORS,
0279 },
0280 [MT_HIGH_VECTORS] = {
0281 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0282 L_PTE_USER | L_PTE_RDONLY,
0283 .prot_l1 = PMD_TYPE_TABLE,
0284 .domain = DOMAIN_VECTORS,
0285 },
0286 [MT_MEMORY_RWX] = {
0287 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
0288 .prot_l1 = PMD_TYPE_TABLE,
0289 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
0290 .domain = DOMAIN_KERNEL,
0291 },
0292 [MT_MEMORY_RW] = {
0293 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0294 L_PTE_XN,
0295 .prot_l1 = PMD_TYPE_TABLE,
0296 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
0297 .domain = DOMAIN_KERNEL,
0298 },
0299 [MT_MEMORY_RO] = {
0300 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0301 L_PTE_XN | L_PTE_RDONLY,
0302 .prot_l1 = PMD_TYPE_TABLE,
0303 .prot_sect = PMD_TYPE_SECT,
0304 .domain = DOMAIN_KERNEL,
0305 },
0306 [MT_ROM] = {
0307 .prot_sect = PMD_TYPE_SECT,
0308 .domain = DOMAIN_KERNEL,
0309 },
0310 [MT_MEMORY_RWX_NONCACHED] = {
0311 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0312 L_PTE_MT_BUFFERABLE,
0313 .prot_l1 = PMD_TYPE_TABLE,
0314 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
0315 .domain = DOMAIN_KERNEL,
0316 },
0317 [MT_MEMORY_RW_DTCM] = {
0318 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0319 L_PTE_XN,
0320 .prot_l1 = PMD_TYPE_TABLE,
0321 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
0322 .domain = DOMAIN_KERNEL,
0323 },
0324 [MT_MEMORY_RWX_ITCM] = {
0325 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
0326 .prot_l1 = PMD_TYPE_TABLE,
0327 .domain = DOMAIN_KERNEL,
0328 },
0329 [MT_MEMORY_RW_SO] = {
0330 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0331 L_PTE_MT_UNCACHED | L_PTE_XN,
0332 .prot_l1 = PMD_TYPE_TABLE,
0333 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
0334 PMD_SECT_UNCACHED | PMD_SECT_XN,
0335 .domain = DOMAIN_KERNEL,
0336 },
0337 [MT_MEMORY_DMA_READY] = {
0338 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
0339 L_PTE_XN,
0340 .prot_l1 = PMD_TYPE_TABLE,
0341 .domain = DOMAIN_KERNEL,
0342 },
0343 };
0344
0345 const struct mem_type *get_mem_type(unsigned int type)
0346 {
0347 return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
0348 }
0349 EXPORT_SYMBOL(get_mem_type);
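
/*
 * Illustrative use of get_mem_type() (a sketch, not code from this file):
 * mapping helpers such as the ioremap implementation look up the attribute
 * set for a mapping type before building page table entries, e.g.
 *
 *	const struct mem_type *mtype = get_mem_type(MT_DEVICE);
 *
 *	if (mtype)
 *		prot = __pgprot(mtype->prot_pte);
 */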
0350
0351 static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
0352
0353 static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
0354 __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
0355
0356 static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
0357 {
0358 return &bm_pte[pte_index(addr)];
0359 }
0360
0361 static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
0362 {
0363 return pte_offset_kernel(dir, addr);
0364 }
0365
0366 static inline pmd_t * __init fixmap_pmd(unsigned long addr)
0367 {
0368 return pmd_off_k(addr);
0369 }
0370
0371 void __init early_fixmap_init(void)
0372 {
0373 pmd_t *pmd;
0374
	/*
	 * The early fixmap range spans multiple pmds, for which
	 * we are not prepared:
	 */
0379 BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
0380 != FIXADDR_TOP >> PMD_SHIFT);
0381
0382 pmd = fixmap_pmd(FIXADDR_TOP);
0383 pmd_populate_kernel(&init_mm, pmd, bm_pte);
0384
0385 pte_offset_fixmap = pte_offset_early_fixmap;
0386 }
0387
/*
 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 * As a result, this can only be called with preemption disabled, as under
 * stop_machine().
 */
0393 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
0394 {
0395 unsigned long vaddr = __fix_to_virt(idx);
0396 pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
0397
	/* Make sure fixmap region does not exceed available allocation. */
0399 BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
0400 BUG_ON(idx >= __end_of_fixed_addresses);
0401
	/* We only support device mappings before pgprot_kernel has been set. */
0403 if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
0404 pgprot_val(prot) && pgprot_val(pgprot_kernel) == 0))
0405 return;
0406
0407 if (pgprot_val(prot))
0408 set_pte_at(NULL, vaddr, pte,
0409 pfn_pte(phys >> PAGE_SHIFT, prot));
0410 else
0411 pte_clear(NULL, vaddr, pte);
0412 local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
0413 }
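
/*
 * Usage sketch (illustrative, assuming the platform's fixmap enum provides a
 * slot such as FIX_EARLYCON_MEM_BASE): early code can temporarily map one
 * device page through a fixed virtual slot before ioremap() is available:
 *
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK, FIXMAP_PAGE_IO);
 *	base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
 *	...
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, 0, __pgprot(0));
 */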
0414
0415 static pgprot_t protection_map[16] __ro_after_init = {
0416 [VM_NONE] = __PAGE_NONE,
0417 [VM_READ] = __PAGE_READONLY,
0418 [VM_WRITE] = __PAGE_COPY,
0419 [VM_WRITE | VM_READ] = __PAGE_COPY,
0420 [VM_EXEC] = __PAGE_READONLY_EXEC,
0421 [VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
0422 [VM_EXEC | VM_WRITE] = __PAGE_COPY_EXEC,
0423 [VM_EXEC | VM_WRITE | VM_READ] = __PAGE_COPY_EXEC,
0424 [VM_SHARED] = __PAGE_NONE,
0425 [VM_SHARED | VM_READ] = __PAGE_READONLY,
0426 [VM_SHARED | VM_WRITE] = __PAGE_SHARED,
0427 [VM_SHARED | VM_WRITE | VM_READ] = __PAGE_SHARED,
0428 [VM_SHARED | VM_EXEC] = __PAGE_READONLY_EXEC,
0429 [VM_SHARED | VM_EXEC | VM_READ] = __PAGE_READONLY_EXEC,
0430 [VM_SHARED | VM_EXEC | VM_WRITE] = __PAGE_SHARED_EXEC,
0431 [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __PAGE_SHARED_EXEC
0432 };
0433 DECLARE_VM_GET_PAGE_PROT
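
/*
 * Worked example: a MAP_SHARED, PROT_READ|PROT_WRITE mapping carries
 * VM_SHARED | VM_WRITE | VM_READ, so vm_get_page_prot() (generated by
 * DECLARE_VM_GET_PAGE_PROT above) returns __PAGE_SHARED, i.e. a writable,
 * non-COW user protection, as adjusted with user_pgprot by
 * build_mem_type_table() below.
 */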
0434
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
0438 static void __init build_mem_type_table(void)
0439 {
0440 struct cachepolicy *cp;
0441 unsigned int cr = get_cr();
0442 pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
0443 int cpu_arch = cpu_architecture();
0444 int i;
0445
0446 if (cpu_arch < CPU_ARCH_ARMv6) {
0447 #if defined(CONFIG_CPU_DCACHE_DISABLE)
0448 if (cachepolicy > CPOLICY_BUFFERED)
0449 cachepolicy = CPOLICY_BUFFERED;
0450 #elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
0451 if (cachepolicy > CPOLICY_WRITETHROUGH)
0452 cachepolicy = CPOLICY_WRITETHROUGH;
0453 #endif
0454 }
0455 if (cpu_arch < CPU_ARCH_ARMv5) {
0456 if (cachepolicy >= CPOLICY_WRITEALLOC)
0457 cachepolicy = CPOLICY_WRITEBACK;
0458 ecc_mask = 0;
0459 }
0460
0461 if (is_smp()) {
0462 if (cachepolicy != CPOLICY_WRITEALLOC) {
0463 pr_warn("Forcing write-allocate cache policy for SMP\n");
0464 cachepolicy = CPOLICY_WRITEALLOC;
0465 }
0466 if (!(initial_pmd_value & PMD_SECT_S)) {
0467 pr_warn("Forcing shared mappings for SMP\n");
0468 initial_pmd_value |= PMD_SECT_S;
0469 }
0470 }
0471
	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
0477 if (cpu_arch < CPU_ARCH_ARMv5)
0478 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
0479 mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
0480 if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
0481 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
0482 mem_types[i].prot_sect &= ~PMD_SECT_S;
0483
	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
0489 if (cpu_is_xscale_family()) {
0490 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
0491 mem_types[i].prot_sect &= ~PMD_BIT4;
0492 mem_types[i].prot_l1 &= ~PMD_BIT4;
0493 }
0494 } else if (cpu_arch < CPU_ARCH_ARMv6) {
0495 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
0496 if (mem_types[i].prot_l1)
0497 mem_types[i].prot_l1 |= PMD_BIT4;
0498 if (mem_types[i].prot_sect)
0499 mem_types[i].prot_sect |= PMD_BIT4;
0500 }
0501 }
0502
	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
0506 if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
0507 if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
0512 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
0513 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
0514 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
0515 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
0516
			/* Also set up NX memory mappings */
0518 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
0519 mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;
0520 }
0521 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
0529 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
0530 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
0531 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
0532 } else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
0540 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
0541 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
0542 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
0543 } else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance)
			 */
0551 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
0552 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
0553 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
0554 }
0555 } else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
0559 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
0560 }
0561
	/*
	 * Now deal with the memory-type mappings
	 */
0565 cp = &cache_policies[cachepolicy];
0566 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
0567
0568 #ifndef CONFIG_ARM_LPAE
	/*
	 * We don't use domains on ARMv6 (since this causes problems with
	 * v6/v7 kernels), so we must use a separate memory type for user
	 * r/o, kernel r/w to map the vectors page.
	 */
0574 if (cpu_arch == CPU_ARCH_ARMv6)
0575 vecs_pgprot |= L_PTE_MT_VECTORS;
0576
	/*
	 * If the CPU supports the PXN extension (VMSA support field of
	 * ID_MMFR0 >= 4), mark user page tables Privileged-eXecute-Never.
	 */
0581 if (cpu_arch == CPU_ARCH_ARMv7 &&
0582 (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
0583 user_pmd_table |= PMD_PXNTABLE;
0584 }
0585 #endif
0586
	/*
	 * ARMv6 and above have extended page tables.
	 */
0590 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
0591 #ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
0596 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
0597 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
0598 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
0599 mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
0600 #endif
0601
		/*
		 * If the initial page tables were created with the S bit
		 * set, then we need to do the same here for the same
		 * reasons given in early_cachepolicy().
		 */
0607 if (initial_pmd_value & PMD_SECT_S) {
0608 user_pgprot |= L_PTE_SHARED;
0609 kern_pgprot |= L_PTE_SHARED;
0610 vecs_pgprot |= L_PTE_SHARED;
0611 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
0612 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
0613 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
0614 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
0615 mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
0616 mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
0617 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
0618 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
0619 mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S;
0620 mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;
0621 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
0622 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
0623 mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
0624 }
0625 }
0626
	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used.
	 */
0631 if (cpu_arch >= CPU_ARCH_ARMv6) {
0632 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
0633
0634 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
0635 PMD_SECT_BUFFERED;
0636 } else {
0637
0638 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
0639 PMD_SECT_TEX(1);
0640 }
0641 } else {
0642 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
0643 }
0644
0645 #ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
0649 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
0650 mem_types[i].prot_pte |= PTE_EXT_AF;
0651 if (mem_types[i].prot_sect)
0652 mem_types[i].prot_sect |= PMD_SECT_AF;
0653 }
0654 kern_pgprot |= PTE_EXT_AF;
0655 vecs_pgprot |= PTE_EXT_AF;
0656
	/*
	 * Set PXN for user mappings.
	 */
0660 user_pgprot |= PTE_EXT_PXN;
0661 #endif
0662
0663 for (i = 0; i < 16; i++) {
0664 pteval_t v = pgprot_val(protection_map[i]);
0665 protection_map[i] = __pgprot(v | user_pgprot);
0666 }
0667
0668 mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
0669 mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
0670
0671 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
0672 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
0673 L_PTE_DIRTY | kern_pgprot);
0674
0675 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
0676 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
0677 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
0678 mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
0679 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
0680 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
0681 mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd;
0682 mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;
0683 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
0684 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
0685 mem_types[MT_ROM].prot_sect |= cp->pmd;
0686
0687 switch (cp->pmd) {
0688 case PMD_SECT_WT:
0689 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
0690 break;
0691 case PMD_SECT_WB:
0692 case PMD_SECT_WBWA:
0693 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
0694 break;
0695 }
0696 pr_info("Memory policy: %sData cache %s\n",
0697 ecc_mask ? "ECC enabled, " : "", cp->policy);
0698
0699 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
0700 struct mem_type *t = &mem_types[i];
0701 if (t->prot_l1)
0702 t->prot_l1 |= PMD_DOMAIN(t->domain);
0703 if (t->prot_sect)
0704 t->prot_sect |= PMD_DOMAIN(t->domain);
0705 }
0706 }
0707
0708 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
0709 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
0710 unsigned long size, pgprot_t vma_prot)
0711 {
0712 if (!pfn_valid(pfn))
0713 return pgprot_noncached(vma_prot);
0714 else if (file->f_flags & O_SYNC)
0715 return pgprot_writecombine(vma_prot);
0716 return vma_prot;
0717 }
0718 EXPORT_SYMBOL(phys_mem_access_prot);
0719 #endif
0720
0721 #define vectors_base() (vectors_high() ? 0xffff0000 : 0)
0722
0723 static void __init *early_alloc(unsigned long sz)
0724 {
0725 void *ptr = memblock_alloc(sz, sz);
0726
0727 if (!ptr)
0728 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
0729 __func__, sz, sz);
0730
0731 return ptr;
0732 }
0733
0734 static void *__init late_alloc(unsigned long sz)
0735 {
0736 void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
0737
0738 if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
0739 BUG();
0740 return ptr;
0741 }
0742
0743 static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
0744 unsigned long prot,
0745 void *(*alloc)(unsigned long sz))
0746 {
0747 if (pmd_none(*pmd)) {
0748 pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
0749 __pmd_populate(pmd, __pa(pte), prot);
0750 }
0751 BUG_ON(pmd_bad(*pmd));
0752 return pte_offset_kernel(pmd, addr);
0753 }
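
/*
 * Note on the allocation above (2-level/non-LPAE layout): each PTE page is
 * PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE bytes, i.e. 512 Linux PTEs followed by
 * 2 x 256 hardware PTEs sharing one 4 KiB page; __pmd_populate() points the
 * two hardware L1 entries at the hardware half.  See
 * arch/arm/include/asm/pgtable-2level.h for the authoritative description.
 */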
0754
0755 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
0756 unsigned long prot)
0757 {
0758 return arm_pte_alloc(pmd, addr, prot, early_alloc);
0759 }
0760
0761 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
0762 unsigned long end, unsigned long pfn,
0763 const struct mem_type *type,
0764 void *(*alloc)(unsigned long sz),
0765 bool ng)
0766 {
0767 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
0768 do {
0769 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
0770 ng ? PTE_EXT_NG : 0);
0771 pfn++;
0772 } while (pte++, addr += PAGE_SIZE, addr != end);
0773 }
0774
0775 static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
0776 unsigned long end, phys_addr_t phys,
0777 const struct mem_type *type, bool ng)
0778 {
0779 pmd_t *p = pmd;
0780
0781 #ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded in to
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), where as PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
0791 if (addr & SECTION_SIZE)
0792 pmd++;
0793 #endif
0794 do {
0795 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
0796 phys += SECTION_SIZE;
0797 } while (pmd++, addr += SECTION_SIZE, addr != end);
0798
0799 flush_pmd_entry(p);
0800 }
0801
0802 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
0803 unsigned long end, phys_addr_t phys,
0804 const struct mem_type *type,
0805 void *(*alloc)(unsigned long sz), bool ng)
0806 {
0807 pmd_t *pmd = pmd_offset(pud, addr);
0808 unsigned long next;
0809
0810 do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
0815 next = pmd_addr_end(addr, end);
0816
		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
0821 if (type->prot_sect &&
0822 ((addr | next | phys) & ~SECTION_MASK) == 0) {
0823 __map_init_section(pmd, addr, next, phys, type, ng);
0824 } else {
0825 alloc_init_pte(pmd, addr, next,
0826 __phys_to_pfn(phys), type, alloc, ng);
0827 }
0828
0829 phys += next - addr;
0830
0831 } while (pmd++, addr = next, addr != end);
0832 }
0833
0834 static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
0835 unsigned long end, phys_addr_t phys,
0836 const struct mem_type *type,
0837 void *(*alloc)(unsigned long sz), bool ng)
0838 {
0839 pud_t *pud = pud_offset(p4d, addr);
0840 unsigned long next;
0841
0842 do {
0843 next = pud_addr_end(addr, end);
0844 alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
0845 phys += next - addr;
0846 } while (pud++, addr = next, addr != end);
0847 }
0848
0849 static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
0850 unsigned long end, phys_addr_t phys,
0851 const struct mem_type *type,
0852 void *(*alloc)(unsigned long sz), bool ng)
0853 {
0854 p4d_t *p4d = p4d_offset(pgd, addr);
0855 unsigned long next;
0856
0857 do {
0858 next = p4d_addr_end(addr, end);
0859 alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
0860 phys += next - addr;
0861 } while (p4d++, addr = next, addr != end);
0862 }
0863
0864 #ifndef CONFIG_ARM_LPAE
0865 static void __init create_36bit_mapping(struct mm_struct *mm,
0866 struct map_desc *md,
0867 const struct mem_type *type,
0868 bool ng)
0869 {
0870 unsigned long addr, length, end;
0871 phys_addr_t phys;
0872 pgd_t *pgd;
0873
0874 addr = md->virtual;
0875 phys = __pfn_to_phys(md->pfn);
0876 length = PAGE_ALIGN(md->length);
0877
0878 if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
0879 pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
0880 (long long)__pfn_to_phys((u64)md->pfn), addr);
0881 return;
0882 }
0883
	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
0890 if (type->domain) {
0891 pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
0892 (long long)__pfn_to_phys((u64)md->pfn), addr);
0893 return;
0894 }
0895
0896 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
0897 pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
0898 (long long)__pfn_to_phys((u64)md->pfn), addr);
0899 return;
0900 }
0901
	/*
	 * Shift bits [35:32] of the address into bits [23:20] of the PMD
	 * (see the ARMv6 architecture specification).
	 */
0906 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
0907
0908 pgd = pgd_offset(mm, addr);
0909 end = addr + length;
0910 do {
0911 p4d_t *p4d = p4d_offset(pgd, addr);
0912 pud_t *pud = pud_offset(p4d, addr);
0913 pmd_t *pmd = pmd_offset(pud, addr);
0914 int i;
0915
0916 for (i = 0; i < 16; i++)
0917 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
0918 (ng ? PMD_SECT_nG : 0));
0919
0920 addr += SUPERSECTION_SIZE;
0921 phys += SUPERSECTION_SIZE;
0922 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
0923 } while (addr != end);
0924 }
0925 #endif
0926
0927 static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
0928 void *(*alloc)(unsigned long sz),
0929 bool ng)
0930 {
0931 unsigned long addr, length, end;
0932 phys_addr_t phys;
0933 const struct mem_type *type;
0934 pgd_t *pgd;
0935
0936 type = &mem_types[md->type];
0937
0938 #ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
0942 if (md->pfn >= 0x100000) {
0943 create_36bit_mapping(mm, md, type, ng);
0944 return;
0945 }
0946 #endif
0947
0948 addr = md->virtual & PAGE_MASK;
0949 phys = __pfn_to_phys(md->pfn);
0950 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
0951
0952 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
0953 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
0954 (long long)__pfn_to_phys(md->pfn), addr);
0955 return;
0956 }
0957
0958 pgd = pgd_offset(mm, addr);
0959 end = addr + length;
0960 do {
0961 unsigned long next = pgd_addr_end(addr, end);
0962
0963 alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);
0964
0965 phys += next - addr;
0966 addr = next;
0967 } while (pgd++, addr != end);
0968 }
0969
/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
0977 static void __init create_mapping(struct map_desc *md)
0978 {
0979 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
0980 pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
0981 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
0982 return;
0983 }
0984
0985 if (md->type == MT_DEVICE &&
0986 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
0987 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
0988 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
0989 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
0990 }
0991
0992 __create_mapping(&init_mm, md, early_alloc, false);
0993 }
0994
0995 void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
0996 bool ng)
0997 {
0998 #ifdef CONFIG_ARM_LPAE
0999 p4d_t *p4d;
1000 pud_t *pud;
1001
1002 p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
1003 if (WARN_ON(!p4d))
1004 return;
1005 pud = pud_alloc(mm, p4d, md->virtual);
1006 if (WARN_ON(!pud))
1007 return;
1008 pmd_alloc(mm, pud, 0);
1009 #endif
1010 __create_mapping(mm, md, late_alloc, ng);
1011 }
1012
/*
 * Create the architecture specific mappings
 */
1016 void __init iotable_init(struct map_desc *io_desc, int nr)
1017 {
1018 struct map_desc *md;
1019 struct vm_struct *vm;
1020 struct static_vm *svm;
1021
1022 if (!nr)
1023 return;
1024
1025 svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
1026 if (!svm)
1027 panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
1028 __func__, sizeof(*svm) * nr, __alignof__(*svm));
1029
1030 for (md = io_desc; nr; md++, nr--) {
1031 create_mapping(md);
1032
1033 vm = &svm->vm;
1034 vm->addr = (void *)(md->virtual & PAGE_MASK);
1035 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
1036 vm->phys_addr = __pfn_to_phys(md->pfn);
1037 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
1038 vm->flags |= VM_ARM_MTYPE(md->type);
1039 vm->caller = iotable_init;
1040 add_static_vm_early(svm++);
1041 }
1042 }
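
/*
 * Illustrative board usage (hypothetical addresses): a machine's ->map_io()
 * hook typically hands iotable_init() a static table of device windows:
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf8000000,
 *			.pfn     = __phys_to_pfn(0x10000000),
 *			.length  = SZ_1M,
 *			.type    = MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 */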
1043
1044 void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
1045 void *caller)
1046 {
1047 struct vm_struct *vm;
1048 struct static_vm *svm;
1049
1050 svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
1051 if (!svm)
1052 panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
1053 __func__, sizeof(*svm), __alignof__(*svm));
1054
1055 vm = &svm->vm;
1056 vm->addr = (void *)addr;
1057 vm->size = size;
1058 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
1059 vm->caller = caller;
1060 add_static_vm_early(svm);
1061 }
1062
1063 #ifndef CONFIG_ARM_LPAE
/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */
1078 static void __init pmd_empty_section_gap(unsigned long addr)
1079 {
1080 vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
1081 }
1082
1083 static void __init fill_pmd_gaps(void)
1084 {
1085 struct static_vm *svm;
1086 struct vm_struct *vm;
1087 unsigned long addr, next = 0;
1088 pmd_t *pmd;
1089
1090 list_for_each_entry(svm, &static_vmlist, list) {
1091 vm = &svm->vm;
1092 addr = (unsigned long)vm->addr;
1093 if (addr < next)
1094 continue;
1095
		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
1101 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1102 pmd = pmd_off_k(addr);
1103 if (pmd_none(*pmd))
1104 pmd_empty_section_gap(addr & PMD_MASK);
1105 }
1106
		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
1112 addr += vm->size;
1113 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1114 pmd = pmd_off_k(addr) + 1;
1115 if (pmd_none(*pmd))
1116 pmd_empty_section_gap(addr);
1117 }
1118
1119
1120 next = (addr + PMD_SIZE - 1) & PMD_MASK;
1121 }
1122 }
1123
1124 #else
1125 #define fill_pmd_gaps() do { } while (0)
1126 #endif
1127
1128 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
1129 static void __init pci_reserve_io(void)
1130 {
1131 struct static_vm *svm;
1132
1133 svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
1134 if (svm)
1135 return;
1136
1137 vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
1138 }
1139 #else
1140 #define pci_reserve_io() do { } while (0)
1141 #endif
1142
1143 #ifdef CONFIG_DEBUG_LL
1144 void __init debug_ll_io_init(void)
1145 {
1146 struct map_desc map;
1147
1148 debug_ll_addr(&map.pfn, &map.virtual);
1149 if (!map.pfn || !map.virtual)
1150 return;
1151 map.pfn = __phys_to_pfn(map.pfn);
1152 map.virtual &= PAGE_MASK;
1153 map.length = PAGE_SIZE;
1154 map.type = MT_DEVICE;
1155 iotable_init(&map, 1);
1156 }
1157 #endif
1158
1159 static unsigned long __initdata vmalloc_size = 240 * SZ_1M;
1160
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area;
 * the default is 240 MiB.
 */
1166 static int __init early_vmalloc(char *arg)
1167 {
1168 unsigned long vmalloc_reserve = memparse(arg, NULL);
1169 unsigned long vmalloc_max;
1170
1171 if (vmalloc_reserve < SZ_16M) {
1172 vmalloc_reserve = SZ_16M;
1173 pr_warn("vmalloc area is too small, limiting to %luMiB\n",
1174 vmalloc_reserve >> 20);
1175 }
1176
1177 vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);
1178 if (vmalloc_reserve > vmalloc_max) {
1179 vmalloc_reserve = vmalloc_max;
1180 pr_warn("vmalloc area is too big, limiting to %luMiB\n",
1181 vmalloc_reserve >> 20);
1182 }
1183
1184 vmalloc_size = vmalloc_reserve;
1185 return 0;
1186 }
1187 early_param("vmalloc", early_vmalloc);
1188
1189 phys_addr_t arm_lowmem_limit __initdata = 0;
1190
1191 void __init adjust_lowmem_bounds(void)
1192 {
1193 phys_addr_t block_start, block_end, memblock_limit = 0;
1194 u64 vmalloc_limit, i;
1195 phys_addr_t lowmem_limit = 0;
1196
	/*
	 * Let's use our own (unoptimized) equivalent of __pa() that is
	 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
	 * The result is used as the upper bound on physical memory
	 * address and may itself be outside the valid range for which
	 * phys_addr_t and therefore __pa() is defined.
	 */
1204 vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
1205 PAGE_OFFSET + PHYS_OFFSET;
1206
	/*
	 * The first usable region must be PMD aligned.  Mark its start
	 * as MEMBLOCK_NOMAP if it isn't.
	 */
1211 for_each_mem_range(i, &block_start, &block_end) {
1212 if (!IS_ALIGNED(block_start, PMD_SIZE)) {
1213 phys_addr_t len;
1214
1215 len = round_up(block_start, PMD_SIZE) - block_start;
1216 memblock_mark_nomap(block_start, len);
1217 }
1218 break;
1219 }
1220
1221 for_each_mem_range(i, &block_start, &block_end) {
1222 if (block_start < vmalloc_limit) {
1223 if (block_end > lowmem_limit)
				/*
				 * Compare as u64 so that vmalloc_limit is
				 * not truncated before the comparison when
				 * phys_addr_t is only 32 bits wide.
				 */
1230 lowmem_limit = min_t(u64,
1231 vmalloc_limit,
1232 block_end);
1233
			/*
			 * Find the first non-pmd-aligned page, and point
			 * memblock_limit at it. This relies on rounding the
			 * limit down to be pmd-aligned, which happens at the
			 * end of this function.
			 *
			 * With this algorithm, the start or end of almost any
			 * bank can be non-pmd-aligned. The only exception is
			 * that the start of bank 0 must be section-aligned,
			 * since otherwise memory would need to be allocated
			 * when mapping the start of bank 0, which occurs
			 * before any free memory is mapped.
			 */
1247 if (!memblock_limit) {
1248 if (!IS_ALIGNED(block_start, PMD_SIZE))
1249 memblock_limit = block_start;
1250 else if (!IS_ALIGNED(block_end, PMD_SIZE))
1251 memblock_limit = lowmem_limit;
1252 }
1253
1254 }
1255 }
1256
1257 arm_lowmem_limit = lowmem_limit;
1258
1259 high_memory = __va(arm_lowmem_limit - 1) + 1;
1260
1261 if (!memblock_limit)
1262 memblock_limit = arm_lowmem_limit;
1263
	/*
	 * Round the memblock limit down to a pmd size.  This
	 * helps to ensure that we will allocate memory from the
	 * last full pmd, which should be mapped.
	 */
1269 memblock_limit = round_down(memblock_limit, PMD_SIZE);
1270
1271 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
1272 if (memblock_end_of_DRAM() > arm_lowmem_limit) {
1273 phys_addr_t end = memblock_end_of_DRAM();
1274
1275 pr_notice("Ignoring RAM at %pa-%pa\n",
1276 &memblock_limit, &end);
1277 pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1278
1279 memblock_remove(memblock_limit, end - memblock_limit);
1280 }
1281 }
1282
1283 memblock_set_current_limit(memblock_limit);
1284 }
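
/*
 * Worked example for the vmalloc_limit computation above (assuming the
 * common values VMALLOC_END = 0xff800000, VMALLOC_OFFSET = 8 MiB,
 * PAGE_OFFSET = 0xc0000000 and PHYS_OFFSET = 0x80000000, with the default
 * 240 MiB vmalloc_size):
 *
 *	0xff800000 - 0x0f000000 - 0x00800000 - 0xc0000000 + 0x80000000
 *		= 0xb0000000
 *
 * so lowmem ends at physical 0xb0000000, i.e. the first 768 MiB of RAM are
 * mapped directly and anything above that becomes highmem (or is removed
 * when CONFIG_HIGHMEM is not enabled).
 */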
1285
1286 static __init void prepare_page_table(void)
1287 {
1288 unsigned long addr;
1289 phys_addr_t end;
1290
	/*
	 * Clear out all the mappings below the kernel image.
	 */
1294 #ifdef CONFIG_KASAN
	/*
	 * KASan's shadow memory inserts itself between TASK_SIZE and
	 * MODULES_VADDR; do not clear the KASan shadow memory mappings.
	 */
1299 for (addr = 0; addr < KASAN_SHADOW_START; addr += PMD_SIZE)
1300 pmd_clear(pmd_off_k(addr));
1301
	/*
	 * Skip over the KASan shadow area, then resume clearing up to
	 * MODULES_VADDR.  (KASAN_SHADOW_END may itself equal MODULES_VADDR,
	 * in which case there is nothing left to clear here.)
	 */
1307 for (addr = KASAN_SHADOW_END; addr < MODULES_VADDR; addr += PMD_SIZE)
1308 pmd_clear(pmd_off_k(addr));
1309 #else
1310 for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1311 pmd_clear(pmd_off_k(addr));
1312 #endif
1313
1314 #ifdef CONFIG_XIP_KERNEL
1315
1316 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
1317 #endif
1318 for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1319 pmd_clear(pmd_off_k(addr));
1320
	/*
	 * Find the end of the first block of lowmem.
	 */
1324 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1325 if (end >= arm_lowmem_limit)
1326 end = arm_lowmem_limit;
1327
	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
1332 for (addr = __phys_to_virt(end);
1333 addr < VMALLOC_START; addr += PMD_SIZE)
1334 pmd_clear(pmd_off_k(addr));
1335 }
1336
1337 #ifdef CONFIG_ARM_LPAE
1338
1339 #define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
1340 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1341 #else
1342 #define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
1343 #endif
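
/*
 * Size check (assuming 4 KiB pages): without LPAE this is
 * PTRS_PER_PGD (2048) * sizeof(pgd_t) (8) = 16 KiB, the classic ARM L1
 * table; with LPAE it is one page for the 4-entry PGD plus
 * 4 * 512 * sizeof(pmd_t) (8) = 16 KiB of PMD tables, 20 KiB in total.
 */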
1344
/*
 * Reserve the special regions of memory
 */
1348 void __init arm_mm_memblock_reserve(void)
1349 {
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
1354 memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1355
1356 #ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
1361 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1362 #endif
1363 }
1364
/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, except early fixmap, we might remove debug
 * device mappings.  This means earlycon can be used to debug this function.
 * Any other function or debugging method which may touch any device _will_
 * crash the kernel.
 */
1372 static void __init devicemaps_init(const struct machine_desc *mdesc)
1373 {
1374 struct map_desc map;
1375 unsigned long addr;
1376 void *vectors;
1377
	/*
	 * Allocate the vector page early.
	 */
1381 vectors = early_alloc(PAGE_SIZE * 2);
1382
1383 early_trap_init(vectors);
1384
	/*
	 * Clear page tables except the top pmd used by early fixmaps.
	 */
1388 for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
1389 pmd_clear(pmd_off_k(addr));
1390
1391 if (__atags_pointer) {
1392
1393 map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
1394 map.virtual = FDT_FIXED_BASE;
1395 map.length = FDT_FIXED_SIZE;
1396 map.type = MT_MEMORY_RO;
1397 create_mapping(&map);
1398 }
1399
	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
1404 #ifdef CONFIG_XIP_KERNEL
1405 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1406 map.virtual = MODULES_VADDR;
1407 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1408 map.type = MT_ROM;
1409 create_mapping(&map);
1410 #endif
1411
	/*
	 * Map the cache flushing regions.
	 */
1415 #ifdef FLUSH_BASE
1416 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1417 map.virtual = FLUSH_BASE;
1418 map.length = SZ_1M;
1419 map.type = MT_CACHECLEAN;
1420 create_mapping(&map);
1421 #endif
1422 #ifdef FLUSH_BASE_MINICACHE
1423 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1424 map.virtual = FLUSH_BASE_MINICACHE;
1425 map.length = SZ_1M;
1426 map.type = MT_MINICLEAN;
1427 create_mapping(&map);
1428 #endif
1429
	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
1435 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1436 map.virtual = 0xffff0000;
1437 map.length = PAGE_SIZE;
1438 #ifdef CONFIG_KUSER_HELPERS
1439 map.type = MT_HIGH_VECTORS;
1440 #else
1441 map.type = MT_LOW_VECTORS;
1442 #endif
1443 create_mapping(&map);
1444
1445 if (!vectors_high()) {
1446 map.virtual = 0;
1447 map.length = PAGE_SIZE * 2;
1448 map.type = MT_LOW_VECTORS;
1449 create_mapping(&map);
1450 }
1451
1452
1453 map.pfn += 1;
1454 map.virtual = 0xffff0000 + PAGE_SIZE;
1455 map.length = PAGE_SIZE;
1456 map.type = MT_LOW_VECTORS;
1457 create_mapping(&map);
1458
	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
1462 if (mdesc->map_io)
1463 mdesc->map_io();
1464 else
1465 debug_ll_io_init();
1466 fill_pmd_gaps();
1467
1468
1469 pci_reserve_io();
1470
	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
1477 local_flush_tlb_all();
1478 flush_cache_all();
1479
1480
1481 early_abt_enable();
1482 }
1483
1484 static void __init kmap_init(void)
1485 {
1486 #ifdef CONFIG_HIGHMEM
1487 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1488 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1489 #endif
1490
1491 early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
1492 _PAGE_KERNEL_TABLE);
1493 }
1494
1495 static void __init map_lowmem(void)
1496 {
1497 phys_addr_t start, end;
1498 u64 i;
1499
1500
1501 for_each_mem_range(i, &start, &end) {
1502 struct map_desc map;
1503
1504 pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n",
1505 (long long)start, (long long)end);
1506 if (end > arm_lowmem_limit)
1507 end = arm_lowmem_limit;
1508 if (start >= end)
1509 break;
1510
		/*
		 * If the kernel image has been placed inside this memblock
		 * (the common case: the kernel normally sits at the very
		 * start of lowmem, but any placement is possible), its
		 * physical footprint [kernel_sec_start, kernel_sec_end) must
		 * be chiselled out here, because map_kernel() maps that part
		 * separately with stricter permissions.
		 *
		 * Depending on how the kernel overlaps the range we either
		 * skip the range entirely, trim it at one end, or split it
		 * into a mapping below and a mapping above the kernel.
		 */

		/* Kernel covers the whole range: map nothing, should be rare */
1536 if ((start > kernel_sec_start) && (end < kernel_sec_end))
1537 break;
1538
1539
1540 if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) {
1541
1542 if ((start < kernel_sec_start) && (end > kernel_sec_end)) {
1543
1544 map.pfn = __phys_to_pfn(start);
1545 map.virtual = __phys_to_virt(start);
1546 map.length = kernel_sec_start - start;
1547 map.type = MT_MEMORY_RW;
1548 create_mapping(&map);
1549
1550 map.pfn = __phys_to_pfn(kernel_sec_end);
1551 map.virtual = __phys_to_virt(kernel_sec_end);
1552 map.length = end - kernel_sec_end;
1553 map.type = MT_MEMORY_RW;
1554 create_mapping(&map);
1555 break;
1556 }
1557
1558 if (kernel_sec_start == start)
1559 start = kernel_sec_end;
1560
1561 if (kernel_sec_end == end)
1562 end = kernel_sec_start;
1563 } else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) {
1564
1565 start = kernel_sec_end;
1566 } else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) {
1567
1568 end = kernel_sec_start;
1569 }
1570 map.pfn = __phys_to_pfn(start);
1571 map.virtual = __phys_to_virt(start);
1572 map.length = end - start;
1573 map.type = MT_MEMORY_RW;
1574 create_mapping(&map);
1575 }
1576 }
1577
1578 static void __init map_kernel(void)
1579 {
	/*
	 * The kernel's physical footprint, [kernel_sec_start, kernel_sec_end)
	 * as recorded by the boot assembly, is split in two here:
	 *
	 *   kernel_sec_start .. round_up(__pa(__init_end), SECTION_SIZE):
	 *	text, rodata and init code, mapped MT_MEMORY_RWX so it is
	 *	executable;
	 *   .. kernel_sec_end:
	 *	data and bss, mapped MT_MEMORY_RW (execute-never).
	 *
	 * Both mappings use section mappings, hence the SECTION_SIZE
	 * rounding of the split point.
	 */
1601 phys_addr_t kernel_x_start = kernel_sec_start;
1602 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
1603 phys_addr_t kernel_nx_start = kernel_x_end;
1604 phys_addr_t kernel_nx_end = kernel_sec_end;
1605 struct map_desc map;
1606
1607 map.pfn = __phys_to_pfn(kernel_x_start);
1608 map.virtual = __phys_to_virt(kernel_x_start);
1609 map.length = kernel_x_end - kernel_x_start;
1610 map.type = MT_MEMORY_RWX;
1611 create_mapping(&map);
1612
1613
1614 if (kernel_x_end == kernel_nx_end)
1615 return;
1616
1617 map.pfn = __phys_to_pfn(kernel_nx_start);
1618 map.virtual = __phys_to_virt(kernel_nx_start);
1619 map.length = kernel_nx_end - kernel_nx_start;
1620 map.type = MT_MEMORY_RW;
1621 create_mapping(&map);
1622 }
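
/*
 * Example of the resulting split (hypothetical addresses): with the kernel
 * section spanning 0x80000000..0x81200000 and __init_end at physical
 * 0x80f40000, map_kernel() creates an MT_MEMORY_RWX mapping for
 * 0x80000000..0x81000000 (__init_end rounded up to a section) and an
 * MT_MEMORY_RW mapping for the remaining 0x81000000..0x81200000.
 */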
1623
1624 #ifdef CONFIG_ARM_PV_FIXUP
1625 typedef void pgtables_remap(long long offset, unsigned long pgd);
1626 pgtables_remap lpae_pgtables_remap_asm;
1627
/*
 * early_paging_init() recreates the boot-time page table setup, allowing
 * machines to switch over to a high (>4G) address space on LPAE systems.
 */
1632 static void __init early_paging_init(const struct machine_desc *mdesc)
1633 {
1634 pgtables_remap *lpae_pgtables_remap;
1635 unsigned long pa_pgd;
1636 unsigned int cr, ttbcr;
1637 long long offset;
1638
1639 if (!mdesc->pv_fixup)
1640 return;
1641
1642 offset = mdesc->pv_fixup();
1643 if (offset == 0)
1644 return;
1645
	/*
	 * Offset the kernel section physical offsets so that the kernel
	 * mapping will work out later on.
	 */
1650 kernel_sec_start += offset;
1651 kernel_sec_end += offset;
1652
	/*
	 * Get the address of the remap function in the 1:1 identity
	 * mapping set up by the early page table assembly code.  We
	 * must get this prior to the pv update.  The following barrier
	 * ensures that this is complete before we fix up any P:V offsets.
	 */
1659 lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
1660 pa_pgd = __pa(swapper_pg_dir);
1661 barrier();
1662
1663 pr_info("Switching physical address space to 0x%08llx\n",
1664 (u64)PHYS_OFFSET + offset);
1665
1666
1667 __pv_offset += offset;
1668 __pv_phys_pfn_offset += PFN_DOWN(offset);
1669
1670
1671 fixup_pv_table(&__pv_table_begin,
1672 (&__pv_table_end - &__pv_table_begin) << 2);
1673
	/*
	 * We are changing not only the virtual to physical mapping, but also
	 * the physical addresses used to access memory.  We need to flush
	 * all levels of cache in the system with caching disabled to
	 * ensure that all data is written back, and nothing is prefetched
	 * into the caches.  We also need to prevent the TLB walkers from
	 * allocating into the caches too.  Note that this is ARMv7 LPAE
	 * specific.
	 */
1683 cr = get_cr();
1684 set_cr(cr & ~(CR_I | CR_C));
1685 asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
1686 asm volatile("mcr p15, 0, %0, c2, c0, 2"
1687 : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
1688 flush_cache_all();
1689
	/*
	 * Remap the page tables.  This must run from the identity-mapped
	 * region, with the MMU effectively disabled, and hence has to be
	 * assembly; it simply rewrites the temporary tables set up by the
	 * initial assembly code for the new physical offset.
	 */
1696 lpae_pgtables_remap(offset, pa_pgd);
1697
1698
1699 asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
1700 set_cr(cr);
1701 }
1702
1703 #else
1704
1705 static void __init early_paging_init(const struct machine_desc *mdesc)
1706 {
1707 long long offset;
1708
1709 if (!mdesc->pv_fixup)
1710 return;
1711
1712 offset = mdesc->pv_fixup();
1713 if (offset == 0)
1714 return;
1715
1716 pr_crit("Physical address space modification is only to support Keystone2.\n");
1717 pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
1718 pr_crit("feature. Your kernel may crash now, have a good day.\n");
1719 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1720 }
1721
1722 #endif
1723
1724 static void __init early_fixmap_shutdown(void)
1725 {
1726 int i;
1727 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
1728
1729 pte_offset_fixmap = pte_offset_late_fixmap;
1730 pmd_clear(fixmap_pmd(va));
1731 local_flush_tlb_kernel_page(va);
1732
1733 for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
1734 pte_t *pte;
1735 struct map_desc map;
1736
1737 map.virtual = fix_to_virt(i);
1738 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
1739
1740
1741 if (pte_none(*pte) ||
1742 (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
1743 continue;
1744
1745 map.pfn = pte_pfn(*pte);
1746 map.type = MT_DEVICE;
1747 map.length = PAGE_SIZE;
1748
1749 create_mapping(&map);
1750 }
1751 }
1752
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page.
 */
1757 void __init paging_init(const struct machine_desc *mdesc)
1758 {
1759 void *zero_page;
1760
1761 pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
1762 kernel_sec_start, kernel_sec_end);
1763
1764 prepare_page_table();
1765 map_lowmem();
1766 memblock_set_current_limit(arm_lowmem_limit);
1767 pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
1768
	/*
	 * With the lowmem mapped and the memblock limit set, memory handed
	 * out by early_alloc() is now accessible through the direct mapping.
	 */
1772 map_kernel();
1773 dma_contiguous_remap();
1774 early_fixmap_shutdown();
1775 devicemaps_init(mdesc);
1776 kmap_init();
1777 tcm_init();
1778
1779 top_pmd = pmd_off_k(0xffff0000);
1780
1781
1782 zero_page = early_alloc(PAGE_SIZE);
1783
1784 bootmem_init();
1785
1786 empty_zero_page = virt_to_page(zero_page);
1787 __flush_dcache_page(NULL, empty_zero_page);
1788 }
1789
1790 void __init early_mm_init(const struct machine_desc *mdesc)
1791 {
1792 build_mem_type_table();
1793 early_paging_init(mdesc);
1794 }
1795
1796 void set_pte_at(struct mm_struct *mm, unsigned long addr,
1797 pte_t *ptep, pte_t pteval)
1798 {
1799 unsigned long ext = 0;
1800
1801 if (addr < TASK_SIZE && pte_valid_user(pteval)) {
1802 if (!pte_special(pteval))
1803 __sync_icache_dcache(pteval);
1804 ext |= PTE_EXT_NG;
1805 }
1806
1807 set_pte_ext(ptep, pteval, ext);
1808 }