0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015 #include <linux/memblock.h>
0016 #include <linux/highmem.h>
0017 #include <linux/suspend.h>
0018 #include <linux/dma-direct.h>
0019
0020 #include <asm/swiotlb.h>
0021 #include <asm/machdep.h>
0022 #include <asm/rtas.h>
0023 #include <asm/kasan.h>
0024 #include <asm/svm.h>
0025 #include <asm/mmzone.h>
0026 #include <asm/ftrace.h>
0027 #include <asm/code-patching.h>
0028 #include <asm/setup.h>
0029
0030 #include <mm/mmu_decl.h>
0031
/* Optional cap on usable RAM, set from the "mem=" boot parameter (0 = no limit). */
unsigned long long memory_limit;

/* Shared all-zeroes page backing anonymous read faults; page-aligned in BSS. */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
0036
0037 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
0038 unsigned long size, pgprot_t vma_prot)
0039 {
0040 if (ppc_md.phys_mem_access_prot)
0041 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
0042
0043 if (!page_is_ram(pfn))
0044 vma_prot = pgprot_noncached(vma_prot);
0045
0046 return vma_prot;
0047 }
0048 EXPORT_SYMBOL(phys_mem_access_prot);
0049
0050 #ifdef CONFIG_MEMORY_HOTPLUG
0051 static DEFINE_MUTEX(linear_mapping_mutex);
0052
0053 #ifdef CONFIG_NUMA
0054 int memory_add_physaddr_to_nid(u64 start)
0055 {
0056 return hot_add_scn_to_nid(start);
0057 }
0058 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
0059 #endif
0060
/*
 * Weak default: MMU flavours that support memory hotplug override this
 * to insert the range into the kernel linear mapping.  -ENODEV signals
 * that no implementation is available on this platform.
 */
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}
0066
/*
 * Weak default counterpart to create_section_mapping(); overridden by
 * MMU flavours that can tear a range out of the linear mapping.
 */
int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
0071
/*
 * Create the kernel linear (direct) mapping for a hot-added range.
 * @start and @size arrive as physical; @start is converted to a kernel
 * virtual address before the MMU-specific create_section_mapping() is
 * invoked.  Serialised against concurrent add/remove by
 * linear_mapping_mutex.  Returns 0 on success, -EFAULT on failure.
 */
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	/* Work in kernel virtual addresses from here on. */
	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}
0089
/*
 * Tear the linear mapping of a hot-removed range back out.  Failure is
 * only warned about — by this point the memory is already gone from the
 * page allocator, so there is nothing better to do.
 */
void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Convert the physical range to kernel virtual addresses. */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Drop any lazily-kept vmalloc aliases of the region so no stale
	 * TLB entries outlive the mapping (NOTE(review): comment was
	 * stripped from this copy — confirm rationale against upstream).
	 */
	vm_unmap_aliases();
}
0109
0110
0111
0112
0113
0114 static void update_end_of_memory_vars(u64 start, u64 size)
0115 {
0116 unsigned long end_pfn = PFN_UP(start + size);
0117
0118 if (end_pfn > max_pfn) {
0119 max_pfn = end_pfn;
0120 max_low_pfn = end_pfn;
0121 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
0122 }
0123 }
0124
0125 int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
0126 struct mhp_params *params)
0127 {
0128 int ret;
0129
0130 ret = __add_pages(nid, start_pfn, nr_pages, params);
0131 if (ret)
0132 return ret;
0133
0134
0135 update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
0136 nr_pages << PAGE_SHIFT);
0137
0138 return ret;
0139 }
0140
0141 int __ref arch_add_memory(int nid, u64 start, u64 size,
0142 struct mhp_params *params)
0143 {
0144 unsigned long start_pfn = start >> PAGE_SHIFT;
0145 unsigned long nr_pages = size >> PAGE_SHIFT;
0146 int rc;
0147
0148 rc = arch_create_linear_mapping(nid, start, size, params);
0149 if (rc)
0150 return rc;
0151 rc = add_pages(nid, start_pfn, nr_pages, params);
0152 if (rc)
0153 arch_remove_linear_mapping(start, size);
0154 return rc;
0155 }
0156
/*
 * Arch hook for memory hot-remove: strip the range from the memmap
 * first, then drop its kernel linear mapping (reverse of
 * arch_add_memory()).
 */
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
0165 #endif
0166
0167 #ifndef CONFIG_NUMA
/*
 * Non-NUMA topology setup: derive the pfn limits from memblock and,
 * with HIGHMEM, cap the lowmem pfn at lowmem_end_addr.
 */
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Assign all memory to node 0 — presumably to merge contiguous
	 * memblock regions under a single node on !NUMA (NOTE(review):
	 * original comment stripped; confirm against upstream).
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
0181
/* Non-NUMA init-time memory setup: only the sparse memmap needs building. */
void __init initmem_init(void)
{
	sparse_init();
}
0186
0187
0188 static int __init mark_nonram_nosave(void)
0189 {
0190 unsigned long spfn, epfn, prev = 0;
0191 int i;
0192
0193 for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
0194 if (prev && prev < spfn)
0195 register_nosave_region(prev, spfn);
0196
0197 prev = epfn;
0198 }
0199
0200 return 0;
0201 }
0202 #else
/* NUMA build: nosave holes are handled elsewhere, so this is a no-op stub. */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
0207 #endif
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
/* Per-zone upper pfn limits handed to free_area_init() by paging_init(). */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
0222
0223
0224
0225
/*
 * paging_init() sets up the zone pfn limits and (with HIGHMEM) the
 * kmap fixmap and pkmap page tables, then hands the zone sizes to
 * free_area_init().
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	/* Pre-create empty PTE slots for the atomic-kmap fixmap window. */
	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0));

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * ZONE_DMA width: 30 bits on PPC32, 31 bits otherwise.
	 * NOTE(review): the original rationale comment was stripped here;
	 * presumably this accommodates devices with narrow DMA masks —
	 * confirm against upstream before relying on the exact values.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}
0269
/*
 * mem_init(): late boot memory initialisation — set up swiotlb, hand
 * all memblock memory to the buddy allocator, free highmem pages, and
 * print the PPC32 kernel virtual memory layout.
 */
void __init mem_init(void)
{
	/*
	 * Sanity bound on MMU page-size enumeration; a table elsewhere
	 * presumably encodes the page-size index in 4 bits (NOTE(review):
	 * original comment stripped — confirm which consumer imposes 16).
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Allocate bottom-up so the swiotlb buffer lands in low,
	 * DMA-reachable memory (NOTE(review): original rationale comment
	 * stripped — confirm against upstream).
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	/* Release all remaining memblock memory to the page allocator. */
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		/* Free every non-reserved highmem page into the buddy allocator. */
		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * Seed the per-CPU next-TLBCAM index from the number of TLB1
	 * entries reported by the hardware.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif
}
0342
/*
 * Release the kernel's __init text/data once boot is done.  The
 * ordering matters: init memory is marked non-executable and
 * init_mem_is_free is raised before the pages are poisoned and freed,
 * and ftrace init trampolines are dropped last.
 */
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	static_branch_enable(&init_mem_is_free);
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}
0351
0352
0353
0354
0355
0356 static int __init add_system_ram_resources(void)
0357 {
0358 phys_addr_t start, end;
0359 u64 i;
0360
0361 for_each_mem_range(i, &start, &end) {
0362 struct resource *res;
0363
0364 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
0365 WARN_ON(!res);
0366
0367 if (res) {
0368 res->name = "System RAM";
0369 res->start = start;
0370
0371
0372
0373
0374
0375 res->end = end - 1;
0376 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
0377 WARN_ON(request_resource(&iomem_resource, res) < 0);
0378 }
0379 }
0380
0381 return 0;
0382 }
0383 subsys_initcall(add_system_ram_resources);
0384
0385 #ifdef CONFIG_STRICT_DEVMEM
0386
0387
0388
0389
0390
0391
0392
/*
 * STRICT_DEVMEM policy for /dev/mem: RTAS user buffers are always
 * allowed; exclusively-claimed iomem is always refused; otherwise
 * only non-RAM (device) pages may be mapped.  Returns 1 to allow,
 * 0 to refuse.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;

	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;

	return page_is_ram(pfn) ? 0 : 1;
}
0403 #endif
0404
/*
 * walk_system_ram_range() is implemented in generic code; export it
 * for GPL modules that need to iterate System RAM resources.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);