// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

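/*
 * Page protection for userspace mappings of physical memory, e.g. the
 * /dev/mem mmap path. A machdep hook gets first say; failing that,
 * anything that is not RAM is mapped non-cached.
 */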
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                  unsigned long size, pgprot_t vma_prot)
{
    if (ppc_md.phys_mem_access_prot)
        return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

    if (!page_is_ram(pfn))
        vma_prot = pgprot_noncached(vma_prot);

    return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
    return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

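/*
 * Weak fallbacks: MMU backends that support memory hotplug (hash and
 * radix on book3s64, for instance) override these, so hitting one of
 * them means the linear mapping cannot be grown or shrunk here.
 */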
int __weak create_section_mapping(unsigned long start, unsigned long end,
                  int nid, pgprot_t prot)
{
    return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
    return -ENODEV;
}

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
                     struct mhp_params *params)
{
    int rc;

    start = (unsigned long)__va(start);
    mutex_lock(&linear_mapping_mutex);
    rc = create_section_mapping(start, start + size, nid,
                    params->pgprot);
    mutex_unlock(&linear_mapping_mutex);
    if (rc) {
        pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
            start, start + size, rc);
        return -EFAULT;
    }
    return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
    int ret;

    /* Remove htab bolted mappings for this section of memory */
    start = (unsigned long)__va(start);

    mutex_lock(&linear_mapping_mutex);
    ret = remove_section_mapping(start, start + size);
    mutex_unlock(&linear_mapping_mutex);
    if (ret)
        pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
            start, start + size, ret);

    /* Ensure all vmalloc mappings are flushed in case they also
     * hit that section of memory
     */
    vm_unmap_aliases();
}

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
    unsigned long end_pfn = PFN_UP(start + size);

    if (end_pfn > max_pfn) {
        max_pfn = end_pfn;
        max_low_pfn = end_pfn;
        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
    }
}

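/*
 * Thin wrapper around the generic __add_pages(): once the struct pages
 * exist, push max_pfn, max_low_pfn and high_memory out so that they
 * cover the newly added range.
 */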
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
            struct mhp_params *params)
{
    int ret;

    ret = __add_pages(nid, start_pfn, nr_pages, params);
    if (ret)
        return ret;

    /* update max_pfn, max_low_pfn and high_memory */
    update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
                  nr_pages << PAGE_SHIFT);

    return ret;
}

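/*
 * Hot-add: create the linear mapping first, then let the core add the
 * struct pages; if that fails, tear the mapping back down so the range
 * is left exactly as it was found.
 */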
int __ref arch_add_memory(int nid, u64 start, u64 size,
              struct mhp_params *params)
{
    unsigned long start_pfn = start >> PAGE_SHIFT;
    unsigned long nr_pages = size >> PAGE_SHIFT;
    int rc;

    rc = arch_create_linear_mapping(nid, start, size, params);
    if (rc)
        return rc;
    rc = add_pages(nid, start_pfn, nr_pages, params);
    if (rc)
        arch_remove_linear_mapping(start, size);
    return rc;
}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
    unsigned long start_pfn = start >> PAGE_SHIFT;
    unsigned long nr_pages = size >> PAGE_SHIFT;

    __remove_pages(start_pfn, nr_pages, altmap);
    arch_remove_linear_mapping(start, size);
}
#endif

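/*
 * Non-NUMA fallbacks: all of memory lives in a single node 0, with the
 * pfn limits taken straight from memblock.
 */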
#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
    max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
    min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
    max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

    /* Place all memblock_regions in the same node and merge contiguous
     * memblock_regions
     */
    memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
    sparse_init();
}

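/*
 * Hibernation must not try to save pages that fall in the holes between
 * memblock ranges, so register every such gap as a nosave region.
 */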
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
    unsigned long spfn, epfn, prev = 0;
    int i;

    for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
        if (prev && prev < spfn)
            register_nosave_region(prev, spfn);

        prev = epfn;
    }

    return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
    return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to cover 31 bits on all platforms (30 bits on PPC32,
 * see paging_init() below) and ZONE_NORMAL to be everything else. GFP_DMA32
 * page allocations automatically fall back to ZONE_DMA.
 *
 * By keeping the boundary this low unconditionally, we can exploit
 * zone_dma_bits to inform the generic DMA mapping code.  32-bit only
 * devices (if not handled by an IOMMU anyway) will take a first dip into
 * ZONE_NORMAL and get otherwise served by ZONE_DMA.
 */
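/*
 * For example (illustrative numbers): with 4K pages (PAGE_SHIFT == 12)
 * and zone_dma_bits == 31, ZONE_DMA ends at pfn 1UL << (31 - 12) ==
 * 0x80000, i.e. it covers the first 2 GiB of RAM; with 64K pages the
 * same 2 GiB boundary is 0x8000 pfns.
 */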
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
    unsigned long long total_ram = memblock_phys_mem_size();
    phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
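    /*
     * Pre-map each kmap fixmap slot and the PKMAP base with a dummy PTE:
     * this forces the backing page-table pages to be allocated now, so
     * that kmap_atomic()/kmap() can later write real entries into them
     * directly (and virt_to_kpte() below can find the PKMAP PTEs).
     */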
    unsigned long v = __fix_to_virt(FIX_KMAP_END);
    unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

    for (; v < end; v += PAGE_SIZE)
        map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

    map_kernel_page(PKMAP_BASE, 0, __pgprot(0));    /* XXX gross */
    pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

    printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
           (unsigned long long)top_of_ram, total_ram);
    printk(KERN_DEBUG "Memory hole size: %ldMB\n",
           (long int)((top_of_ram - total_ram) >> 20));

    /*
     * Allow 30-bit DMA for very limited Broadcom wifi chips on many
     * powerbooks.
     */
    if (IS_ENABLED(CONFIG_PPC32))
        zone_dma_bits = 30;
    else
        zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
    max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
                      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
    max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
    max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

    free_area_init(max_zone_pfns);

    mark_nonram_nosave();
}

void __init mem_init(void)
{
    /*
     * book3s is limited to 16 page sizes due to encoding this in
     * a 4-bit field for slices.
     */
    BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
    /*
     * Some platforms (e.g. 85xx) limit DMA-able memory way below
     * 4G. We force memblock to bottom-up mode to ensure that the
     * memory allocated in swiotlb_init() is DMA-able.
     * As it's the last memblock allocation, no need to reset it
     * back to top-down.
     */
    memblock_set_bottom_up(true);
    swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

    high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
    set_max_mapnr(max_pfn);

    kasan_late_init();

    memblock_free_all();

#ifdef CONFIG_HIGHMEM
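    /*
     * memblock_free_all() above only returned lowmem to the buddy
     * allocator; hand over each highmem page that memblock has not
     * reserved here as well.
     */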
    {
        unsigned long pfn, highmem_mapnr;

        highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
        for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
            phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
            struct page *page = pfn_to_page(pfn);
            if (!memblock_is_reserved(paddr))
                free_highmem_page(page);
        }
    }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
    /*
     * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
     * functions; do it here for the non-SMP case.
     */
    per_cpu(next_tlbcam_idx, smp_processor_id()) =
        (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
    pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
    pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
        KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
    pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
    pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
        PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
    if (ioremap_bot != IOREMAP_TOP)
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
            ioremap_bot, IOREMAP_TOP);
    pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
        VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
    pr_info("  * 0x%08lx..0x%08lx  : modules\n",
        MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

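/*
 * Called once boot is complete: switch ppc_md.progress to the plain
 * printk implementation, mark init text non-executable, flip
 * init_mem_is_free (code patching checks it so it never touches freed
 * init text), then poison and release the .init sections.
 */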
void free_initmem(void)
{
    ppc_md.progress = ppc_printk_progress;
    mark_initmem_nx();
    static_branch_enable(&init_mem_is_free);
    free_initmem_default(POISON_FREE_INITMEM);
    ftrace_free_init_tramp();
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
    phys_addr_t start, end;
    u64 i;

    for_each_mem_range(i, &start, &end) {
        struct resource *res;

        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        WARN_ON(!res);

        if (res) {
            res->name = "System RAM";
            res->start = start;
            /*
             * In memblock, end points to the first byte after
             * the range while in resources, end points to the
             * last byte in the range.
             */
            res->end = end - 1;
            res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
            WARN_ON(request_resource(&iomem_resource, res) < 0);
        }
    }

    return 0;
}
subsys_initcall(add_system_ram_resources);

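/*
 * The net effect is one "System RAM" line per memblock range in
 * /proc/iomem, e.g. (addresses purely illustrative):
 *
 *   00000000-7fffffff : System RAM
 */
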
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
    if (page_is_rtas_user_buf(pfn))
        return 1;
    if (iomem_is_exclusive(PFN_PHYS(pfn)))
        return 0;
    if (!page_is_ram(pfn))
        return 1;
    return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);