// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the page that
 * represents the start of the subsection it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
    unsigned long start_pfn;
    unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));

    /* Return the pfn of the start of the subsection. */
    start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
    return pfn_to_page(start_pfn);
}

/*
 * Since memory is added in sub-section chunks, before creating a new vmemmap
 * mapping, the kernel should check whether there is an existing memmap mapping
 * covering the newly added subsection. This is needed because the kernel can
 * map the vmemmap area using 16MB pages, each of which covers a 16G memory
 * range, and such a range spans multiple (2M) subsections.
 *
 * If any subsection in the 16G range mapped by vmemmap is valid we consider the
 * vmemmap populated (there is a page table entry already present). We can't do
 * a page table lookup here because with hash translation we don't keep vmemmap
 * details in the Linux page table.
 */
static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
    struct page *start;
    unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;

    start = vmemmap_subsection_start(vmemmap_addr);

    for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
        /*
         * The pfn_valid() check here is intended to check whether
         * any subsection in this range has already been
         * initialized.
         */
        if (pfn_valid(page_to_pfn(start)))
            return 1;

    return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are instead tracked in a simple linked
 * list: 'vmemmap_list' maintains the entire vmemmap physical mapping at all
 * times, whereas the 'next' list maintains the available vmemmap_backing
 * structures which have been deleted from the 'vmemmap_list' during system
 * runtime (memory hotplug remove operation). The freed 'vmemmap_backing'
 * structures are reused later when new requests come in, without allocating
 * fresh memory. The same pointer also tracks the allocated 'vmemmap_backing'
 * structures, as we allocate one full page of them at a time when we don't
 * have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot and then tracks the freed nodes during runtime.
 * This is racy in principle, but the two uses never overlap because they
 * are separated by the boot process. It would be a problem if a memory
 * hotplug operation somehow happened during boot!
 */
static int num_left;
static int num_freed;

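/*
 * Hand out one vmemmap_backing structure: reuse an entry from the freed
 * list when one is available, otherwise carve the next chunk out of the
 * most recently allocated backing page (allocating a fresh page first if
 * none is left).
 */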
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
    struct vmemmap_backing *vmem_back;

    /* get from freed entries first */
    if (num_freed) {
        num_freed--;
        vmem_back = next;
        next = next->list;

        return vmem_back;
    }

    /* allocate a page when required and hand out chunks */
    if (!num_left) {
        next = vmemmap_alloc_block(PAGE_SIZE, node);
        if (unlikely(!next)) {
            WARN_ON(1);
            return NULL;
        }
        num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
    }

    num_left--;

    return next++;
}

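/*
 * Record the physical backing for one vmemmap block by adding a new
 * vmemmap_backing entry at the head of 'vmemmap_list'.
 */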
static __meminit int vmemmap_list_populate(unsigned long phys,
                       unsigned long start,
                       int node)
{
    struct vmemmap_backing *vmem_back;

    vmem_back = vmemmap_list_alloc(node);
    if (unlikely(!vmem_back)) {
        pr_debug("vmemmap list allocation failed\n");
        return -ENOMEM;
    }

    vmem_back->phys = phys;
    vmem_back->virt_addr = start;
    vmem_back->list = vmemmap_list;

    vmemmap_list = vmem_back;
    return 0;
}

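/*
 * Return true if the pfn range described by the vmemmap block at 'start'
 * (page_size bytes worth of struct pages) extends outside the pfn range
 * covered by the altmap, in which case the caller falls back to a regular
 * memory allocation for the backing.
 */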
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
                unsigned long page_size)
{
    unsigned long nr_pfn = page_size / sizeof(struct page);
    unsigned long start_pfn = page_to_pfn((struct page *)start);

    if ((start_pfn + nr_pfn) > altmap->end_pfn)
        return true;

    if (start_pfn < altmap->base_pfn)
        return true;

    return false;
}

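/*
 * Create vmemmap mappings for the struct pages covering [start, end),
 * allocating the backing memory one block of the vmemmap page size at a
 * time, preferring the altmap when one is usable.
 */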
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
        struct vmem_altmap *altmap)
{
    bool altmap_alloc;
    unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

    /* Align to the page size used for the vmemmap mapping. */
    start = ALIGN_DOWN(start, page_size);

    pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

    for (; start < end; start += page_size) {
        void *p = NULL;
        int rc;

        /*
         * This vmemmap range backs several subsections. If any of
         * those subsections is marked valid, we already have a page
         * table covering this range and hence the vmemmap range is
         * populated.
         */
        if (vmemmap_populated(start, page_size))
            continue;

        /*
         * Allocate from the altmap first if we have one. This may
         * fail due to alignment issues when using 16MB hugepages, so
         * fall back to system memory if the altmap allocation fails.
         */
        if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
            p = vmemmap_alloc_block_buf(page_size, node, altmap);
            if (!p)
                pr_debug("altmap block allocation failed, falling back to system memory");
            else
                altmap_alloc = true;
        }
        if (!p) {
            p = vmemmap_alloc_block_buf(page_size, node, NULL);
            altmap_alloc = false;
        }
        if (!p)
            return -ENOMEM;

        if (vmemmap_list_populate(__pa(p), start, node)) {
            /*
             * If we don't populate the vmemmap list, we don't
             * have the ability to free the allocated vmemmap
             * pages in section_deactivate(). Hence free them
             * here.
             */
            int nr_pfns = page_size >> PAGE_SHIFT;
            unsigned long page_order = get_order(page_size);

            if (altmap_alloc)
                vmem_altmap_free(altmap, nr_pfns);
            else
                free_pages((unsigned long)p, page_order);
            return -ENOMEM;
        }

        pr_debug("      * %016lx..%016lx allocated at %p\n",
             start, start + page_size, p);

        rc = vmemmap_create_mapping(start, page_size, __pa(p));
        if (rc < 0) {
            pr_warn("%s: Unable to create vmemmap mapping: %d\n",
                __func__, rc);
            return -EFAULT;
        }
    }

    return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
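/*
 * Unlink the vmemmap_backing entry for 'start' from 'vmemmap_list', park it
 * on the 'next' free list for later reuse, and return the physical address
 * it recorded (or 0 if no matching entry was found).
 */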
static unsigned long vmemmap_list_free(unsigned long start)
{
    struct vmemmap_backing *vmem_back, *vmem_back_prev;

    vmem_back_prev = vmem_back = vmemmap_list;

    /* look for it, recording the previous pointer as we go */
    for (; vmem_back; vmem_back = vmem_back->list) {
        if (vmem_back->virt_addr == start)
            break;
        vmem_back_prev = vmem_back;
    }

    if (unlikely(!vmem_back))
        return 0;

    /* remove it from vmemmap_list */
    if (vmem_back == vmemmap_list) /* remove head */
        vmemmap_list = vmem_back->list;
    else
        vmem_back_prev->list = vmem_back->list;

    /* make 'next' point to this freed entry */
    vmem_back->list = next;
    next = vmem_back;
    num_freed++;

    return vmem_back->phys;
}

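/*
 * Tear down the vmemmap mappings for [start, end) and return the backing
 * memory to wherever it came from: the altmap, the bootmem reservation or
 * the page allocator.
 */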
void __ref vmemmap_free(unsigned long start, unsigned long end,
        struct vmem_altmap *altmap)
{
    unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
    unsigned long page_order = get_order(page_size);
    unsigned long alt_start = ~0, alt_end = ~0;
    unsigned long base_pfn;

    start = ALIGN_DOWN(start, page_size);
    if (altmap) {
        alt_start = altmap->base_pfn;
        alt_end = altmap->base_pfn + altmap->reserve +
              altmap->free + altmap->alloc + altmap->align;
    }

    pr_debug("vmemmap_free %lx...%lx\n", start, end);

    for (; start < end; start += page_size) {
        unsigned long nr_pages, addr;
        struct page *page;

        /*
         * The subsection we are trying to remove has already been
         * marked invalid. Before tearing down this vmemmap range,
         * make sure no other subsection in the range is still
         * marked valid.
         */
        if (vmemmap_populated(start, page_size))
            continue;

        addr = vmemmap_list_free(start);
        if (!addr)
            continue;

        page = pfn_to_page(addr >> PAGE_SHIFT);
        nr_pages = 1 << page_order;
        base_pfn = PHYS_PFN(addr);

        if (base_pfn >= alt_start && base_pfn < alt_end) {
            vmem_altmap_free(altmap, nr_pages);
        } else if (PageReserved(page)) {
            /* allocated from bootmem */
            if (page_size < PAGE_SIZE) {
                /*
                 * This shouldn't happen, but if it does,
                 * leave the memory there.
                 */
                WARN_ON_ONCE(1);
            } else {
                while (nr_pages--)
                    free_reserved_page(page++);
            }
        } else {
            free_pages((unsigned long)(__va(addr)), page_order);
        }

        vmemmap_remove_mapping(start, page_size);
    }
}
#endif /* CONFIG_MEMORY_HOTPLUG */
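
/*
 * No bootmem memmap registration is needed on powerpc; this generic hook
 * is an empty stub here.
 */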
void register_page_bootmem_memmap(unsigned long section_nr,
                  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
unsigned int mmu_lpid_bits;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(mmu_lpid_bits);
#endif
unsigned int mmu_pid_bits;

static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

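/*
 * Parse the "disable_radix" kernel command line option. With no argument
 * (or a true value) radix is disabled; a false value keeps radix enabled.
 */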
static int __init parse_disable_radix(char *p)
{
    bool val;

    if (!p)
        val = true;
    else if (kstrtobool(p, &val))
        return -EINVAL;

    disable_radix = val;

    return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
    unsigned long root, chosen;
    int size;
    const u8 *vec5;
    u8 mmu_supported;

    root = of_get_flat_dt_root();
    chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
    if (chosen == -FDT_ERR_NOTFOUND) {
        cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        return;
    }
    vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
    if (!vec5) {
        cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        return;
    }
    if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
        cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        return;
    }

    /* Check for supported configuration */
    mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
            OV5_FEAT(OV5_MMU_SUPPORT);
    if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
        /* Hypervisor only supports radix - check enabled && GTSE */
        if (!early_radix_enabled()) {
            pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
        }
        if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
                        OV5_FEAT(OV5_RADIX_GTSE))) {
            cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
        } else
            cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
        /* Do radix anyway - the hypervisor said we had to */
        cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
    } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
        /* Hypervisor only supports hash - disable radix */
        cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
    }
}

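/*
 * Flat device-tree scan callback: read the MMU LPID and PID register widths
 * advertised in the "cpu" node, if present.
 */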
static int __init dt_scan_mmu_pid_width(unsigned long node,
                       const char *uname, int depth,
                       void *data)
{
    int size = 0;
    const __be32 *prop;
    const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

    /* We are scanning "cpu" nodes only */
    if (type == NULL || strcmp(type, "cpu") != 0)
        return 0;

    /* Find MMU LPID, PID register size */
    prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size);
    if (prop && size == 4)
        mmu_lpid_bits = be32_to_cpup(prop);

    prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
    if (prop && size == 4)
        mmu_pid_bits = be32_to_cpup(prop);

    if (!mmu_pid_bits && !mmu_lpid_bits)
        return 0;

    return 1;
}

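/*
 * Decide early in boot between hash and radix translation, based on the
 * command line, the CPU/MMU feature bits, the device tree and (for guests)
 * /chosen/ibm,architecture-vec-5, then run the per-MMU early setup.
 */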
void __init mmu_early_init_devtree(void)
{
    bool hvmode = !!(mfmsr() & MSR_HV);

    /* Disable radix mode based on kernel command line. */
    if (disable_radix) {
        if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
            cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
        else
            pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
    }

    of_scan_flat_dt(dt_scan_mmu_pid_width, NULL);
    if (hvmode && !mmu_lpid_bits) {
        if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
            mmu_lpid_bits = 12; /* POWER8-10 */
        else
            mmu_lpid_bits = 10; /* POWER7 */
    }
    if (!mmu_pid_bits) {
        if (early_cpu_has_feature(CPU_FTR_ARCH_300))
            mmu_pid_bits = 20; /* POWER9-10 */
    }

    /*
     * Check /chosen/ibm,architecture-vec-5 if running as a guest.
     * When running bare-metal, we can use radix if we like
     * even though the ibm,architecture-vec-5 property created by
     * skiboot doesn't have the necessary bits set.
     */
    if (!hvmode)
        early_check_vec5();

    if (early_radix_enabled()) {
        radix__early_init_devtree();

        /*
         * We have finalized the translation we are going to use by now.
         * Radix mode is not limited by RMA / VRMA addressing.
         * Hence don't limit memblock allocations.
         */
        ppc64_rma_size = ULONG_MAX;
        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
    } else
        hash__early_init_devtree();

    if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
        hugetlbpage_init_defaultsize();

    if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) &&
        !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX))
        panic("kernel does not support any MMU type offered by platform");
}
#endif /* CONFIG_PPC_BOOK3S_64 */