/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
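
/*
 * For illustration, this is what the "base offset calculation" looks
 * like with a virtually contiguous memory map (cf. the
 * CONFIG_SPARSEMEM_VMEMMAP variants in
 * include/asm-generic/memory_model.h):
 *
 *    #define __pfn_to_page(pfn)  (vmemmap + (pfn))
 *    #define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
 *
 * where vmemmap is an architecture-provided base pointer, e.g.
 * ((struct page *)VMEMMAP_START) on x86-64.
 */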
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
                unsigned long size,
                unsigned long align,
                unsigned long goal)
{
    return memblock_virt_alloc_try_nid(size, align, goal,
                        BOOTMEM_ALLOC_ACCESSIBLE, node);
}
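
/*
 * Note on the goal argument: callers pass __pa(MAX_DMA_ADDRESS) so
 * that, where possible, bootmem steers these allocations above the
 * DMA zone and preserves DMA-able memory for drivers.
 */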

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
    /* If the main allocator is up, use it; otherwise fall back to bootmem. */
    if (slab_is_available()) {
        struct page *page;

        if (node_state(node, N_HIGH_MEMORY))
            page = alloc_pages_node(
                node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
                get_order(size));
        else
            page = alloc_pages(
                GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
                get_order(size));
        if (page)
            return page_address(page);
        return NULL;
    } else
        return __earlyonly_bootmem_alloc(node, size, size,
                __pa(MAX_DMA_ADDRESS));
}
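
/*
 * Example (illustrative values): an architecture mapping the vmemmap
 * with 2 MiB huge pages would call vmemmap_alloc_block(PMD_SIZE, node);
 * with 4 KiB base pages, get_order(PMD_SIZE) is 9, so this becomes an
 * order-9, zeroed, node-local page allocation.
 */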

/* All early-stage allocations from the buffer must use the same size. */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
    void *ptr;

    if (!vmemmap_buf)
        return vmemmap_alloc_block(size, node);

    /* carve the allocation out of the preallocated buffer */
    ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
    if (ptr + size > vmemmap_buf_end)
        return vmemmap_alloc_block(size, node);

    vmemmap_buf = ptr + size;

    return ptr;
}
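
/*
 * Example (hypothetical addresses): with vmemmap_buf at 0x...41000 and
 * size = 0x4000, ALIGN() advances the cursor to 0x...44000 and the
 * chunk [0x...44000, 0x...48000) is handed out. Mixing sizes would
 * lose buffer space to alignment between chunks, hence the same-size
 * rule above.
 */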

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
    return altmap->base_pfn + altmap->reserve + altmap->alloc
        + altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
    unsigned long allocated = altmap->alloc + altmap->align;

    if (altmap->free > allocated)
        return altmap->free - allocated;
    return 0;
}
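
/*
 * For reference, the accounting above assumes the device reservation
 * is laid out as follows (fields as in struct vmem_altmap,
 * include/linux/memremap.h):
 *
 *    base_pfn       base_pfn + reserve
 *    v              v
 *    |-- reserve --|-- alloc + align --|-- still free --|
 *                   \_______________ free ______________/
 *
 * so the next allocation begins at base_pfn + reserve + alloc + align.
 */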

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
        unsigned long nr_pfns)
{
    unsigned long pfn = vmem_altmap_next_pfn(altmap);
    unsigned long nr_align;

    /*
     * Align the start pfn to the largest power-of-two factor of the
     * request size, i.e. to the lowest set bit of nr_pfns.
     */
    nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
    nr_align = ALIGN(pfn, nr_align) - pfn;

    if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
        return ULONG_MAX;
    altmap->alloc += nr_pfns;
    altmap->align += nr_align;
    return pfn + nr_align;
}
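
/*
 * Worked example (hypothetical request): for nr_pfns = 0x200 the
 * lowest set bit is bit 9, so nr_align rounds pfn up to the next
 * 0x200-pfn boundary; the pfns skipped for alignment are charged to
 * altmap->align rather than altmap->alloc.
 */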

static void * __meminit altmap_alloc_block_buf(unsigned long size,
        struct vmem_altmap *altmap)
{
    unsigned long pfn, nr_pfns;
    void *ptr;

    if (size & ~PAGE_MASK) {
        pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%lu)\n",
                __func__, size);
        return NULL;
    }

    nr_pfns = size >> PAGE_SHIFT;
    pfn = vmem_altmap_alloc(altmap, nr_pfns);
    if (pfn < ULONG_MAX)
        ptr = __va(__pfn_to_phys(pfn));
    else
        ptr = NULL;
    pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
            __func__, pfn, altmap->alloc, altmap->align, nr_pfns);

    return ptr;
}

/* All early-stage allocations from the buffer must use the same size. */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
        struct vmem_altmap *altmap)
{
    if (altmap)
        return altmap_alloc_block_buf(size, altmap);
    return alloc_block_buf(size, node);
}
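
/*
 * Typical caller, for context (see the arch code for the authoritative
 * version): x86-64's vmemmap_populate_hugepages() requests PMD_SIZE
 * chunks here, so a present altmap hands out 2 MiB worth of pfns from
 * the device reservation per call:
 *
 *    p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
 */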

void __meminit vmemmap_verify(pte_t *pte, int node,
                unsigned long start, unsigned long end)
{
    unsigned long pfn = pte_pfn(*pte);
    int actual_node = early_pfn_to_nid(pfn);

    if (node_distance(actual_node, node) > LOCAL_DISTANCE)
        pr_warn("[%lx-%lx] potential offnode page_structs\n",
            start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
    pte_t *pte = pte_offset_kernel(pmd, addr);
    if (pte_none(*pte)) {
        pte_t entry;
        void *p = alloc_block_buf(PAGE_SIZE, node);
        if (!p)
            return NULL;
        entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
        set_pte_at(&init_mm, addr, pte, entry);
    }
    return pte;
}
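
/*
 * Note: the leaf pages installed above come from alloc_block_buf() and
 * can therefore be carved out of the per-node buffer prepared in
 * sparse_mem_maps_populate_node(); the page-table pages allocated by
 * the helpers below always go through vmemmap_alloc_block() directly.
 */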

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
    pmd_t *pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd)) {
        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
        if (!p)
            return NULL;
        pmd_populate_kernel(&init_mm, pmd, p);
    }
    return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
    pud_t *pud = pud_offset(pgd, addr);
    if (pud_none(*pud)) {
        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
        if (!p)
            return NULL;
        pud_populate(&init_mm, pud, p);
    }
    return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
    pgd_t *pgd = pgd_offset_k(addr);
    if (pgd_none(*pgd)) {
        void *p = vmemmap_alloc_block(PAGE_SIZE, node);
        if (!p)
            return NULL;
        pgd_populate(&init_mm, pgd, p);
    }
    return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
                     unsigned long end, int node)
{
    unsigned long addr = start;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    for (; addr < end; addr += PAGE_SIZE) {
        pgd = vmemmap_pgd_populate(addr, node);
        if (!pgd)
            return -ENOMEM;
        pud = vmemmap_pud_populate(pgd, addr, node);
        if (!pud)
            return -ENOMEM;
        pmd = vmemmap_pmd_populate(pud, addr, node);
        if (!pmd)
            return -ENOMEM;
        pte = vmemmap_pte_populate(pmd, addr, node);
        if (!pte)
            return -ENOMEM;
        vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
    }

    return 0;
}
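
/*
 * Sketch of how an architecture without huge-page vmemmap mappings can
 * satisfy the vmemmap_populate() requirement from the header comment
 * (this mirrors, e.g., arm64's base-page configuration):
 *
 *    int __meminit vmemmap_populate(unsigned long start,
 *                                   unsigned long end, int node)
 *    {
 *        return vmemmap_populate_basepages(start, end, node);
 *    }
 */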

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
    unsigned long start;
    unsigned long end;
    struct page *map;

    map = pfn_to_page(pnum * PAGES_PER_SECTION);
    start = (unsigned long)map;
    end = (unsigned long)(map + PAGES_PER_SECTION);

    if (vmemmap_populate(start, end, nid))
        return NULL;

    return map;
}
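
/*
 * Sizing note (x86-64 values, for illustration): with 128 MiB sections,
 * PAGES_PER_SECTION is 32768; at 64 bytes per struct page, each
 * section's map spans exactly 2 MiB of vmemmap, i.e. one PMD. The
 * batch populator below, invoked per node during early sparse memory
 * setup (sparse_init() in mm/sparse.c), exploits this by backing all
 * of a node's section maps with a single PMD-aligned bootmem buffer.
 */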

void __init sparse_mem_maps_populate_node(struct page **map_map,
                      unsigned long pnum_begin,
                      unsigned long pnum_end,
                      unsigned long map_count, int nodeid)
{
    unsigned long pnum;
    unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
    void *vmemmap_buf_start;

    size = ALIGN(size, PMD_SIZE);
    vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
             PMD_SIZE, __pa(MAX_DMA_ADDRESS));

    if (vmemmap_buf_start) {
        vmemmap_buf = vmemmap_buf_start;
        vmemmap_buf_end = vmemmap_buf_start + size * map_count;
    }

    for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
        struct mem_section *ms;

        if (!present_section_nr(pnum))
            continue;

        map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
        if (map_map[pnum])
            continue;
        ms = __nr_to_section(pnum);
        pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
               __func__);
        ms->section_mem_map = 0;
    }

    if (vmemmap_buf_start) {
        /* free whatever is left of the buffer */
        memblock_free_early(__pa(vmemmap_buf),
                    vmemmap_buf_end - vmemmap_buf);
        vmemmap_buf = NULL;
        vmemmap_buf_end = NULL;
    }
}