// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In
 * that case the overhead consists of a few additional pages that
 * are allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
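
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */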
static void * __ref __earlyonly_bootmem_alloc(int node,
					      unsigned long size,
					      unsigned long align,
					      unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
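
/*
 * An altmap hands out pfns carved from a pre-reserved chunk of the
 * hotplugged range: allocations grow linearly upward from
 * base_pfn + reserve, with ->alloc counting pages handed out and
 * ->align counting pages skipped to satisfy alignment.
 */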
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
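
/*
 * Blocks are aligned to the largest power-of-two factor of their pfn
 * count (e.g. a 2M request with 4K pages is 512 pfns and starts on a
 * 512-pfn boundary); pfns skipped to reach that boundary are charged
 * to ->align so they are never handed out.
 */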
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
			      unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			     start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path, so the
			 * reused page is only released once every PTE
			 * mapping it has been torn down.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
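
/*
 * Walk the kernel page table for @addr, populating any missing levels
 * on the way down, and finally install a backing page at the PTE
 * level; when @reuse is set, the given page is mapped instead of
 * allocating a fresh one.
 */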
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}
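
/*
 * A minimal sketch (not part of this file) of the arch hook for an
 * architecture that backs the virtual memory map with base pages only,
 * assuming no extra arch-specific fixups are needed:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */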

/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each range successful onlining. Thus the value of @nr_range
 * at section memmap populate corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	/*
	 * Reuse is only possible when this section lies entirely inside a
	 * compound page: the compound page must span more than a subsection
	 * and the section must not start on a compound-page boundary.
	 */
	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/vm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}
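
/*
 * The compound (dedup) path above is only taken when sizeof(struct page)
 * is a power of two, so that each compound page's memmap chunk stays
 * naturally page-aligned, and only when no altmap is in use.
 */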
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (is_power_of_2(sizeof(struct page)) &&
	    pgmap && pgmap_vmemmap_nr(pgmap) > 1 && !altmap)
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}