Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Bootmem core functions.
0004  *
0005  * Copyright (c) 2020, Bytedance.
0006  *
0007  *     Author: Muchun Song <songmuchun@bytedance.com>
0008  *
0009  */
0010 #include <linux/mm.h>
0011 #include <linux/compiler.h>
0012 #include <linux/memblock.h>
0013 #include <linux/bootmem_info.h>
0014 #include <linux/memory_hotplug.h>
0015 #include <linux/kmemleak.h>
0016 
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
    /*
     * Mark @page as backing boot-time metadata: record the bootmem @type
     * in page->index, stash @info (e.g. a section number or node id) in
     * the page's private field, and take a reference so the page stays
     * pinned while it holds that metadata.  Undone by put_page_bootmem().
     */
    page->index = type;
    SetPagePrivate(page);
    set_page_private(page, info);
    page_ref_inc(page);
}
0024 
void put_page_bootmem(struct page *page)
{
    unsigned long type = page->index;

    /* page->index must hold a type stored by get_page_bootmem(). */
    BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
           type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

    /*
     * Drop the reference taken by get_page_bootmem(); when it was the
     * last one (refcount fell to 1), clear all bootmem state and hand
     * the boot-reserved page back for general use.
     */
    if (page_ref_dec_return(page) == 1) {
        page->index = 0;
        ClearPagePrivate(page);
        set_page_private(page, 0);
        INIT_LIST_HEAD(&page->lru);
        /*
         * The page was part of a boot-time reservation tracked by
         * kmemleak; remove it from kmemleak before freeing it.
         */
        kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
        free_reserved_page(page);
    }
}
0041 
0042 #ifndef CONFIG_SPARSEMEM_VMEMMAP
0043 static void __init register_page_bootmem_info_section(unsigned long start_pfn)
0044 {
0045     unsigned long mapsize, section_nr, i;
0046     struct mem_section *ms;
0047     struct page *page, *memmap;
0048     struct mem_section_usage *usage;
0049 
0050     section_nr = pfn_to_section_nr(start_pfn);
0051     ms = __nr_to_section(section_nr);
0052 
0053     /* Get section's memmap address */
0054     memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
0055 
0056     /*
0057      * Get page for the memmap's phys address
0058      * XXX: need more consideration for sparse_vmemmap...
0059      */
0060     page = virt_to_page(memmap);
0061     mapsize = sizeof(struct page) * PAGES_PER_SECTION;
0062     mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
0063 
0064     /* remember memmap's page */
0065     for (i = 0; i < mapsize; i++, page++)
0066         get_page_bootmem(section_nr, page, SECTION_INFO);
0067 
0068     usage = ms->usage;
0069     page = virt_to_page(usage);
0070 
0071     mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
0072 
0073     for (i = 0; i < mapsize; i++, page++)
0074         get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
0075 
0076 }
0077 #else /* CONFIG_SPARSEMEM_VMEMMAP */
0078 static void __init register_page_bootmem_info_section(unsigned long start_pfn)
0079 {
0080     unsigned long mapsize, section_nr, i;
0081     struct mem_section *ms;
0082     struct page *page, *memmap;
0083     struct mem_section_usage *usage;
0084 
0085     section_nr = pfn_to_section_nr(start_pfn);
0086     ms = __nr_to_section(section_nr);
0087 
0088     memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
0089 
0090     register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);
0091 
0092     usage = ms->usage;
0093     page = virt_to_page(usage);
0094 
0095     mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
0096 
0097     for (i = 0; i < mapsize; i++, page++)
0098         get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
0099 }
0100 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
0101 
0102 void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
0103 {
0104     unsigned long i, pfn, end_pfn, nr_pages;
0105     int node = pgdat->node_id;
0106     struct page *page;
0107 
0108     nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
0109     page = virt_to_page(pgdat);
0110 
0111     for (i = 0; i < nr_pages; i++, page++)
0112         get_page_bootmem(node, page, NODE_INFO);
0113 
0114     pfn = pgdat->node_start_pfn;
0115     end_pfn = pgdat_end_pfn(pgdat);
0116 
0117     /* register section info */
0118     for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
0119         /*
0120          * Some platforms can assign the same pfn to multiple nodes - on
0121          * node0 as well as nodeN.  To avoid registering a pfn against
0122          * multiple nodes we check that this pfn does not already
0123          * reside in some other nodes.
0124          */
0125         if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
0126             register_page_bootmem_info_section(pfn);
0127     }
0128 }