Back to home page

LXR

 
 

    


0001 /*
0002  * mm_init.c - Memory initialisation verification and debugging
0003  *
0004  * Copyright 2008 IBM Corporation, 2008
0005  * Author Mel Gorman <mel@csn.ul.ie>
0006  *
0007  */
0008 #include <linux/kernel.h>
0009 #include <linux/init.h>
0010 #include <linux/kobject.h>
0011 #include <linux/export.h>
0012 #include <linux/memory.h>
0013 #include <linux/notifier.h>
0014 #include <linux/sched.h>
0015 #include "internal.h"
0016 
0017 #ifdef CONFIG_DEBUG_MEMORY_INIT
/* Verbosity of mminit debug output; set at boot via "mminit_loglevel=". */
int __meminitdata mminit_loglevel;

/* SECTIONS_SHIFT only exists on sparsemem configs; default to 0 elsewhere. */
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT  0
#endif
0023 
/*
 * The zonelists are simply reported, validation is manual: dump every
 * populated zone of every online node's zonelists (one line per zone)
 * so the fallback ordering can be eyeballed.  Does nothing unless
 * mminit_loglevel >= MMINIT_VERIFY.
 */
void __init mminit_verify_zonelist(void)
{
    int nid;

    if (mminit_loglevel < MMINIT_VERIFY)
        return;

    for_each_online_node(nid) {
        pg_data_t *pgdat = NODE_DATA(nid);
        struct zone *zone;
        struct zoneref *z;
        struct zonelist *zonelist;
        int i, listid, zoneid;

        /* The listid/zoneid decoding below only handles two zonelists. */
        BUG_ON(MAX_ZONELISTS > 2);
        for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

            /* Identify the zone and nodelist */
            zoneid = i % MAX_NR_ZONES;
            listid = i / MAX_NR_ZONES;
            zonelist = &pgdat->node_zonelists[listid];
            zone = &pgdat->node_zones[zoneid];
            if (!populated_zone(zone))
                continue;

            /* Print information about the zonelist */
            printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
                listid > 0 ? "thisnode" : "general", nid,
                zone->name);

            /* Iterate the zonelist, printing each entry as "node:zone " */
            for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
                pr_cont("%d:%s ", zone->node, zone->name);
#else
                /* !NUMA: everything lives on node 0 */
                pr_cont("0:%s ", zone->name);
#endif /* CONFIG_NUMA */
            }
            pr_cont("\n");
        }
    }
}
0067 
/*
 * Report how the SECTION/NODE/ZONE/LAST_CPUPID fields are packed into
 * page->flags, then BUG on any internal inconsistency in the layout
 * macros (a *_PGSHIFT that does not match its *_WIDTH, or overlapping
 * field masks).
 */
void __init mminit_verify_pageflags_layout(void)
{
    int shift, width;
    unsigned long or_mask, add_mask;

    /* Total bits in page->flags; width = bits left over for the flags. */
    shift = 8 * sizeof(unsigned long);
    width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
        "Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
        SECTIONS_WIDTH,
        NODES_WIDTH,
        ZONES_WIDTH,
        LAST_CPUPID_WIDTH,
        NR_PAGEFLAGS);
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
        "Section %d Node %d Zone %d Lastcpupid %d\n",
        SECTIONS_SHIFT,
        NODES_SHIFT,
        ZONES_SHIFT,
        LAST_CPUPID_SHIFT);
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
        "Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
        (unsigned long)SECTIONS_PGSHIFT,
        (unsigned long)NODES_PGSHIFT,
        (unsigned long)ZONES_PGSHIFT,
        (unsigned long)LAST_CPUPID_PGSHIFT);
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
        "Node/Zone ID: %lu -> %lu\n",
        (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
        (unsigned long)ZONEID_PGOFF);
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
        "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
        shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
        "Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
    mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
        "Last cpupid not in page flags");
#endif

    /*
     * Fields are allocated from the top of page->flags downwards in the
     * order section, node, zone; walking the widths down must land
     * exactly on each field's *_PGSHIFT.
     */
    if (SECTIONS_WIDTH) {
        shift -= SECTIONS_WIDTH;
        BUG_ON(shift != SECTIONS_PGSHIFT);
    }
    if (NODES_WIDTH) {
        shift -= NODES_WIDTH;
        BUG_ON(shift != NODES_PGSHIFT);
    }
    if (ZONES_WIDTH) {
        shift -= ZONES_WIDTH;
        BUG_ON(shift != ZONES_PGSHIFT);
    }

    /* Check for bitmask overlaps: OR == ADD only if the masks are disjoint. */
    or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
            (NODES_MASK << NODES_PGSHIFT) |
            (SECTIONS_MASK << SECTIONS_PGSHIFT);
    add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
            (NODES_MASK << NODES_PGSHIFT) +
            (SECTIONS_MASK << SECTIONS_PGSHIFT);
    BUG_ON(or_mask != add_mask);
}
0132 
/*
 * Handler for the "mminit_loglevel=<level>" early boot parameter.
 * Parses the value into mminit_loglevel; always returns 0.
 */
static __init int set_mminit_loglevel(char *str)
{
    get_option(&str, &mminit_loglevel);
    return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
0139 #endif /* CONFIG_DEBUG_MEMORY_INIT */
0140 
/* The "mm" kobject, created at postcore time by mm_sysfs_init() below. */
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);
0143 
0144 #ifdef CONFIG_SMP
0145 s32 vm_committed_as_batch = 32;
0146 
0147 static void __meminit mm_compute_batch(void)
0148 {
0149     u64 memsized_batch;
0150     s32 nr = num_present_cpus();
0151     s32 batch = max_t(s32, nr*2, 32);
0152 
0153     /* batch size set to 0.4% of (total memory/#cpus), or max int32 */
0154     memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);
0155 
0156     vm_committed_as_batch = max_t(s32, memsized_batch, batch);
0157 }
0158 
0159 static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
0160                     unsigned long action, void *arg)
0161 {
0162     switch (action) {
0163     case MEM_ONLINE:
0164     case MEM_OFFLINE:
0165         mm_compute_batch();
0166     default:
0167         break;
0168     }
0169     return NOTIFY_OK;
0170 }
0171 
/* Hotplug notifier registered by mm_compute_batch_init() at boot. */
static struct notifier_block compute_batch_nb __meminitdata = {
    .notifier_call = mm_compute_batch_notifier,
    .priority = IPC_CALLBACK_PRI, /* use lowest priority */
};
0176 
/*
 * Compute the initial batch value at boot and register the memory
 * hotplug notifier so it is recomputed as memory comes and goes.
 */
static int __init mm_compute_batch_init(void)
{
    mm_compute_batch();
    register_hotmemory_notifier(&compute_batch_nb);

    return 0;
}

__initcall(mm_compute_batch_init);
0186 
0187 #endif
0188 
0189 static int __init mm_sysfs_init(void)
0190 {
0191     mm_kobj = kobject_create_and_add("mm", kernel_kobj);
0192     if (!mm_kobj)
0193         return -ENOMEM;
0194 
0195     return 0;
0196 }
0197 postcore_initcall(mm_sysfs_init);