/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

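/*
 * first_online_pgdat - return the node data of the first online NUMA
 * node; the starting point for the pgdat and zone iterators.
 */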
struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

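/*
 * next_online_pgdat - return the node data of the next online node
 * after @pgdat, or NULL once the last online node has been visited
 * (next_online_node() yields MAX_NUMNODES past the end).
 */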
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}

/*
 * next_zone - helper for for_each_zone(): advance to the next zone of
 * the current node, or to the first zone of the next online node, or
 * to NULL once every zone has been visited.
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;
	else {
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
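
/*
 * Example: first_online_pgdat() and next_zone() exist to back the
 * for_each_zone() iterator defined in <linux/mmzone.h>. A caller
 * walking every zone of every online node looks roughly like this
 * (a sketch only; the variable name and pr_info() line are
 * illustrative):
 *
 *	struct zone *zone;
 *
 *	for_each_zone(zone)
 *		pr_info("node %d zone %s\n",
 *			zone->zone_pgdat->node_id, zone->name);
 */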

/*
 * zref_in_nodemask - test whether the node of @zref's zone is set in
 * @nodes. Without NUMA there is only one node, so every zone matches.
 */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zoneref at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on the nodemask if it is set.
	 */
	if (likely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
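
/*
 * Example: callers do not use __next_zones_zonelist() directly; they go
 * through the first_zones_zonelist()/next_zones_zonelist() wrappers and
 * ultimately the for_each_zone_zonelist{,_nodemask}() iterators in
 * <linux/mmzone.h>. A sketch of a nodemask-filtered walk (the local
 * variable names and consider_zone() helper are hypothetical):
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *					highest_zoneidx, nodemask)
 *		consider_zone(zone);
 */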

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
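/*
 * memmap_valid_within - check that a memory map entry is self-consistent.
 * On architectures with holes in the memory map, a struct page may exist
 * for a pfn that was never initialised; reject the entry unless its
 * stored pfn and zone match what the caller expects.
 */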
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	if (page_to_pfn(page) != pfn)
		return false;

	if (page_zone(page) != zone)
		return false;

	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

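/*
 * lruvec_init - zero @lruvec and initialise the list head of each of
 * its LRU lists (anon/file, active/inactive, plus unevictable).
 */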
void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
}

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
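/*
 * page_cpupid_xchg_last - store @cpupid in the page-flags field that
 * records the last CPU+PID to touch @page, returning the previous
 * value. A cmpxchg() loop is needed because other page flags share
 * the same word and may be updated concurrently.
 */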
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	do {
		old_flags = flags = page->flags;
		last_cpupid = page_cpupid_last(page);

		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));

	return last_cpupid;
}
#endif /* CONFIG_NUMA_BALANCING && !LAST_CPUPID_NOT_IN_PAGE_FLAGS */