// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mmzone.c
 *
 * management code for pgdats, zones and page flags
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

struct pglist_data *first_online_pgdat(void)
{
	return NODE_DATA(first_online_node);
}

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	int nid = next_online_node(pgdat->node_id);

	if (nid == MAX_NUMNODES)
		return NULL;
	return NODE_DATA(nid);
}
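
/*
 * Illustrative sketch, not part of the original file: these two helpers
 * back the for_each_online_pgdat() iterator in include/linux/mmzone.h,
 * which walks every online node's pgdat. The function below is
 * hypothetical and only shows the intended loop shape.
 */
static inline int __maybe_unused example_count_online_nodes(void)
{
	struct pglist_data *pgdat;
	int nr = 0;

	for (pgdat = first_online_pgdat(); pgdat;
	     pgdat = next_online_pgdat(pgdat))
		nr++;	/* visit each online node's pgdat exactly once */
	return nr;
}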

/*
 * next_zone - helper magic for for_each_zone()
 */
struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
		zone++;		/* more zones left in this node's array */
	else {
		/* wrap to the first zone of the next online node, if any */
		pgdat = next_online_pgdat(pgdat);
		if (pgdat)
			zone = pgdat->node_zones;
		else
			zone = NULL;
	}
	return zone;
}
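
/*
 * Illustrative sketch, not part of the original file: for_each_zone() in
 * include/linux/mmzone.h starts from the first online node's zone array
 * and steps with next_zone(), as the hypothetical helper below shows.
 */
static inline int __maybe_unused example_count_populated_zones(void)
{
	struct zone *zone;
	int nr = 0;

	for (zone = (first_online_pgdat())->node_zones; zone;
	     zone = next_zone(zone))
		if (populated_zone(zone))	/* skip zones with no pages */
			nr++;
	return nr;
}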

/* zref_in_nodemask - is the zoneref's node set in the given nodemask? */
static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
#ifdef CONFIG_NUMA
	return node_isset(zonelist_node_idx(zref), *nodes);
#else
	return 1;
#endif /* CONFIG_NUMA */
}

/* Returns the next zone at or below highest_zoneidx in a zonelist */
struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	/*
	 * Find the next suitable zone to use for the allocation.
	 * Only filter based on nodemask if it's set.
	 */
	if (unlikely(nodes == NULL))
		while (zonelist_zone_idx(z) > highest_zoneidx)
			z++;
	else
		while (zonelist_zone_idx(z) > highest_zoneidx ||
				(z->zone && !zref_in_nodemask(z, nodes)))
			z++;

	return z;
}
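
/*
 * Illustrative sketch, not part of the original file: this is roughly how
 * for_each_zone_zonelist_nodemask()-style iteration drives
 * __next_zones_zonelist(), skipping zones above the requested index or
 * outside the nodemask. The helper below is hypothetical.
 */
static inline int __maybe_unused
example_count_usable_zones(struct zonelist *zonelist,
			   enum zone_type highest_zoneidx, nodemask_t *nodes)
{
	struct zoneref *z;
	int nr = 0;

	for (z = __next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx,
				       nodes);
	     z->zone;	/* the zoneref array is terminated by a NULL zone */
	     z = __next_zones_zonelist(z + 1, highest_zoneidx, nodes))
		nr++;
	return nr;
}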

void lruvec_init(struct lruvec *lruvec)
{
	enum lru_list lru;

	memset(lruvec, 0, sizeof(struct lruvec));
	spin_lock_init(&lruvec->lru_lock);

	for_each_lru(lru)
		INIT_LIST_HEAD(&lruvec->lists[lru]);
	/*
	 * The "Unevictable LRU" is imaginary: though its size is maintained,
	 * it is never scanned, and unevictable pages are not threaded on it
	 * (so that their lru fields can be reused to hold mlock_count).
	 * Poison its list head, so that any operations on it would crash.
	 */
	list_del(&lruvec->lists[LRU_UNEVICTABLE]);
}
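
/*
 * Illustrative note, not in the original file: list_del() leaves the
 * unevictable head pointing at LIST_POISON1/LIST_POISON2, so e.g. a
 * hypothetical list_add(&folio->lru, &lruvec->lists[LRU_UNEVICTABLE])
 * would dereference a poison pointer and crash immediately.
 */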

#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	unsigned long old_flags, flags;
	int last_cpupid;

	old_flags = READ_ONCE(page->flags);
	do {
		flags = old_flags;
		/* extract the previously recorded cpupid ... */
		last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;

		/* ... and splice the new one into the flags word */
		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));

	return last_cpupid;
}
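
/*
 * Illustrative sketch, not part of the original file: NUMA balancing
 * records the cpu/pid pair of the last access and compares it on the
 * next hint fault. The helper below is hypothetical; cpupid_to_cpu()
 * and cpupid_to_pid() are the real decode helpers from linux/mm.h.
 */
static inline bool __maybe_unused
example_last_access_was_remote(struct page *page, int this_cpupid)
{
	int last_cpupid = page_cpupid_xchg_last(page, this_cpupid);

	/* A cpu change hints that the page is being touched cross-node */
	return cpupid_to_cpu(last_cpupid) != cpupid_to_cpu(this_cpupid);
}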
#endif