/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat has to be allocated.
 *
 * If an arch has a generic-style NODE_DATA(), allocating with
 * node_data[nid] = kzalloc() works well, but this is architecture
 * dependent. In general, generic_alloc_nodedata() is used instead;
 * the hooks below let an architecture override that.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)    generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation cannot be used to get the new node's own
 * memory at this point, because the pgdat for the new node has not been
 * allocated/initialized yet. Using the new node's memory here would need
 * further work.
 *
 * Note: the macro expands at its call site and relies on a local
 * pg_data_t *pgdat being in scope there for the sizeof().
 */
#define generic_alloc_nodedata(nid)             \
({                              \
    memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);    \
})
/*
 * This definition only covers the error path of node hot-add.
 * For node hot-remove, it has to be replaced.
 */
#define generic_free_nodedata(pgdat)    kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
    node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
    BUG();
    return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

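/*
 * A minimal sketch (not part of the API proper) of how the node hot-add
 * path is expected to use the hooks above; the real logic lives in
 * mm/memory_hotplug.c and may differ in detail:
 *
 *	pg_data_t *pgdat = NODE_DATA(nid);
 *
 *	if (!pgdat) {
 *		pgdat = arch_alloc_nodedata(nid);
 *		if (!pgdat)
 *			return NULL;
 *		arch_refresh_nodedata(nid, pgdat);	// publish node_data[nid]
 *	}
 */
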
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types for controlling the zone of onlined and offlined memory */
enum {
    /* Offline the memory. */
    MMOP_OFFLINE = 0,
    /* Online the memory. The zone depends, see default_zone_for_pfn(). */
    MMOP_ONLINE,
    /* Online the memory to ZONE_NORMAL. */
    MMOP_ONLINE_KERNEL,
    /* Online the memory to ZONE_MOVABLE. */
    MMOP_ONLINE_MOVABLE,
};
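
/*
 * These values back a memory block's "state" attribute in sysfs; for
 * example (the path is illustrative of the usual sysfs layout):
 *
 *	echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * requests MMOP_ONLINE_MOVABLE for that memory block.
 */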

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE        ((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE  ((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID     ((__force mhp_t)BIT(2))
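
/*
 * A hedged usage sketch: a driver hot-adding a range can opt in to a
 * self-contained memmap and to resource merging (error handling elided):
 *
 *	mhp_t mhp_flags = MHP_NONE;
 *
 *	if (mhp_supports_memmap_on_memory(size))
 *		mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory(nid, start, size, mhp_flags | MHP_MERGE_RESOURCE);
 */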

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *  (required)
 * pgmap: ZONE_DEVICE page map of the range being added, if any (optional)
 */
struct mhp_params {
    struct vmem_altmap *altmap;
    pgprot_t pgprot;
    struct dev_pagemap *pgmap;
};
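
/*
 * Callers typically build this on the stack; a minimal sketch (the core
 * hotplug code applies its own pgprot policy, PAGE_KERNEL is just the
 * common baseline):
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */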

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone cannot be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
    return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
    return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
    write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
    write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
    seqlock_init(&zone->span_seqlock);
}
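
/*
 * Readers sample a zone's span with the usual seqlock retry pattern; a
 * minimal sketch:
 *
 *	unsigned long start_pfn, nr_pages;
 *	unsigned seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */
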
extern void adjust_present_page_count(struct page *page,
                      struct memory_group *group,
                      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
                     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
            struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
                     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

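/*
 * A hedged sketch of the callback interface: a balloon-style driver may
 * intercept freshly onlined pages instead of releasing them to the page
 * allocator (my_online_page and keep_for_driver are illustrative, not
 * kernel symbols):
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (!keep_for_driver(page, order))
 *			generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */
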
extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
               struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* Set if the movable_node boot option was specified. */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
    return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
               struct vmem_altmap *altmap);

/* Reasonably generic interface to expand the physical pages. */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
               struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
        unsigned long nr_pages, struct mhp_params *params)
{
    return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
          struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)         \
({                      \
    struct page *___page = NULL;        \
    if (pfn_valid(pfn))         \
        ___page = pfn_to_page(pfn); \
    ___page;                \
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
    return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
    return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
    return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
    return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
    spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
    spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
    spin_lock_init(&pgdat->node_size_lock);
}
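
/*
 * Usage sketch: the lock disables interrupts, so callers pass a flags
 * word for the irqsave/irqrestore pair:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	// ... update pgdat->node_start_pfn, node_spanned_pages ...
 *	pgdat_resize_unlock(pgdat, &flags);
 */
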
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
             struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

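/*
 * A hedged usage sketch: a driver unplugging memory it previously added
 * (virtio-mem style) can combine both steps; on failure the range is
 * still online and in use:
 *
 *	rc = offline_and_remove_memory(start, size);
 */
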
#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                struct zone *zone, struct memory_group *group)
{
    return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
    return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
                   mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
                     const char *resource_name,
                     mhp_t mhp_flags);
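
/*
 * A hedged sketch for driver-managed memory: the resource name keeps such
 * ranges distinguishable from plain "System RAM" (the "(kmem)" suffix
 * mirrors the dax/kmem convention; treat it as illustrative):
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (kmem)", MHP_NONE);
 */
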
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                   unsigned long nr_pages,
                   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
                       unsigned long start_pfn,
                       unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
        unsigned long nr_pages, struct vmem_altmap *altmap,
        struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
        unsigned long pfn, unsigned long nr_pages,
        unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                      unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
        struct memory_group *group, unsigned long start_pfn,
        unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
                      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */