/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/* Highest page frame number */
extern unsigned long max_pfn;

/* Highest possible page frame number */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to the kernel direct mapping and treat as
 * reserved in the memory map; refer to the memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver and never indicated in the firmware-provided memory map as
 * system RAM
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* no special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
	MEMBLOCK_DRIVER_MANAGED	= 0x8,	/* always detected via a driver */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;			/* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
		      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			     p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,	\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end)				\
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
			     p_start, p_end, NULL)
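
/*
 * Example: a minimal usage sketch of walking every usable memory range
 * during early boot; the variable names and the message are hypothetical.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory range: [%pa-%pa]\n", &start, &end);
 */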

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
				 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
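
/*
 * Example: a minimal sketch that counts the pages configured for one
 * NUMA node; "nid" and "nr_pages" are hypothetical names.
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;
 */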

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);

/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone is initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)
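
/*
 * Example: a minimal sketch that reports every free (memory && !reserved)
 * range on all nodes; the message text is hypothetical.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_debug("free range: [%pa-%pa]\n", &start, &end);
 */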

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				     phys_addr_t align, phys_addr_t start,
				     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
						       phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
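
/*
 * Example: a minimal sketch of an early-boot physical allocation; a
 * return value of 0 means failure. "paddr", the size, and the panic
 * message are hypothetical.
 *
 *	phys_addr_t paddr = memblock_phys_alloc(SZ_1M, PAGE_SIZE);
 *
 *	if (!paddr)
 *		panic("failed to allocate 1 MiB for the early buffer\n");
 */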

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
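
/*
 * Example: a minimal sketch of the usual early-boot allocation of a
 * zeroed, directly mapped buffer; "table" is a hypothetical name.
 *
 *	unsigned long *table;
 *
 *	table = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */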

static inline void *memblock_alloc_raw(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
					phys_addr_t align,
					phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check the allocation direction: returns true if memblock allocates
 * memory in the bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)
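
/*
 * Example: a minimal sketch that dumps each registered memory region
 * together with its flags; the message text is hypothetical.
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r)
 *		pr_info("region: base %pa size %pa flags %#x\n",
 *			&r->base, &r->size, (unsigned int)r->flags);
 */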

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/*
 * Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* _LINUX_MEMBLOCK_H */