#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>

struct vm_area_struct;

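/* Convert GFP flags to their corresponding migrate type */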
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

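/*
 * gfpflags_normal_context - is this a normal, sleepable allocation context?
 * @gfp_flags: gfp flags to test
 *
 * Returns true when the allocation may block (__GFP_DIRECT_RECLAIM) and is
 * not drawing on the emergency __GFP_MEMALLOC reserves, i.e. it behaves like
 * an ordinary sleepable allocation from process context.
 */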
static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
{
	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
		__GFP_DIRECT_RECLAIM;
}

#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

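/*
 * GFP_ZONE_TABLE packs, into a single word, the zone to allocate from for
 * each valid combination of the zone modifier bits in GFP_ZONEMASK
 * (__GFP_DMA, __GFP_HIGHMEM, __GFP_DMA32 and __GFP_MOVABLE).  Each entry is
 * GFP_ZONES_SHIFT bits wide and is indexed by those low GFP bits; zones that
 * are not configured fall back to ZONE_NORMAL via the OPT_ZONE_* macros above.
 */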
#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
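/* ZONE_DEVICE is never returned by gfp_zone(), so two bits per entry suffice */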
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT)\
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT)\
)

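/*
 * GFP_ZONE_BAD is a bitmap of the GFP_ZONEMASK combinations that are invalid:
 * at most one of __GFP_DMA, __GFP_HIGHMEM and __GFP_DMA32 may be set
 * (optionally combined with __GFP_MOVABLE).
 */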
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

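/* Return the zone to allocate from for the zone modifier bits in @flags */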
static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
					 ((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}

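/*
 * Each node has two zonelists: ZONELIST_FALLBACK contains all usable zones in
 * preference order, while ZONELIST_NOFALLBACK (NUMA only) is restricted to the
 * node itself and is selected by __GFP_THISNODE.
 */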
static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

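/*
 * Return the zonelist of node @nid that matches @flags; the zonelist gives
 * the order in which zones are tried for the allocation.
 */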
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);

unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
				nodemask_t *nodemask, int nr_pages,
				struct list_head *page_list,
				struct page **page_array);

unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
				unsigned long nr_pages,
				struct page **page_array);

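/*
 * Convenience wrappers around __alloc_pages_bulk() that allocate on the local
 * memory node and fill either a list or an array of pages.  A rough usage
 * sketch for the array form (illustrative only, no error handling; NR is just
 * a caller-chosen capacity):
 *
 *	struct page *pages[NR] = { NULL };
 *	unsigned long filled = alloc_pages_bulk_array(GFP_KERNEL, NR, pages);
 *
 * Only NULL slots are populated, and the return value is the number of pages
 * now present in the array.
 */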
static inline unsigned long
alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
{
	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
}

static inline unsigned long
alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
{
	return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
}

static inline unsigned long
alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}

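/*
 * Allocate pages, preferring the node given as @nid.  The node must be a
 * valid id; unlike alloc_pages_node() there is no NUMA_NO_NODE fallback here.
 */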
static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));

	return __alloc_pages(gfp_mask, order, nid, NULL);
}

static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));

	return __folio_alloc(gfp, order, nid, NULL);
}

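/*
 * Allocate pages, preferring the node given as @nid.  When @nid is
 * NUMA_NO_NODE, the nearest node with memory to the calling CPU is used.
 */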
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node(nid, gfp_mask, order);
}

#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
struct folio *folio_alloc(gfp_t gfp, unsigned order);
struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, bool hugepage);
#else
static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node(gfp, order, numa_node_id());
}
#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
	folio_alloc(gfp, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
static inline struct page *alloc_page_vma(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false);

	return &folio->page;
}

extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);

void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);

#define __get_free_page(gfp_mask) \
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc_align(struct page_frag_cache *nc,
				   unsigned int fragsz, gfp_t gfp_mask,
				   unsigned int align_mask);

static inline void *page_frag_alloc(struct page_frag_cache *nc,
			     unsigned int fragsz, gfp_t gfp_mask)
{
	return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
}

extern void page_frag_free(void *addr);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);

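/*
 * gfp_allowed_mask is the set of GFP flags the allocator currently honours;
 * it is restricted during early boot and around suspend/hibernation (see
 * pm_restrict_gfp_mask()/pm_restore_gfp_mask() below), masking out flags that
 * could trigger I/O or filesystem reclaim.
 */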
extern gfp_t gfp_allowed_mask;

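/* Does this gfp mask allow dipping into the emergency (pfmemalloc) reserves? */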
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
static inline bool pm_suspended_storage(void)
{
	return false;
}
#endif

#ifdef CONFIG_CONTIG_ALLOC
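/* These must be called on a pfn range that lies within a single zone. */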
extern int alloc_contig_range(unsigned long start, unsigned long end,
			      unsigned migratetype, gfp_t gfp_mask);
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				       int nid, nodemask_t *nodemask);
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);

#ifdef CONFIG_CMA
extern void init_cma_reserved_pageblock(struct page *page);
#endif

#endif /* __LINUX_GFP_H */