/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *  Cleaned up and restructured to ease the addition of alternative
 *  implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE       ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON     ((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN  ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA      ((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32    ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER     ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC      ((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU    ((slab_flags_t __force)0x00080000U)
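/*
 * A minimal usage sketch of the pattern described above; the struct, cache
 * name, lookup and reference helpers are hypothetical, not part of this API:
 *
 *    my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *                                 0, SLAB_TYPESAFE_BY_RCU, NULL);
 *
 *    rcu_read_lock();
 *    obj = my_lockless_lookup(key);
 *    if (obj && try_get_ref(obj)) {    // ref may fail for freed objects
 *        if (obj->key == key)
 *            use(obj);                 // memory still backs a my_obj
 *        put_ref(obj);
 *    }
 *    rcu_read_unlock();
 */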
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD     ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE      ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE    ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB      ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB      0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT       ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT       0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN      ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN      0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS  ((slab_flags_t __force)0x10000000U)

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT    ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY      SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
                (unsigned long)ZERO_SIZE_PTR)

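/*
 * A short sketch of how these interact (using kmalloc() as declared later
 * in this header): a zero-length request returns ZERO_SIZE_PTR rather than
 * NULL, so ZERO_OR_NULL_PTR() is the check for "no usable memory", and
 * kfree() accepts either value.
 *
 *    void *p = kmalloc(0, GFP_KERNEL);   // ZERO_SIZE_PTR, not NULL
 *
 *    if (ZERO_OR_NULL_PTR(p))
 *        goto nothing_to_do;             // hypothetical label
 *    ...
 *    kfree(p);                           // no-op for ZERO_SIZE_PTR and NULL
 */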
#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
            unsigned int align, slab_flags_t flags,
            void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
            unsigned int size, unsigned int align,
            slab_flags_t flags,
            unsigned int useroffset, unsigned int usersize,
            void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

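/*
 * A minimal lifecycle sketch for the prototypes above ("struct foo" and the
 * cache name are hypothetical):
 *
 *    static struct kmem_cache *foo_cache;
 *
 *    foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *                                  SLAB_HWCACHE_ALIGN, NULL);
 *    if (!foo_cache)
 *        return -ENOMEM;
 *
 *    f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *    ...
 *    kmem_cache_free(foo_cache, f);
 *
 *    kmem_cache_destroy(foo_cache);    // only once all objects are freed
 */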
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)                   \
        kmem_cache_create(#__struct, sizeof(struct __struct),   \
            __alignof__(struct __struct), (__flags), NULL)

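/*
 * For example, the hypothetical cache from the sketch above collapses to:
 *
 *    foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * which expands to kmem_cache_create("foo", sizeof(struct foo),
 * __alignof__(struct foo), SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL).
 */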
/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)         \
        kmem_cache_create_usercopy(#__struct,           \
            sizeof(struct __struct),            \
            __alignof__(struct __struct), (__flags),    \
            offsetof(struct __struct, __field),     \
            sizeof_field(struct __struct, __field), NULL)

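/*
 * For example, to allow only a "name" field to cross the user/kernel copy
 * boundary (hypothetical struct and field):
 *
 *    foo_cache = KMEM_CACHE_USERCOPY(foo, 0, name);
 *
 * Copies that touch any other part of the object will then be flagged by
 * the usercopy hardening checks, when those are enabled.
 */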
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

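/*
 * A typical krealloc() sketch: keep the old pointer until the result has
 * been checked, otherwise the original buffer leaks on failure. ksize()
 * reports the usable size, which may be larger than what was requested.
 *
 *    new = krealloc(buf, new_len, GFP_KERNEL);
 *    if (!new)
 *        return -ENOMEM;    // buf is still valid and still owned
 *    buf = new;
 */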
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
    return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH  ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                (MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX   KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW   5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH  (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX   (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW   3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH  PAGE_SHIFT
#define KMALLOC_SHIFT_MAX   (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW   3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE    (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE  (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER   (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object should be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
                               (KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
    KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
    KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
    KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
    KMALLOC_CGROUP,
#endif
    KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
    KMALLOC_DMA,
#endif
    NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS                 \
    (__GFP_RECLAIMABLE |                    \
    (IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |   \
    (IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
    /*
     * The most common case is KMALLOC_NORMAL, so test for it
     * with a single branch for all the relevant flags.
     */
    if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
        return KMALLOC_NORMAL;

    /*
     * At least one of the flags has to be set. Their priorities in
     * decreasing order are:
     *  1) __GFP_DMA
     *  2) __GFP_RECLAIMABLE
     *  3) __GFP_ACCOUNT
     */
    if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
        return KMALLOC_DMA;
    if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
        return KMALLOC_RECLAIM;
    else
        return KMALLOC_CGROUP;
}
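
/*
 * For example, assuming CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM are enabled,
 * the priorities above work out as:
 *
 *    kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *    kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)     == KMALLOC_CGROUP
 *    kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *    kmalloc_type(GFP_KERNEL | __GFP_DMA)         == KMALLOC_DMA
 */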

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
                            bool size_is_constant)
{
    if (!size)
        return 0;

    if (size <= KMALLOC_MIN_SIZE)
        return KMALLOC_SHIFT_LOW;

    if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
        return 1;
    if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
        return 2;
    if (size <=          8) return 3;
    if (size <=         16) return 4;
    if (size <=         32) return 5;
    if (size <=         64) return 6;
    if (size <=        128) return 7;
    if (size <=        256) return 8;
    if (size <=        512) return 9;
    if (size <=       1024) return 10;
    if (size <=   2 * 1024) return 11;
    if (size <=   4 * 1024) return 12;
    if (size <=   8 * 1024) return 13;
    if (size <=  16 * 1024) return 14;
    if (size <=  32 * 1024) return 15;
    if (size <=  64 * 1024) return 16;
    if (size <= 128 * 1024) return 17;
    if (size <= 256 * 1024) return 18;
    if (size <= 512 * 1024) return 19;
    if (size <= 1024 * 1024) return 20;
    if (size <=  2 * 1024 * 1024) return 21;
    if (size <=  4 * 1024 * 1024) return 22;
    if (size <=  8 * 1024 * 1024) return 23;
    if (size <=  16 * 1024 * 1024) return 24;
    if (size <=  32 * 1024 * 1024) return 25;

    if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
        BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
    else
        BUG();

    /* Will never be reached. Needed because the compiler may complain */
    return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */
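
/*
 * For example, with the SLUB defaults (KMALLOC_SHIFT_LOW == 3, so
 * KMALLOC_MIN_SIZE == 8) the compile-time mapping works out as:
 *
 *    kmalloc_index(8)    == 3     // smallest cache
 *    kmalloc_index(96)   == 1     // the special 96-byte cache
 *    kmalloc_index(200)  == 8     // rounded up to the 256-byte cache
 *    kmalloc_index(4096) == 12    // one 4K page
 */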

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
               gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

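/*
 * A bulk-allocation sketch ("foo_cache" is hypothetical): a return of 0 from
 * kmem_cache_alloc_bulk() means the batch could not be allocated and nothing
 * needs to be freed; otherwise all requested objects were allocated.
 *
 *    void *objs[16];
 *
 *    if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *        return -ENOMEM;
 *    ...
 *    kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 */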
/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
    kmem_cache_free_bulk(NULL, size, p);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
                             __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
                                     __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
    return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
    return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
                   __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
                     int node, size_t size) __assume_slab_alignment
                                __alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                         gfp_t gfpflags, int node, size_t size)
{
    return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
                                    gfp_t flags, size_t size)
{
    void *ret = kmem_cache_alloc(s, flags);

    ret = kasan_kmalloc(s, ret, size, flags);
    return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
                             int node, size_t size)
{
    void *ret = kmem_cache_alloc_node(s, gfpflags, node);

    ret = kasan_kmalloc(s, ret, size, gfpflags);
    return ret;
}
#endif /* CONFIG_TRACING */
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
                                     __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
                __assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
                                 unsigned int order)
{
    return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
    unsigned int order = get_order(size);
    return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. If @size is a power of two, the alignment is also guaranteed
 * to be at least that size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *  Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *  Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *  Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *  Allocate memory from high memory on behalf of user space.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *  This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *  Indicate that this allocation is in no way allowed to fail
 *  (think twice before using).
 *
 * %__GFP_NORETRY
 *  If memory is not immediately available,
 *  then give up at once.
 *
 * %__GFP_NOWARN
 *  If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *  Try really hard to satisfy the allocation, but eventually
 *  allow it to fail.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
    if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
        unsigned int index;
#endif
        if (size > KMALLOC_MAX_CACHE_SIZE)
            return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
        index = kmalloc_index(size);

        if (!index)
            return ZERO_SIZE_PTR;

        return kmem_cache_alloc_trace(
                kmalloc_caches[kmalloc_type(flags)][index],
                flags, size);
#endif
    }
    return __kmalloc(size, flags);
}
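
/*
 * The common calling pattern in process context looks like ("struct foo" is
 * hypothetical):
 *
 *    struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *    if (!f)
 *        return -ENOMEM;
 *    ...
 *    kfree(f);
 *
 * In atomic context (under a spinlock, in an interrupt handler) GFP_ATOMIC
 * or GFP_NOWAIT must be used instead, since GFP_KERNEL may sleep.
 */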

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
    if (__builtin_constant_p(size) &&
        size <= KMALLOC_MAX_CACHE_SIZE) {
        unsigned int i = kmalloc_index(size);

        if (!i)
            return ZERO_SIZE_PTR;

        return kmem_cache_alloc_node_trace(
                kmalloc_caches[kmalloc_type(flags)][i],
                        flags, node, size);
    }
#endif
    return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
    size_t bytes;

    if (unlikely(check_mul_overflow(n, size, &bytes)))
        return NULL;
    if (__builtin_constant_p(n) && __builtin_constant_p(size))
        return kmalloc(bytes, flags);
    return __kmalloc(bytes, flags);
}

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
                                    size_t new_n,
                                    size_t new_size,
                                    gfp_t flags)
{
    size_t bytes;

    if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
        return NULL;

    return krealloc(p, bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
    return kmalloc_array(n, size, flags | __GFP_ZERO);
}
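
/*
 * For example, an overflow-safe, zeroed array allocation ("struct foo" and
 * "n" are hypothetical):
 *
 *    struct foo *arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
 *
 *    if (!arr)
 *        return -ENOMEM;    // also taken if n * sizeof(*arr) overflows
 *    ...
 *    kfree(arr);
 */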

/*
 * kmalloc_track_caller is a special version of kmalloc that records, for
 * slab leak tracking, the function that called the routine which invoked
 * it, rather than just its immediate caller (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
    __kmalloc_track_caller(size, flags, _RET_IP_)

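/*
 * A sketch of such a wrapper ("my_memdup" is hypothetical; kmemdup() in
 * mm/util.c follows the same idea): leak reports point at the caller of
 * my_memdup() rather than at my_memdup() itself.
 *
 *    void *my_memdup(const void *src, size_t len, gfp_t gfp)
 *    {
 *        void *p = kmalloc_track_caller(len, gfp);
 *
 *        if (p)
 *            memcpy(p, src, len);
 *        return p;
 *    }
 */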
static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
                              int node)
{
    size_t bytes;

    if (unlikely(check_mul_overflow(n, size, &bytes)))
        return NULL;
    if (__builtin_constant_p(n) && __builtin_constant_p(size))
        return kmalloc_node(bytes, flags, node);
    return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
    return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
                     unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
    __kmalloc_node_track_caller(size, flags, node, \
            _RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
    kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
    return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
    return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
    return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
    return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
    return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
    return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
    size_t bytes;

    if (unlikely(check_mul_overflow(n, size, &bytes)))
        return NULL;

    return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
    return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
              __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

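/*
 * A kvmalloc-family sketch ("struct entry" and "nr" are hypothetical): for
 * allocations that may be too large for kmalloc() to satisfy reliably, the
 * kv* helpers fall back to vmalloc(); such memory must be released with
 * kvfree(), never kfree().
 *
 *    struct entry *tbl = kvcalloc(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *    if (!tbl)
 *        return -ENOMEM;
 *    ...
 *    kvfree(tbl);
 */
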
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu    NULL
#define slab_dead_cpu       NULL
#endif

#endif  /* _LINUX_SLAB_H */