/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Unified interface to the slab allocators (SLAB, SLUB, SLOB):
 * kmem_cache management plus kmalloc() and friends.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG only take effect when slab debugging is enabled.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU: the slab's backing pages are not returned to the
 * page allocator until after an RCU grace period has elapsed.  Note that
 * the object memory itself may be reused for a new object of the same type
 * before the grace period ends, so lockless readers must tolerate that and
 * revalidate the object (for example via a lock or reference count placed
 * inside the object) after dereferencing a pointer obtained under
 * rcu_read_lock().  This flag delays freeing of the slab, not of the
 * individual objects.
 */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free (used with CONFIG_DEBUG_OBJECTS) */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account objects to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so that they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);
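
/*
 * Illustrative example (not part of this header): a typical create/alloc/
 * free/destroy cycle for a driver-private cache.  "struct foo", "foo_cache"
 * and "f" are hypothetical names used only for this sketch.
 *
 *	static struct kmem_cache *foo_cache;
 *	struct foo *f;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */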

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
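
/*
 * Illustrative example (not part of this header): whitelisting a single
 * field of a hypothetical structure for copies to/from userspace, so that
 * only that field may be handed to copy_to_user()/copy_from_user() when
 * CONFIG_HARDENED_USERCOPY is enabled.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char name[32];
 *	};
 *
 *	foo_cache = KMEM_CACHE_USERCOPY(foo, SLAB_ACCOUNT, name);
 */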

void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif
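
/*
 * Illustrative example (not part of this header): resizing a kmalloc()ed
 * buffer.  Assigning through a temporary avoids leaking the old buffer when
 * krealloc() fails, since the old allocation is left untouched on failure;
 * "buf" and "new_len" are hypothetical.
 *
 *	void *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */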

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.  Defining
 * ARCH_DMA_MINALIGN in arch headers raises the kmalloc() minimum alignment
 * (and minimum size) accordingly.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different minimum
 * alignment for objects returned by kmem_cache_alloc().
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmalloc() and friends return ARCH_KMALLOC_MINALIGN aligned pointers;
 * kmem_cache_alloc() and friends return ARCH_SLAB_MINALIGN aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */
#ifdef CONFIG_SLAB
/*
 * SLAB: the largest kmalloc size supported by the kmalloc array is 32 MB
 * (2^25), or the maximum allocatable page order if that is smaller.
 *
 * WARNING: it is not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmalloc caches are either accounted or unaccounted and can be used for
 * reclaimable, dma and unaccounted objects respectively.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
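
/*
 * Illustrative examples (not from the kernel sources) of the resulting
 * mapping, assuming CONFIG_ZONE_DMA and CONFIG_MEMCG_KMEM are both enabled:
 *
 *	kmalloc_type(GFP_KERNEL)			-> KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)	-> KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT)	-> KMALLOC_CGROUP
 *	kmalloc_type(GFP_DMA)				-> KMALLOC_DMA
 */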

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() will be negligible.
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
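
/*
 * Illustrative only: assuming KMALLOC_SHIFT_LOW == 3 and KMALLOC_MIN_SIZE == 8
 * (the SLUB defaults without a larger ARCH_DMA_MINALIGN), constant sizes map
 * to kmalloc cache indexes as follows:
 *
 *	kmalloc_index(1)    == 3	(kmalloc-8)
 *	kmalloc_index(13)   == 4	(kmalloc-16)
 *	kmalloc_index(96)   == 1	(kmalloc-96)
 *	kmalloc_index(4096) == 12	(kmalloc-4k)
 */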
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
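
/*
 * Illustrative sketch (not part of this header): allocating and freeing a
 * batch of objects from a hypothetical "foo_cache".  kmem_cache_alloc_bulk()
 * returns the number of objects it allocated, or 0 on failure.
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, n, objs);
 */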

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
				    __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment
								__alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
						 gfp_t gfpflags, int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif

#else
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
								    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
							  int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
									 __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
				 __assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
								  unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);

	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes.  For @size of power of two bytes, the alignment is also guaranteed
 * to be at least the size.
 *
 * The @flags argument may be one of the GFP flags defined in
 * include/linux/gfp.h and described in
 * Documentation/core-api/memory-allocation.rst.  The most useful ones are:
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram.  May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * Further modifier flags such as %__GFP_ZERO (zero the memory, see also
 * kzalloc()), %__GFP_NOWARN (do not warn on failure) or %__GFP_NOFAIL
 * (never fail, think twice before using) can be OR'ed in.
 *
 * Constant-size requests larger than KMALLOC_MAX_CACHE_SIZE bypass the
 * kmalloc caches and go straight to the page allocator via kmalloc_large().
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
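
/*
 * Illustrative sketch (not part of this header): the usual allocate/check/
 * free pattern; "struct foo" is a hypothetical type.
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */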

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
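
/*
 * Illustrative only: unlike an open-coded kmalloc(n * size, ...), the
 * multiplication in kmalloc_array() is overflow-checked, so an oversized
 * element count fails cleanly (returns NULL) instead of silently wrapping;
 * "nr_entries" is hypothetical.
 *
 *	u64 *tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 */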

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
								     size_t new_n,
								     size_t new_size,
								     gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
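
/*
 * Illustrative only: growing a previously allocated array while keeping its
 * contents.  As with krealloc(), the old buffer is left untouched on failure;
 * "tbl" and "new_n" are hypothetical.
 *
 *	u64 *tmp = krealloc_array(tbl, new_n, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	tbl = tmp;
 */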

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, yes).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							   int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

#else

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}
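
/*
 * Illustrative sketch (not part of this header): kvmalloc() tries a kmalloc()
 * allocation first and falls back to vmalloc() for sizes the slab allocator
 * cannot satisfy, so the buffer is not guaranteed to be physically contiguous
 * and must be released with kvfree(); "nr_bytes" is hypothetical.
 *
 *	void *buf = kvmalloc(nr_bytes, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */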

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */