#ifndef MM_SLAB_H
#define MM_SLAB_H

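/*
 * Internal slab definitions.
 *
 * struct slab reuses the bits of struct page; the SLAB_MATCH() assertions
 * below check that the fields shared with struct page stay at the same
 * offsets for whichever allocator is configured.
 */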
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache;
	void *freelist;	/* array of free object indexes */
	void *s_mem;	/* first object */
	unsigned int active;

#elif defined(CONFIG_SLUB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
#ifdef CONFIG_SLUB_CPU_PARTIAL
		struct {
			struct slab *next;
			int slabs;	/* Nr of slabs left */
		};
#endif
	};
	struct kmem_cache *slab_cache;
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	unsigned int __unused;

#elif defined(CONFIG_SLOB)

	struct list_head slab_list;
	void *__unused_1;
	void *freelist;		/* first free block */
	long units;
	unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));

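/**
 * folio_slab - Convert from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where a
 * folio_test_slab() check is true.
 *
 * Return: The slab which contains this folio.
 */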
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

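/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab.  It is occasionally necessary to convert back
 * to a folio in order to communicate with the rest of the mm.  Please use
 * this helper function instead of casting yourself, as the implementation
 * may change in the future.
 */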
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

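/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (head) struct page of a slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Return: The slab which contains this page.
 */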
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

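/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */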
#define slab_page(s) folio_page(slab_folio(s), 0)

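/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves; this is stored in the folio's
 * Active flag by the helpers below.
 */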
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

#ifdef CONFIG_SLOB
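/*
 * Common fields provided in kmem_cache by all slab allocators.  SLOB uses
 * this definition directly; SLAB and SLUB bring their own kmem_cache via
 * <linux/slab_def.h> and <linux/slub_def.h> below, which must provide at
 * least these fields.
 */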
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added on size */
	unsigned int align;		/* Alignment as calculated */
	slab_flags_t flags;		/* Active flags on the slab */
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

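/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves.
 */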
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc cache for a given allocation size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

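/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache.  Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */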
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#ifdef CONFIG_MEMCG_KMEM
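/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */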
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_enabled())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
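/*
 * The slab lists for all objects.
 */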
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

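/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */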
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */