/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
    unsigned long __page_flags;

#if defined(CONFIG_SLAB)

    union {
        struct list_head slab_list;
        struct rcu_head rcu_head;
    };
    struct kmem_cache *slab_cache;
    void *freelist; /* array of free object indexes */
    void *s_mem;    /* first object */
    unsigned int active;

#elif defined(CONFIG_SLUB)

    union {
        struct list_head slab_list;
        struct rcu_head rcu_head;
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct {
            struct slab *next;
            int slabs;  /* Nr of slabs left */
        };
#endif
    };
    struct kmem_cache *slab_cache;
    /* Double-word boundary */
    void *freelist;     /* first free object */
    union {
        unsigned long counters;
        struct {
            unsigned inuse:16;
            unsigned objects:15;
            unsigned frozen:1;
        };
    };
    unsigned int __unused;

#elif defined(CONFIG_SLOB)

    struct list_head slab_list;
    void *__unused_1;
    void *freelist;     /* first free block */
    long units;
    unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

    atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
    unsigned long memcg_data;
#endif
};
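
/*
 * For SLUB, the freelist pointer and the counters word above are kept
 * together on a double-word boundary so the allocator can update both with a
 * single double-width compare-and-exchange.  A minimal reading sketch of the
 * counters view (illustrative, assuming CONFIG_SLUB):
 *
 *    slab->counters  - all three fields below, read as one word
 *    slab->inuse     - objects currently allocated (16 bits)
 *    slab->objects   - objects this slab can hold (15 bits)
 *    slab->frozen    - slab is in per-CPU use (1 bit)
 *
 * Loading counters therefore snapshots inuse, objects and frozen atomically.
 */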

#define SLAB_MATCH(pg, sl)                      \
    static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)   (_Generic((folio),          \
    const struct folio *:   (const struct slab *)(folio),       \
    struct folio *:     (struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to a
 * folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)       (_Generic((s),              \
    const struct slab *:    (const struct folio *)s,        \
    struct slab *:      (struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head or a single order-0 page.
 *
 * Long-term, ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)        (_Generic((p),              \
    const struct page *:    (const struct slab *)(p),       \
    struct page *:      (struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
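
/*
 * Usage sketch (illustrative): the four helpers above are type-checked casts,
 * so converting between the folio, page and slab views is free.  Starting
 * from an object address, code typically goes through the folio, as
 * virt_to_slab() below does:
 *
 *    struct folio *folio = virt_to_folio(addr);
 *
 *    if (folio_test_slab(folio)) {
 *        struct slab *slab = folio_slab(folio);
 *        struct folio *back = slab_folio(slab);
 *    }
 *
 * where "back" is the same pointer as "folio" again.
 */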

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
    return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
    folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
    folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
    __folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
    return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
    return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
    return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
    struct folio *folio = virt_to_folio(addr);

    if (!folio_test_slab(folio))
        return NULL;

    return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
    return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
    return PAGE_SIZE << slab_order(slab);
}
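
/*
 * Worked example (illustrative): with 4 KiB pages, an order-2 slab covers
 * PAGE_SIZE << 2 = 16 KiB, so slab_size() returns 16384 and slab_address()
 * points at the first byte of those four pages.
 */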

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate copies of these fields in the kmem_cache structures of
 * SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
    unsigned int object_size;/* The original size of the object */
    unsigned int size;  /* The aligned/padded/added on size  */
    unsigned int align; /* Alignment as calculated */
    slab_flags_t flags; /* Active flags on the slab */
    unsigned int useroffset;/* Usercopy region offset */
    unsigned int usersize;  /* Usercopy region size */
    const char *name;   /* Slab name for sysfs */
    int refcount;       /* Use counter */
    void (*ctor)(void *);   /* Called on object slot creation */
    struct list_head list;  /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
    DOWN,           /* No slab functionality yet */
    PARTIAL,        /* SLUB: kmem_cache_node available */
    PARTIAL_NODE,       /* SLAB: kmalloc size for node struct available */
    UP,         /* Slab caches usable but not all extras yet */
    FULL            /* Everything is working */
};
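
/*
 * Sketch of how the state is consumed (illustrative): boot-time code that
 * must not allocate from a cache before the caches exist can gate itself on
 * the current state, e.g.
 *
 *    if (slab_state >= UP)
 *        it is safe to use the kmalloc()/kmem_cache_alloc() family
 *
 * slab_is_available() in slab_common.c is essentially this comparison.
 */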

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
    const char *name[NR_KMALLOC_TYPES];
    unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
            slab_flags_t flags, unsigned int useroffset,
            unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
            unsigned int size, slab_flags_t flags,
            unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
        slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
           slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
    slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
           slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
    slab_flags_t flags, const char *name)
{
    return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
             SLAB_CACHE_DMA32 | SLAB_PANIC | \
             SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
              SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
              SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
              SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
              SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                  SLAB_RED_ZONE | \
                  SLAB_POISON | \
                  SLAB_STORE_USER | \
                  SLAB_TRACE | \
                  SLAB_CONSISTENCY_CHECKS | \
                  SLAB_MEM_SPREAD | \
                  SLAB_NOLEAKTRACE | \
                  SLAB_RECLAIM_ACCOUNT | \
                  SLAB_TEMPORARY | \
                  SLAB_ACCOUNT | \
                  SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
    unsigned long active_objs;
    unsigned long num_objs;
    unsigned long active_slabs;
    unsigned long num_slabs;
    unsigned long shared_avail;
    unsigned int limit;
    unsigned int batchcount;
    unsigned int shared;
    unsigned int objects_per_slab;
    unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
               size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
    return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
        NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
    return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
    return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
    if (IS_ENABLED(CONFIG_SLUB_DEBUG))
        VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
    if (__slub_debug_enabled())
        return s->flags & flags;
    return false;
}
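
/*
 * Usage sketch (illustrative): callers use this to keep debug-only work out
 * of the fast paths, e.g.
 *
 *    if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *        record the allocating task and call stack for the object
 *
 * With CONFIG_SLUB_DEBUG disabled, __slub_debug_enabled() is constant false
 * and the compiler can drop the whole branch.
 */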

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
    unsigned long memcg_data = READ_ONCE(slab->memcg_data);

    VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
                            slab_page(slab));
    VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

    return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
                 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
             enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
    kfree(slab_objcgs(slab));
    slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
    /*
     * For each accounted object there is extra space used to store
     * its obj_cgroup membership. Charge it too.
     */
    return s->size + sizeof(struct obj_cgroup *);
}
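
/*
 * Worked example (illustrative): for a cache with s->size == 256 on a 64-bit
 * kernel, each accounted object is charged 256 + 8 = 264 bytes, the extra
 * sizeof(struct obj_cgroup *) covering its slot in the objcgs vector.
 */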

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                         struct list_lru *lru,
                         struct obj_cgroup **objcgp,
                         size_t objects, gfp_t flags)
{
    struct obj_cgroup *objcg;

    if (!memcg_kmem_enabled())
        return true;

    if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
        return true;

    objcg = get_obj_cgroup_from_current();
    if (!objcg)
        return true;

    if (lru) {
        int ret;
        struct mem_cgroup *memcg;

        memcg = get_mem_cgroup_from_objcg(objcg);
        ret = memcg_list_lru_alloc(memcg, lru, flags);
        css_put(&memcg->css);

        if (ret)
            goto out;
    }

    if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
        goto out;

    *objcgp = objcg;
    return true;
out:
    obj_cgroup_put(objcg);
    return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                          struct obj_cgroup *objcg,
                          gfp_t flags, size_t size,
                          void **p)
{
    struct slab *slab;
    unsigned long off;
    size_t i;

    if (!memcg_kmem_enabled() || !objcg)
        return;

    for (i = 0; i < size; i++) {
        if (likely(p[i])) {
            slab = virt_to_slab(p[i]);

            if (!slab_objcgs(slab) &&
                memcg_alloc_slab_cgroups(slab, s, flags,
                             false)) {
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                continue;
            }

            off = obj_to_index(s, slab, p[i]);
            obj_cgroup_get(objcg);
            slab_objcgs(slab)[off] = objcg;
            mod_objcg_state(objcg, slab_pgdat(slab),
                    cache_vmstat_idx(s), obj_full_size(s));
        } else {
            obj_cgroup_uncharge(objcg, obj_full_size(s));
        }
    }
    obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                    void **p, int objects)
{
    struct obj_cgroup **objcgs;
    int i;

    if (!memcg_kmem_enabled())
        return;

    objcgs = slab_objcgs(slab);
    if (!objcgs)
        return;

    for (i = 0; i < objects; i++) {
        struct obj_cgroup *objcg;
        unsigned int off;

        off = obj_to_index(s, slab, p[i]);
        objcg = objcgs[off];
        if (!objcg)
            continue;

        objcgs[off] = NULL;
        obj_cgroup_uncharge(objcg, obj_full_size(s));
        mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
                -obj_full_size(s));
        obj_cgroup_put(objcg);
    }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
    return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
    return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
                           struct kmem_cache *s, gfp_t gfp,
                           bool new_slab)
{
    return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                         struct list_lru *lru,
                         struct obj_cgroup **objcgp,
                         size_t objects, gfp_t flags)
{
    return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                          struct obj_cgroup *objcg,
                          gfp_t flags, size_t size,
                          void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                    void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
    struct slab *slab;

    slab = virt_to_slab(obj);
    if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
                    __func__))
        return NULL;
    return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
                     struct kmem_cache *s, gfp_t gfp)
{
    if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
        memcg_alloc_slab_cgroups(slab, s, gfp, true);

    mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
                       struct kmem_cache *s)
{
    if (memcg_kmem_enabled())
        memcg_free_slab_cgroups(slab);

    mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
    struct kmem_cache *cachep;

    if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
        !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
        return s;

    cachep = virt_to_cache(x);
    if (WARN(cachep && cachep != s,
          "%s: Wrong slab cache. %s but object is from %s\n",
          __func__, s->name, cachep->name))
        print_tracking(cachep, x);
    return cachep;
}
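
/*
 * Usage sketch (illustrative): the free paths can run the pointer through
 * this helper before touching the freelist, so that an object freed to the
 * wrong cache is reported:
 *
 *    s = cache_from_obj(s, objp);
 *
 * With neither SLAB_FREELIST_HARDENED nor consistency checks enabled the
 * helper simply returns the cache that was passed in.
 */
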
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
    return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
    /*
     * Debugging requires use of the padding between object
     * and whatever may come after it.
     */
    if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
        return s->object_size;
# endif
    if (s->flags & SLAB_KASAN)
        return s->object_size;
    /*
     * If we need to store the freelist pointer back there or track user
     * information then we can only use the space before that information.
     */
    if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
        return s->inuse;
    /*
     * Else we can use all the padding etc for the allocation
     */
    return s->size;
#endif
}
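
/*
 * Example (illustrative): kmalloc(40, GFP_KERNEL) is served from the
 * kmalloc-64 cache, so with none of the debug or KASAN flags set slab_ksize()
 * reports 64 usable bytes for that object rather than the 40 that were
 * requested.
 */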

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                             struct list_lru *lru,
                             struct obj_cgroup **objcgp,
                             size_t size, gfp_t flags)
{
    flags &= gfp_allowed_mask;

    might_alloc(flags);

    if (should_failslab(s, flags))
        return NULL;

    if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
        return NULL;

    return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                    struct obj_cgroup *objcg, gfp_t flags,
                    size_t size, void **p, bool init)
{
    size_t i;

    flags &= gfp_allowed_mask;

    /*
     * As memory initialization might be integrated into KASAN,
     * kasan_slab_alloc and initialization memset must be
     * kept together to avoid discrepancies in behavior.
     *
     * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
     */
    for (i = 0; i < size; i++) {
        p[i] = kasan_slab_alloc(s, p[i], flags, init);
        if (p[i] && init && !kasan_has_integrated_init())
            memset(p[i], 0, s->object_size);
        kmemleak_alloc_recursive(p[i], s->object_size, 1,
                     s->flags, flags);
    }

    memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
    spinlock_t list_lock;

#ifdef CONFIG_SLAB
    struct list_head slabs_partial; /* partial list first, better asm code */
    struct list_head slabs_full;
    struct list_head slabs_free;
    unsigned long total_slabs;  /* length of all slab lists */
    unsigned long free_slabs;   /* length of free slab list only */
    unsigned long free_objects;
    unsigned int free_limit;
    unsigned int colour_next;   /* Per-node cache coloring */
    struct array_cache *shared; /* shared per node */
    struct alien_cache **alien; /* on other nodes */
    unsigned long next_reap;    /* updated without locking */
    int free_touched;       /* updated without locking */
#endif

#ifdef CONFIG_SLUB
    unsigned long nr_partial;
    struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
    atomic_long_t nr_slabs;
    atomic_long_t total_objects;
    struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
    return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
    for (__node = 0; __node < nr_node_ids; __node++) \
         if ((__n = get_node(__s, __node)))
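
/*
 * Usage sketch (illustrative, assuming the CONFIG_SLUB fields above):
 *
 *    struct kmem_cache_node *n;
 *    unsigned long nr_partial = 0;
 *    int node;
 *
 *    for_each_kmem_cache_node(s, node, n)
 *        nr_partial += n->nr_partial;
 *
 * Nodes without an allocated kmem_cache_node are skipped by the if-clause
 * in the macro.
 */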

#endif

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
            gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                    unsigned int count, gfp_t gfp)
{
    return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
    if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                &init_on_alloc)) {
        if (c->ctor)
            return false;
        if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
            return flags & __GFP_ZERO;
        return true;
    }
    return flags & __GFP_ZERO;
}
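
/*
 * Example (illustrative): with init_on_alloc disabled, kzalloc() still
 * returns zeroed memory because it passes __GFP_ZERO, while plain kmalloc()
 * does not.  With init_on_alloc enabled, caches with a constructor are
 * exempt so constructed state is not wiped, and SLAB_TYPESAFE_BY_RCU or
 * poisoned caches are zeroed only on an explicit __GFP_ZERO.
 */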

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
    if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                &init_on_free))
        return !(c->ctor ||
             (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
    return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
    void *kp_ptr;
    struct slab *kp_slab;
    void *kp_objp;
    unsigned long kp_data_offset;
    struct kmem_cache *kp_slab_cache;
    void *kp_ret;
    void *kp_stack[KS_ADDRS_COUNT];
    void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
             const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
             const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */