/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
    ALLOC_FASTPATH,     /* Allocation from cpu slab */
    ALLOC_SLOWPATH,     /* Allocation by getting a new cpu slab */
    FREE_FASTPATH,      /* Free to cpu slab */
    FREE_SLOWPATH,      /* Freeing not to cpu slab */
    FREE_FROZEN,        /* Freeing to frozen slab */
    FREE_ADD_PARTIAL,   /* Freeing moves slab to partial list */
    FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
    ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
    ALLOC_SLAB,     /* Cpu slab acquired from page allocator */
    ALLOC_REFILL,       /* Refill cpu slab from slab freelist */
    ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
    FREE_SLAB,      /* Slab freed to the page allocator */
    CPUSLAB_FLUSH,      /* Abandoning of the cpu slab */
    DEACTIVATE_FULL,    /* Cpu slab was full when deactivated */
    DEACTIVATE_EMPTY,   /* Cpu slab was empty when deactivated */
    DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
    DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
    DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
    DEACTIVATE_BYPASS,  /* Implicit deactivation */
    ORDER_FALLBACK,     /* Number of times fallback was necessary */
    CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
    CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
    CPU_PARTIAL_ALLOC,  /* Used cpu partial on alloc */
    CPU_PARTIAL_FREE,   /* Refill cpu partial on free */
    CPU_PARTIAL_NODE,   /* Refill cpu partial from node partial */
    CPU_PARTIAL_DRAIN,  /* Drain cpu partial to node partial */
    NR_SLUB_STAT_ITEMS };
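
/*
 * Illustrative note, not part of this header: these counters exist only
 * when CONFIG_SLUB_STATS is enabled. mm/slub.c bumps them with a cheap
 * per-cpu increment, roughly
 *
 *      raw_cpu_inc(s->cpu_slab->stat[ALLOC_FASTPATH]);
 *
 * tolerating the racy read-modify-write rather than paying for disabling
 * interrupts, and the totals are exposed per cache through sysfs.
 */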

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
    void **freelist;    /* Pointer to next available object */
    unsigned long tid;  /* Globally unique transaction id */
    struct slab *slab;  /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
    struct slab *partial;   /* Partially allocated frozen slabs */
#endif
    local_lock_t lock;  /* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
    unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
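
/*
 * Illustrative note, not part of this header: the layout warning above
 * exists because the lockless fastpaths in mm/slub.c update freelist and
 * tid together as one double word, roughly
 *
 *      if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *                                   object, tid,
 *                                   next_object, next_tid(tid)))
 *              goto redo;
 *
 * which only works while the two members stay adjacent and are aligned for
 * a double-word compare-and-exchange.
 */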

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)      ((c)->partial)

#define slub_set_percpu_partial(c, p)       \
({                      \
    slub_percpu_partial(c) = (p)->next; \
})

#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)          NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)    NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
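
/*
 * Illustrative note, not part of this header: mm/slub.c (which can see the
 * full definition of struct slab) refills the cpu slab by popping the head
 * of this list, roughly
 *
 *      struct slab *slab = slub_percpu_partial(c);
 *
 *      if (slab) {
 *              slub_set_percpu_partial(c, slab);   i.e. c->partial = slab->next
 *              ... make 'slab' the new cpu slab ...
 *      }
 *
 * With CONFIG_SLUB_CPU_PARTIAL disabled the macros collapse to NULL and a
 * no-op, so callers do not need extra #ifdefs.
 */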

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
    unsigned int x;
};
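
/*
 * Illustrative sketch, not part of this header: mm/slub.c packs the page
 * order into the high bits of 'x' and the object count into the low bits
 * (its OO_SHIFT constant is 16) and decodes the two fields roughly as
 * below. The names slub_oo_order()/slub_oo_objects() are hypothetical,
 * used here only to avoid clashing with the real helpers in mm/slub.c.
 */
static inline unsigned int slub_oo_order(struct kmem_cache_order_objects x)
{
    return x.x >> 16;               /* high bits: page allocation order */
}

static inline unsigned int slub_oo_objects(struct kmem_cache_order_objects x)
{
    return x.x & ((1U << 16) - 1);  /* low bits: objects per slab */
}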

/*
 * Slab cache management.
 */
struct kmem_cache {
    struct kmem_cache_cpu __percpu *cpu_slab;
    /* Used for retrieving partial slabs, etc. */
    slab_flags_t flags;
    unsigned long min_partial;
    unsigned int size;  /* The size of an object including metadata */
    unsigned int object_size;/* The size of an object without metadata */
    struct reciprocal_value reciprocal_size;
    unsigned int offset;    /* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
    /* Number of per cpu partial objects to keep around */
    unsigned int cpu_partial;
    /* Number of per cpu partial slabs to keep around */
    unsigned int cpu_partial_slabs;
#endif
    struct kmem_cache_order_objects oo;

    /* Allocation and freeing of slabs */
    struct kmem_cache_order_objects min;
    gfp_t allocflags;   /* gfp flags to use on each alloc */
    int refcount;       /* Refcount for slab cache destroy */
    void (*ctor)(void *);
    unsigned int inuse;     /* Offset to metadata */
    unsigned int align;     /* Alignment */
    unsigned int red_left_pad;  /* Left redzone padding size */
    const char *name;   /* Name (only for display!) */
    struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
    struct kobject kobj;    /* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
    unsigned long random;
#endif

#ifdef CONFIG_NUMA
    /*
     * Defragmentation by allocating from a remote node.
     */
    unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
    unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
    struct kasan_cache kasan_info;
#endif

    unsigned int useroffset;    /* Usercopy region offset */
    unsigned int usersize;      /* Usercopy region size */

    struct kmem_cache_node *node[MAX_NUMNODES];
};
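
/*
 * Illustrative sketch, not part of this header: 'offset' above is where the
 * free pointer lives inside a free object. Ignoring KASAN tag handling and
 * CONFIG_SLAB_FREELIST_HARDENED obfuscation, mm/slub.c follows the freelist
 * roughly as below; slub_obj_free_pointer() is a hypothetical name used
 * only for this sketch.
 */
static inline void *slub_obj_free_pointer(const struct kmem_cache *s, void *object)
{
    /* The next-free link is stored 'offset' bytes into the object itself. */
    return *(void **)((char *)object + s->offset);
}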

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
                void *x) {
    void *object = x - (x - slab_address(slab)) % cache->size;
    void *last_object = slab_address(slab) +
        (slab->objects - 1) * cache->size;
    void *result = (unlikely(object > last_object)) ? last_object : object;

    result = fixup_red_left(cache, result);
    return result;
}
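
/*
 * Worked example (illustrative): with slab_address(slab) == B and
 * cache->size == 128, a pointer x == B + 300 rounds down to B + 256, the
 * start of the object at index 2. Pointers that land beyond the last
 * object are clamped to last_object, and fixup_red_left() then skips the
 * left red zone when SLAB_RED_ZONE debugging is active, so the caller gets
 * the start of the object itself rather than of its debug padding.
 */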

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
                      void *addr, void *obj)
{
    return reciprocal_divide(kasan_reset_tag(obj) - addr,
                 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                    const struct slab *slab, void *obj)
{
    if (is_kfence_address(obj))
        return 0;
    return __obj_to_index(cache, slab_address(slab), obj);
}
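
/*
 * Worked example (illustrative): the index is simply
 * (obj - slab_address(slab)) / cache->size, so with cache->size == 64 an
 * object at slab_address(slab) + 192 maps to index 3. reciprocal_divide()
 * produces the same quotient from the precomputed cache->reciprocal_size
 * using a multiply and shift instead of a hardware division. KFENCE
 * objects live in KFENCE's own pool rather than in the slab's pages, so
 * they are reported as index 0.
 */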

static inline int objs_per_slab(const struct kmem_cache *cache,
                     const struct slab *slab)
{
    return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */