/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

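/*
 * Allocator statistics, counted per cpu.  With CONFIG_SLUB_STATS enabled,
 * each counter below is exposed as a separate read-only file under
 * /sys/kernel/slab/<cache>/.
 */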
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partial list */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partial list */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
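
/*
 * Illustrative sketch only (the real code lives in mm/slub.c): the
 * allocation fastpath reads freelist and tid and then replaces both in a
 * single transaction, retrying if another context won the race.  Roughly:
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     next_object, next_tid(tid)))
 *		goto redo;		(lost the race, retry)
 */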

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
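
/*
 * Usage sketch (illustrative only): mm/slub.c pops the head of the per-cpu
 * partial list with these helpers, e.g.
 *
 *	struct slab *slab = slub_percpu_partial(c);
 *	if (slab)
 *		slub_set_percpu_partial(c, slab);	(c->partial = slab->next)
 *
 * With CONFIG_SLUB_CPU_PARTIAL disabled the helpers evaluate to NULL or to
 * nothing, so callers do not need additional #ifdefs.
 */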

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
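
/*
 * The encoding of x is private to mm/slub.c (see oo_make(), oo_order() and
 * oo_objects()): the page order is kept in the high bits and the object
 * count in the low bits.  For example, assuming 4KiB pages and a 16-bit
 * split, an order-1 slab of 64-byte objects would be stored as
 * (1 << 16) | 128.
 */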

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* The size of an object including metadata */
	unsigned int object_size;	/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

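/*
 * Map an arbitrary pointer within a slab to the start of the object that
 * contains it, clamped to the last object in the slab and adjusted for any
 * left redzone.  Used, for example, by the KASAN reporting code to describe
 * the object nearest to a stray address.
 */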
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x) {
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

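/*
 * Illustrative use (not part of this header): obj_to_index() maps an object
 * to its slot number within the slab, returning 0 for KFENCE addresses,
 * which have no regular slot.  Callers such as the memcg accounting code in
 * mm/slab.h use the index to address per-object metadata, roughly:
 *
 *	unsigned int off = obj_to_index(s, slab, p);
 *	slab_objcgs(slab)[off] = objcg;
 */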
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */