#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
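
/*
 * Illustrative sketch (not part of the kernel tree): how the layout fields
 * above relate. One slab of this cache spans 2^gfporder pages and holds
 * 'num' objects of 'size' bytes each, while 'reciprocal_buffer_size'
 * caches reciprocal_value(size) so index lookups can divide by 'size'
 * without a hardware divide. The helper name below is hypothetical.
 */
static inline unsigned int slab_def_example_capacity(const struct kmem_cache *cache)
{
	/* total bytes backing one slab of this cache */
	size_t slab_bytes = PAGE_SIZE << cache->gfporder;

	/*
	 * Upper bound on objects per slab; the real 'num' is fixed at cache
	 * creation and also accounts for freelist and colouring overhead.
	 */
	return slab_bytes / cache->size;
}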

/*
 * Round an address inside a slab down to the start of the object that
 * contains it, clamping to the last object in the slab.
 */
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab->s_mem) % cache->size;
	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
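
/*
 * Worked example (illustrative numbers): with slab->s_mem == 0x1000,
 * cache->size == 64 and cache->num == 16, a pointer x == 0x1050 lies 0x50
 * bytes into the slab; 0x50 % 64 == 0x10, so nearest_obj() returns
 * 0x1050 - 0x10 == 0x1040, the start of the second object. A pointer past
 * the last object is clamped to last_object (0x1000 + 15 * 64 == 0x13c0).
 */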

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
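
/*
 * Worked example (illustrative numbers): continuing the case above with
 * s_mem == 0x1000 and size == 64, obj == 0x1080 gives offset == 0x80, and
 * reciprocal_divide(0x80, reciprocal_value(64)) == 0x80 / 64 == 2, i.e.
 * the third object in the slab.
 */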

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	/* a KFENCE-backed allocation sits alone in its own page(s) */
	if (is_kfence_address(slab_address(slab)))
		return 1;
	return cache->num;
}
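
/*
 * Usage sketch (hypothetical helper, not a kernel API): visiting every
 * object in a slab with the helpers above. objs_per_slab() bounds the
 * walk and transparently handles the one-object KFENCE case.
 */
static inline void slab_def_example_for_each(struct kmem_cache *cache,
					     const struct slab *slab,
					     void (*fn)(void *obj))
{
	int i;

	for (i = 0; i < objs_per_slab(cache, slab); i++)
		fn(slab->s_mem + i * cache->size);
}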

#endif	/* _LINUX_SLAB_DEF_H */