/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
    struct array_cache __percpu *cpu_cache;    /* per-CPU arrays of free object pointers (hot path) */

/* 1) Cache tunables. Protected by slab_mutex */
    unsigned int batchcount;    /* # of objs moved to/from a per-CPU array at once */
    unsigned int limit;         /* max # of free objs kept in a per-CPU array */
    unsigned int shared;        /* per-node shared array cache tunable */

    unsigned int size;
    struct reciprocal_value reciprocal_buffer_size; /* reciprocal of size, for obj_to_index() */
/* 2) touched by every alloc & free from the backend */

    slab_flags_t flags;     /* constant flags */
    unsigned int num;       /* # of objs per slab */

/* 3) cache_grow/shrink */
    /* order of pgs per slab (2^n) */
    unsigned int gfporder;

    /* force GFP flags, e.g. GFP_DMA */
    gfp_t allocflags;

    size_t colour;          /* cache colouring range */
    unsigned int colour_off;    /* colour offset */
    struct kmem_cache *freelist_cache;  /* cache backing off-slab freelists */
    unsigned int freelist_size;     /* size of a slab's freelist, in bytes */

    /* constructor func */
    void (*ctor)(void *obj);

/* 4) cache creation/removal */
    const char *name;
    struct list_head list;
    int refcount;
    int object_size;
    int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
    unsigned long num_active;
    unsigned long num_allocations;
    unsigned long high_mark;
    unsigned long grown;
    unsigned long reaped;
    unsigned long errors;
    unsigned long max_freeable;
    unsigned long node_allocs;
    unsigned long node_frees;
    unsigned long node_overflow;
    atomic_t allochit;
    atomic_t allocmiss;
    atomic_t freehit;
    atomic_t freemiss;

    /*
     * If debugging is enabled, then the allocator can add additional
     * fields and/or padding to every object. 'size' contains the total
     * object size including these internal fields, while 'obj_offset'
     * and 'object_size' contain the offset to the user object and its
     * size.
     */
    int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN
    struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
    unsigned int *random_seq;   /* precomputed random freelist order */
#endif

    unsigned int useroffset;    /* Usercopy region offset */
    unsigned int usersize;      /* Usercopy region size */

    struct kmem_cache_node *node[MAX_NUMNODES]; /* per-NUMA-node slab lists */
};
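
/*
 * Illustrative sketch (not part of the upstream header): how the arguments to
 * kmem_cache_create() end up in the fields above. The struct tag 'my_object'
 * and the constructor name are hypothetical, used only for the example.
 *
 *	struct my_object { int id; char buf[60]; };
 *
 *	static void my_object_ctor(void *obj)
 *	{
 *		memset(obj, 0, sizeof(struct my_object));
 *	}
 *
 *	// "my_object"              -> cache->name
 *	// sizeof(struct my_object) -> cache->object_size
 *	// 0 (default alignment)    -> cache->align
 *	// SLAB_HWCACHE_ALIGN       -> cache->flags
 *	// my_object_ctor           -> cache->ctor
 *	struct kmem_cache *c = kmem_cache_create("my_object",
 *						 sizeof(struct my_object), 0,
 *						 SLAB_HWCACHE_ALIGN,
 *						 my_object_ctor);
 */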

/*
 * Return the start of the object that contains the pointer x on this slab,
 * clamping to the last object if x points past it.
 */
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
                void *x)
{
    void *object = x - (x - slab->s_mem) % cache->size;
    void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

    if (unlikely(object > last_object))
        return last_object;
    else
        return object;
}
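
/*
 * Worked example for nearest_obj(), with illustrative numbers: if
 * cache->size == 256 and x == slab->s_mem + 700, then
 * (x - s_mem) % size == 700 % 256 == 188, so the function returns
 * s_mem + 512, the start of the third object on the slab.
 */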

/*
 * We want to avoid an expensive divide: (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                    const struct slab *slab, void *obj)
{
    u32 offset = (obj - slab->s_mem);
    return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
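
/*
 * Worked example for obj_to_index(), with illustrative numbers: for
 * cache->size == 256, reciprocal_buffer_size is precomputed as
 * reciprocal_value(256) when the cache is set up, so an object at
 * slab->s_mem + 1024 gives reciprocal_divide(1024, ...) == 4, the same
 * result as 1024 / 256 but without a hardware divide in the fast path.
 */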

/*
 * Number of objects per slab; a KFENCE-backed slab holds exactly one object.
 */
static inline int objs_per_slab(const struct kmem_cache *cache,
                     const struct slab *slab)
{
    if (is_kfence_address(slab_address(slab)))
        return 1;
    return cache->num;
}

#endif  /* _LINUX_SLAB_DEF_H */