/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>

/*
 * pcpu_block_md is the metadata block struct.
 * Each chunk's bitmap is split into a number of full blocks.
 * All units are in terms of bits.
 *
 * The scan hint is the largest known contiguous area before the contig hint.
 * It is not necessarily the actual largest contig hint though.  There is an
 * invariant that the scan_hint_start > contig_hint_start iff
 * scan_hint == contig_hint.  This is necessary because when scanning forward,
 * we don't know if a new contig hint would be better than the current one.
 */
struct pcpu_block_md {
    int scan_hint;          /* scan hint for block */
    int scan_hint_start;    /* block relative starting
                               position of the scan hint */
    int contig_hint;        /* contig hint for block */
    int contig_hint_start;  /* block relative starting
                               position of the contig hint */
    int left_free;          /* size of free space along
                               the left side of the block */
    int right_free;         /* size of free space along
                               the right side of the block */
    int first_free;         /* block position of first free */
    int nr_bits;            /* total bits responsible for */
};
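
/*
 * Illustrative only (not from the kernel source): a plausible metadata
 * state for a 1024-bit block whose free runs are bits [0, 100) and
 * [900, 1024).  Note scan_hint_start < contig_hint_start here, which is
 * consistent with the invariant above since scan_hint != contig_hint.
 */
#if 0
static const struct pcpu_block_md example_block_md = {
    .scan_hint         = 100,  /* the smaller free run, at bit 0 */
    .scan_hint_start   = 0,
    .contig_hint       = 124,  /* the largest free run, at bit 900 */
    .contig_hint_start = 900,
    .left_free         = 100,  /* free run touching the left edge */
    .right_free        = 124,  /* free run touching the right edge */
    .first_free        = 0,    /* first free bit in the block */
    .nr_bits           = 1024, /* block size in bits */
};
#endif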

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
    int                     nr_alloc;       /* # of allocations */
    size_t                  max_alloc_size; /* largest allocation size */
#endif

    struct list_head        list;           /* linked to pcpu_slot lists */
    int                     free_bytes;     /* free bytes in the chunk */
    struct pcpu_block_md    chunk_md;
    void                    *base_addr;     /* base address of this chunk */

    unsigned long           *alloc_map;     /* allocation map */
    unsigned long           *bound_map;     /* boundary map */
    struct pcpu_block_md    *md_blocks;     /* metadata blocks */

    void                    *data;          /* chunk data */
    bool                    immutable;      /* no [de]population allowed */
    bool                    isolated;       /* isolated from active chunk
                                               slots */
    int                     start_offset;   /* the overlap with the previous
                                               region to have a page aligned
                                               base_addr */
    int                     end_offset;     /* additional area required to
                                               have the region end page
                                               aligned */
#ifdef CONFIG_MEMCG_KMEM
    struct obj_cgroup       **obj_cgroups;  /* vector of object cgroups */
#endif

    int                     nr_pages;       /* # of pages served by this chunk */
    int                     nr_populated;   /* # of populated pages */
    int                     nr_empty_pop_pages; /* # of empty populated pages */
    unsigned long           populated[];    /* populated bitmap */
};
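
/*
 * Illustrative sketch (a hypothetical helper, not part of this header):
 * the populated[] bitmap above carries one bit per page served by the
 * chunk, so a page's population state can be tested with generic bitops.
 */
#if 0
#include <linux/bitops.h>

static inline bool pcpu_example_page_is_populated(struct pcpu_chunk *chunk,
                                                  int page_idx)
{
    return test_bit(page_idx, chunk->populated);
}
#endif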

extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;

/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
    return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}
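
/*
 * Worked example (assuming the common definition where
 * PCPU_BITMAP_BLOCK_SIZE equals PAGE_SIZE): a chunk serving 8 pages of
 * 4096 bytes yields 8 * 4096 / 4096 = 8 metadata blocks, i.e. one
 * pcpu_block_md per page.
 */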

/**
 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
    return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}
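
/*
 * Worked example (assuming PCPU_MIN_ALLOC_SIZE is 4 bytes, its usual
 * value of 1 << PCPU_MIN_ALLOC_SHIFT): a single 4096-byte page needs
 * 4096 / 4 = 1024 bits, i.e. one allocation-map bit per 4-byte unit.
 */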

/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.
 */
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
    return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}
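
/*
 * Illustrative sketch (a hypothetical helper, not part of this header):
 * walking the full allocation bitmap using the conversion above, here to
 * count how many minimum-sized allocation units of a chunk are free.
 */
#if 0
#include <linux/bitmap.h>

static inline int pcpu_example_count_free_units(struct pcpu_chunk *chunk)
{
    int bits = pcpu_chunk_map_bits(chunk);

    /* set bits in alloc_map mark allocated units */
    return bits - bitmap_weight(chunk->alloc_map, bits);
}
#endif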

/**
 * pcpu_obj_full_size - helper to calculate size of each accounted object
 * @size: size of area to allocate in bytes
 *
 * For each accounted object there is an extra space which is used to store
 * obj_cgroup membership. Charge it too.
 */
static inline size_t pcpu_obj_full_size(size_t size)
{
    size_t extra_size = 0;

#ifdef CONFIG_MEMCG_KMEM
    extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif

    return size * num_possible_cpus() + extra_size;
}
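
/*
 * Worked example (illustrative numbers): a 64-byte allocation on a system
 * with 8 possible CPUs, CONFIG_MEMCG_KMEM enabled, 8-byte pointers and a
 * 4-byte PCPU_MIN_ALLOC_SIZE charges 64 / 4 * 8 = 128 bytes of obj_cgroup
 * bookkeeping on top of 64 * 8 = 512 bytes of per-CPU data, 640 in total.
 */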

#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
    u64 nr_alloc;           /* lifetime # of allocations */
    u64 nr_dealloc;         /* lifetime # of deallocations */
    u64 nr_cur_alloc;       /* current # of allocations */
    u64 nr_max_alloc;       /* max # of live allocations */
    u32 nr_chunks;          /* current # of live chunks */
    u32 nr_max_chunks;      /* max # of live chunks */
    size_t min_alloc_size;  /* min allocation size */
    size_t max_alloc_size;  /* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

/*
 * For debug purposes. We don't care about the flexible array.
 */
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
    memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

    /* initialize min_alloc_size to unit_size */
    pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
    lockdep_assert_held(&pcpu_lock);

    pcpu_stats.nr_alloc++;
    pcpu_stats.nr_cur_alloc++;
    pcpu_stats.nr_max_alloc =
        max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
    pcpu_stats.min_alloc_size =
        min(pcpu_stats.min_alloc_size, size);
    pcpu_stats.max_alloc_size =
        max(pcpu_stats.max_alloc_size, size);

    chunk->nr_alloc++;
    chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}
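
/*
 * Illustrative call sequence (hypothetical caller, not from this file):
 * the CONTEXT note above means pcpu_lock must already be held when the
 * area stats helpers run, which lockdep_assert_held() enforces.
 */
#if 0
    spin_lock_irq(&pcpu_lock);
    /* ... find and mark the area within chunk ... */
    pcpu_stats_area_alloc(chunk, size);
    spin_unlock_irq(&pcpu_lock);
#endif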

/*
 * pcpu_stats_area_dealloc - decrement allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
    lockdep_assert_held(&pcpu_lock);

    pcpu_stats.nr_dealloc++;
    pcpu_stats.nr_cur_alloc--;

    chunk->nr_alloc--;
}

/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 */
static inline void pcpu_stats_chunk_alloc(void)
{
    unsigned long flags;

    spin_lock_irqsave(&pcpu_lock, flags);

    pcpu_stats.nr_chunks++;
    pcpu_stats.nr_max_chunks =
        max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

    spin_unlock_irqrestore(&pcpu_lock, flags);
}

/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 */
static inline void pcpu_stats_chunk_dealloc(void)
{
    unsigned long flags;

    spin_lock_irqsave(&pcpu_lock, flags);

    pcpu_stats.nr_chunks--;

    spin_unlock_irqrestore(&pcpu_lock, flags);
}
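
/*
 * Note the locking contrast with the area stats helpers above: the chunk
 * counters take pcpu_lock themselves, so, pcpu_lock being an ordinary
 * (non-recursive) spinlock, callers must not already hold it here.
 */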

#else

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */

#endif /* _MM_PERCPU_INTERNAL_H */