/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/percpu.h>

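/*
 * pcpu_block_md is the metadata block struct.  Each chunk's allocation
 * bitmap is split into a number of full blocks, and one of these structs
 * tracks the free-space hints for each block; struct pcpu_chunk also embeds
 * one (chunk_md) covering the chunk as a whole.  All units are in bits.
 */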
struct pcpu_block_md {
	int scan_hint;			/* scan hint for block */
	int scan_hint_start;		/* block relative starting position of the scan hint */
	int contig_hint;		/* contig hint for block */
	int contig_hint_start;		/* block relative starting position of the contig hint */
	int left_free;			/* size of free space along the left side of the block */
	int right_free;			/* size of free space along the right side of the block */
	int first_free;			/* block position of first free */
	int nr_bits;			/* total bits responsible for */
};

struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int nr_alloc;			/* # of allocations */
	size_t max_alloc_size;		/* largest allocation size */
#endif

	struct list_head list;		/* linked to pcpu_chunk_lists */
	int free_bytes;			/* free bytes in the chunk */
	struct pcpu_block_md chunk_md;	/* chunk-wide block metadata */
	void *base_addr;		/* base address of this chunk */

	unsigned long *alloc_map;	/* allocation map */
	unsigned long *bound_map;	/* boundary map */
	struct pcpu_block_md *md_blocks; /* metadata blocks */

	void *data;			/* chunk data */
	bool immutable;			/* no [de]population allowed */
	bool isolated;			/* isolated from active chunk slots */
	int start_offset;		/* the overlap with the previous
					   region to have a page aligned
					   base_addr */
	int end_offset;			/* additional area required to
					   have the region end page
					   aligned */
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup **obj_cgroups; /* vector of object cgroups */
#endif

	int nr_pages;			/* # of pages served by this chunk */
	int nr_populated;		/* # of populated pages */
	int nr_empty_pop_pages;		/* # of empty populated pages */
	unsigned long populated[];	/* populated bitmap */
};

extern spinlock_t pcpu_lock;

extern struct list_head *pcpu_chunk_lists;
extern int pcpu_nr_slots;
extern int pcpu_sidelined_slot;
extern int pcpu_to_depopulate_slot;
extern int pcpu_nr_empty_pop_pages;

extern struct pcpu_chunk *pcpu_first_chunk;
extern struct pcpu_chunk *pcpu_reserved_chunk;

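/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */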
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}

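/**
 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap.
 */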
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}

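/**
 * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bits in the bitmap.
 */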
static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
{
	return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
}

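/**
 * pcpu_obj_full_size - helper to calculate size of each accounted object
 * @size: size of area to allocate in bytes
 *
 * For each accounted object there is extra space used to store the
 * obj_cgroup membership, so charge it as well.
 */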
static inline size_t pcpu_obj_full_size(size_t size)
{
	size_t extra_size = 0;

#ifdef CONFIG_MEMCG_KMEM
	extra_size += size / PCPU_MIN_ALLOC_SIZE * sizeof(struct obj_cgroup *);
#endif

	return size * num_possible_cpus() + extra_size;
}

#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
	u64 nr_alloc;			/* lifetime # of allocations */
	u64 nr_dealloc;			/* lifetime # of deallocations */
	u64 nr_cur_alloc;		/* current # of allocations */
	u64 nr_max_alloc;		/* max # of live allocations */
	u32 nr_chunks;			/* current # of live chunks */
	u32 nr_max_chunks;		/* max # of live chunks */
	size_t min_alloc_size;		/* min allocation size */
	size_t max_alloc_size;		/* max allocation size */
};

extern struct percpu_stats pcpu_stats;
extern struct pcpu_alloc_info pcpu_stats_ai;

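/*
 * pcpu_stats_save_ai - record a copy of the allocation info for debug output.
 * Only the fixed part of pcpu_alloc_info is copied; the trailing flexible
 * group array is not needed by the stats code.
 */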
static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
	memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));

	/* initialize min_alloc_size to unit_size */
	pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
}

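/*
 * pcpu_stats_area_alloc - increment area allocation stats
 * @chunk: the location of the area being allocated
 * @size: size of area to allocate in bytes
 *
 * CONTEXT:
 * pcpu_lock.
 */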
static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_alloc++;
	pcpu_stats.nr_cur_alloc++;
	pcpu_stats.nr_max_alloc =
		max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
	pcpu_stats.min_alloc_size =
		min(pcpu_stats.min_alloc_size, size);
	pcpu_stats.max_alloc_size =
		max(pcpu_stats.max_alloc_size, size);

	chunk->nr_alloc++;
	chunk->max_alloc_size = max(chunk->max_alloc_size, size);
}

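/*
 * pcpu_stats_area_dealloc - decrement area allocation stats
 * @chunk: the location of the area being deallocated
 *
 * CONTEXT:
 * pcpu_lock.
 */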
static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
	lockdep_assert_held(&pcpu_lock);

	pcpu_stats.nr_dealloc++;
	pcpu_stats.nr_cur_alloc--;

	chunk->nr_alloc--;
}

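/*
 * pcpu_stats_chunk_alloc - increment chunk stats
 */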
static inline void pcpu_stats_chunk_alloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks++;
	pcpu_stats.nr_max_chunks =
		max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

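/*
 * pcpu_stats_chunk_dealloc - decrement chunk stats
 */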
static inline void pcpu_stats_chunk_dealloc(void)
{
	unsigned long flags;
	spin_lock_irqsave(&pcpu_lock, flags);

	pcpu_stats.nr_chunks--;

	spin_unlock_irqrestore(&pcpu_lock, flags);
}

#else

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif	/* CONFIG_PERCPU_STATS */

#endif	/* _MM_PERCPU_INTERNAL_H */