/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT	1
#define DISABLE_NUMA_STAT	0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
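
/*
 * Usage sketch (illustrative, not from this header; PGFAULT and PGFREE
 * come from <linux/vm_event_item.h>): the plain wrappers are safe from
 * any context, while the __ variants assume the caller already keeps
 * the task on one CPU:
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGFREE, 1 << order);
 */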

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif		/* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
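
/*
 * Illustrative expansion (not from this header): the macro relies on
 * the per-zone event items being declared contiguously in zone order,
 * so a call such as
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * resolves to the PGALLOC counter of the page's own zone, e.g.
 * PGALLOC_DMA or PGALLOC_NORMAL.
 */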

/*
 * Zone and node based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	/*
	 * Pending per-cpu deltas can leave the global counter transiently
	 * negative; clamp to zero rather than return a huge unsigned value.
	 */
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	/* Byte-accounted items must use global_node_page_state_pages() */
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
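
/*
 * Illustrative choice between the two readers (not from this header):
 * paths that must not act on a stale value, such as watermark checks
 * under memory pressure, pay for the per-cpu walk:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * while fast paths settle for the cheaper zone_page_state().
 */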

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif		/* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}
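
/*
 * Usage sketch (illustrative; NR_MLOCK is a zone_stat_item): accounting
 * a whole folio as mlocked, sized in base pages:
 *
 *	zone_stat_mod_folio(folio, NR_MLOCK, folio_nr_pages(folio));
 *
 * which is equivalent to zone_stat_add_folio(folio, NR_MLOCK).
 */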

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
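
/*
 * Illustrative call from a buddy free path (migratetype is assumed to
 * be in scope at the call site): freeing a 2^order block keeps
 * NR_FREE_CMA_PAGES in step with NR_FREE_PAGES for CMA pageblocks:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 */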

extern const char * const vmstat_text[];
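
/*
 * vmstat_text is one flat array of counter names. The *_stat_name()
 * helpers below index it with cumulative offsets, so its layout must
 * be: zone items, then NUMA event items, then node items, then
 * writeback items, then (optionally) VM event items.
 */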

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; /* skip "nr_" */
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif		/* CONFIG_MEMCG */
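
/*
 * Usage sketch (illustrative; NR_ANON_MAPPED is a node_stat_item):
 * charging one newly mapped anonymous page against its lruvec, which
 * with CONFIG_MEMCG also updates the owning memcg's counters:
 *
 *	mod_lruvec_page_state(page, NR_ANON_MAPPED, 1);
 */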

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
#endif /* _LINUX_VMSTAT_H */