/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

/* Events reported via memory.events and memory.swap.events */
enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long			state_prev[MEMCG_NR_STAT];
	unsigned long		events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long		nr_page_events;
	unsigned long		targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long			state[MEMCG_NR_STAT];
	unsigned long		events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long			state_pending[MEMCG_NR_STAT];
	unsigned long		events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			lruvec_stats;

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct shrinker_info __rcu	*shrinker_info;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * an occasional foreign writeback, keeping track of four should be good
 * enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of the foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all its child objects.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
	unsigned long zswap_max;
#endif

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats	vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * How many pages a single charge attempt batches into the per-CPU stock,
 * so that the page counters are not hit for every individual page.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/**
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
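
/*
 * Illustrative sketch (not part of the original header): reading the memcg
 * of a folio under RCU for a quick statistics lookup. The surrounding
 * function and the use made of the result are hypothetical.
 *
 *	rcu_read_lock();
 *	memcg = folio_memcg_rcu(folio);
 *	if (memcg)
 *		nr = memcg_page_state(memcg, NR_FILE_PAGES);
 *	rcu_read_unlock();
 */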

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function unlike page_memcg() can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}
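
/*
 * Illustrative sketch (not from the original header): pinning the memcg that
 * currently backs an objcg, doing some work against it, and dropping the
 * reference again. "do_something()" is a hypothetical placeholder.
 *
 *	memcg = get_mem_cgroup_from_objcg(objcg);
 *	do_something(memcg);
 *	mem_cgroup_put(memcg);
 */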

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}
#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * No reclaim protection is applied to the reclaim target itself.
	 * The effective values (emin/elow) are calculated top-down by
	 * mem_cgroup_calculate_protection() relative to the reclaim root,
	 * and resetting them in the calculation instead would let parallel
	 * reclaimers with different reclaim targets corrupt each other's
	 * view of the subtree's protection, so the target is special-cased
	 * here.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
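
/*
 * Illustrative sketch (not from the original header) of how a reclaim loop
 * can consult the protection checks above; the real logic lives in
 * mm/vmscan.c (shrink_node_memcgs()) and is more involved.
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(memcg))
 *		continue;	// hard protection: never reclaim
 *	if (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim) {
 *		sc->memcg_low_skipped = 1;
 *		continue;	// best-effort protection
 *	}
 */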

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary.  If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}
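
/*
 * Illustrative sketch (not from the original header): charging a newly
 * allocated pagecache folio before inserting it into the page cache; the
 * hypothetical caller's error handling is abbreviated.
 *
 *	folio = filemap_alloc_folio(GFP_KERNEL, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	if (mem_cgroup_charge(folio, current->mm, GFP_KERNEL)) {
 *		folio_put(folio);
 *		return -ENOMEM;
 *	}
 */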

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}
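
/*
 * Illustrative sketch (not from the original header): resolving a folio to
 * its lruvec and bumping the memcg component of a node stat, assuming the
 * caller already guarantees a stable folio->memcg binding. Callers that also
 * touch the LRU lists normally go through the locked helpers further below.
 *
 *	lruvec = folio_lruvec(folio);
 *	mod_memcg_lruvec_state(lruvec, NR_SHMEM, folio_nr_pages(folio));
 */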

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
						unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}
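
/*
 * Illustrative sketch (not from the original header): taking a reference on
 * the current task's objcg before stashing it in a hypothetical structure,
 * and dropping it when that structure is torn down. "my_struct" is made up.
 *
 *	objcg = get_obj_cgroup_from_current();	// returns with a reference
 *	if (objcg)
 *		my_struct->objcg = objcg;
 *	...
 *	if (my_struct->objcg)
 *		obj_cgroup_put(my_struct->objcg);
 */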
0814
0815 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
0816 {
0817 if (memcg)
0818 css_put(&memcg->css);
0819 }
0820
0821 #define mem_cgroup_from_counter(counter, member) \
0822 container_of(counter, struct mem_cgroup, member)
0823
0824 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
0825 struct mem_cgroup *,
0826 struct mem_cgroup_reclaim_cookie *);
0827 void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
0828 int mem_cgroup_scan_tasks(struct mem_cgroup *,
0829 int (*)(struct task_struct *, void *), void *);
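
/*
 * Illustrative sketch (not from the original header): the canonical hierarchy
 * walk over a root memcg and its descendants. Breaking out of the loop early
 * must go through mem_cgroup_iter_break() so the iterator reference is
 * dropped; "should_stop()" is a hypothetical condition.
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		if (should_stop(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */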
0830
0831 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
0832 {
0833 if (mem_cgroup_disabled())
0834 return 0;
0835
0836 return memcg->id.id;
0837 }
0838 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
0839
0840 #ifdef CONFIG_SHRINKER_DEBUG
0841 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
0842 {
0843 return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
0844 }
0845
0846 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
0847 #endif
0848
0849 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
0850 {
0851 return mem_cgroup_from_css(seq_css(m));
0852 }
0853
0854 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
0855 {
0856 struct mem_cgroup_per_node *mz;
0857
0858 if (mem_cgroup_disabled())
0859 return NULL;
0860
0861 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
0862 return mz->memcg;
0863 }

/*
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}
0876
0877 static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
0878 struct mem_cgroup *root)
0879 {
0880 if (root == memcg)
0881 return true;
0882 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
0883 }
0884
0885 static inline bool mm_match_cgroup(struct mm_struct *mm,
0886 struct mem_cgroup *memcg)
0887 {
0888 struct mem_cgroup *task_memcg;
0889 bool match = false;
0890
0891 rcu_read_lock();
0892 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
0893 if (task_memcg)
0894 match = mem_cgroup_is_descendant(task_memcg, memcg);
0895 rcu_read_unlock();
0896 return match;
0897 }
0898
0899 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
0900 ino_t page_cgroup_ino(struct page *page);
0901
0902 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
0903 {
0904 if (mem_cgroup_disabled())
0905 return true;
0906 return !!(memcg->css.flags & CSS_ONLINE);
0907 }
0908
0909 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
0910 int zid, int nr_pages);
0911
0912 static inline
0913 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
0914 enum lru_list lru, int zone_idx)
0915 {
0916 struct mem_cgroup_per_node *mz;
0917
0918 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
0919 return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
0920 }
0921
0922 void mem_cgroup_handle_over_high(void);
0923
0924 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
0925
0926 unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
0927
0928 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
0929 struct task_struct *p);
0930
0931 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
0932
0933 static inline void mem_cgroup_enter_user_fault(void)
0934 {
0935 WARN_ON(current->in_user_fault);
0936 current->in_user_fault = 1;
0937 }
0938
0939 static inline void mem_cgroup_exit_user_fault(void)
0940 {
0941 WARN_ON(!current->in_user_fault);
0942 current->in_user_fault = 0;
0943 }
0944
0945 static inline bool task_in_memcg_oom(struct task_struct *p)
0946 {
0947 return p->memcg_in_oom;
0948 }
0949
0950 bool mem_cgroup_oom_synchronize(bool wait);
0951 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
0952 struct mem_cgroup *oom_domain);
0953 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
0954
0955 void folio_memcg_lock(struct folio *folio);
0956 void folio_memcg_unlock(struct folio *folio);
0957 void lock_page_memcg(struct page *page);
0958 void unlock_page_memcg(struct page *page);
0959
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
0963 static inline void mod_memcg_state(struct mem_cgroup *memcg,
0964 int idx, int val)
0965 {
0966 unsigned long flags;
0967
0968 local_irq_save(flags);
0969 __mod_memcg_state(memcg, idx, val);
0970 local_irq_restore(flags);
0971 }
0972
0973 static inline void mod_memcg_page_state(struct page *page,
0974 int idx, int val)
0975 {
0976 struct mem_cgroup *memcg;
0977
0978 if (mem_cgroup_disabled())
0979 return;
0980
0981 rcu_read_lock();
0982 memcg = page_memcg(page);
0983 if (memcg)
0984 mod_memcg_state(memcg, idx, val);
0985 rcu_read_unlock();
0986 }
0987
0988 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
0989 {
0990 long x = READ_ONCE(memcg->vmstats.state[idx]);
0991 #ifdef CONFIG_SMP
0992 if (x < 0)
0993 x = 0;
0994 #endif
0995 return x;
0996 }
0997
0998 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
0999 enum node_stat_item idx)
1000 {
1001 struct mem_cgroup_per_node *pn;
1002 long x;
1003
1004 if (mem_cgroup_disabled())
1005 return node_page_state(lruvec_pgdat(lruvec), idx);
1006
1007 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1008 x = READ_ONCE(pn->lruvec_stats.state[idx]);
1009 #ifdef CONFIG_SMP
1010 if (x < 0)
1011 x = 0;
1012 #endif
1013 return x;
1014 }
1015
1016 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1017 enum node_stat_item idx)
1018 {
1019 struct mem_cgroup_per_node *pn;
1020 long x = 0;
1021 int cpu;
1022
1023 if (mem_cgroup_disabled())
1024 return node_page_state(lruvec_pgdat(lruvec), idx);
1025
1026 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1027 for_each_possible_cpu(cpu)
1028 x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
1029 #ifdef CONFIG_SMP
1030 if (x < 0)
1031 x = 0;
1032 #endif
1033 return x;
1034 }
1035
1036 void mem_cgroup_flush_stats(void);
1037 void mem_cgroup_flush_stats_delayed(void);
1038
1039 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
1040 int val);
1041 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
1042
1043 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1044 int val)
1045 {
1046 unsigned long flags;
1047
1048 local_irq_save(flags);
1049 __mod_lruvec_kmem_state(p, idx, val);
1050 local_irq_restore(flags);
1051 }
1052
1053 static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
1054 enum node_stat_item idx, int val)
1055 {
1056 unsigned long flags;
1057
1058 local_irq_save(flags);
1059 __mod_memcg_lruvec_state(lruvec, idx, val);
1060 local_irq_restore(flags);
1061 }
1062
1063 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
1064 unsigned long count);
1065
1066 static inline void count_memcg_events(struct mem_cgroup *memcg,
1067 enum vm_event_item idx,
1068 unsigned long count)
1069 {
1070 unsigned long flags;
1071
1072 local_irq_save(flags);
1073 __count_memcg_events(memcg, idx, count);
1074 local_irq_restore(flags);
1075 }
1076
1077 static inline void count_memcg_page_event(struct page *page,
1078 enum vm_event_item idx)
1079 {
1080 struct mem_cgroup *memcg = page_memcg(page);
1081
1082 if (memcg)
1083 count_memcg_events(memcg, idx, 1);
1084 }
1085
1086 static inline void count_memcg_folio_events(struct folio *folio,
1087 enum vm_event_item idx, unsigned long nr)
1088 {
1089 struct mem_cgroup *memcg = folio_memcg(folio);
1090
1091 if (memcg)
1092 count_memcg_events(memcg, idx, nr);
1093 }
1094
1095 static inline void count_memcg_event_mm(struct mm_struct *mm,
1096 enum vm_event_item idx)
1097 {
1098 struct mem_cgroup *memcg;
1099
1100 if (mem_cgroup_disabled())
1101 return;
1102
1103 rcu_read_lock();
1104 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1105 if (likely(memcg))
1106 count_memcg_events(memcg, idx, 1);
1107 rcu_read_unlock();
1108 }
1109
1110 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1111 enum memcg_memory_event event)
1112 {
1113 bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
1114 event == MEMCG_SWAP_FAIL;
1115
1116 atomic_long_inc(&memcg->memory_events_local[event]);
1117 if (!swap_event)
1118 cgroup_file_notify(&memcg->events_local_file);
1119
1120 do {
1121 atomic_long_inc(&memcg->memory_events[event]);
1122 if (swap_event)
1123 cgroup_file_notify(&memcg->swap_events_file);
1124 else
1125 cgroup_file_notify(&memcg->events_file);
1126
1127 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1128 break;
1129 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
1130 break;
1131 } while ((memcg = parent_mem_cgroup(memcg)) &&
1132 !mem_cgroup_is_root(memcg));
1133 }
1134
1135 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1136 enum memcg_memory_event event)
1137 {
1138 struct mem_cgroup *memcg;
1139
1140 if (mem_cgroup_disabled())
1141 return;
1142
1143 rcu_read_lock();
1144 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1145 if (likely(memcg))
1146 memcg_memory_event(memcg, event);
1147 rcu_read_unlock();
1148 }
1149
1150 void split_page_memcg(struct page *head, unsigned int nr);
1151
1152 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1153 gfp_t gfp_mask,
1154 unsigned long *total_scanned);
1155
1156 #else
1157
1158 #define MEM_CGROUP_ID_SHIFT 0
1159 #define MEM_CGROUP_ID_MAX 0
1160
1161 static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1162 {
1163 return NULL;
1164 }
1165
1166 static inline struct mem_cgroup *page_memcg(struct page *page)
1167 {
1168 return NULL;
1169 }
1170
1171 static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
1172 {
1173 WARN_ON_ONCE(!rcu_read_lock_held());
1174 return NULL;
1175 }
1176
1177 static inline struct mem_cgroup *page_memcg_check(struct page *page)
1178 {
1179 return NULL;
1180 }
1181
1182 static inline bool folio_memcg_kmem(struct folio *folio)
1183 {
1184 return false;
1185 }
1186
1187 static inline bool PageMemcgKmem(struct page *page)
1188 {
1189 return false;
1190 }
1191
1192 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1193 {
1194 return true;
1195 }
1196
1197 static inline bool mem_cgroup_disabled(void)
1198 {
1199 return true;
1200 }
1201
1202 static inline void memcg_memory_event(struct mem_cgroup *memcg,
1203 enum memcg_memory_event event)
1204 {
1205 }
1206
1207 static inline void memcg_memory_event_mm(struct mm_struct *mm,
1208 enum memcg_memory_event event)
1209 {
1210 }
1211
1212 static inline void mem_cgroup_protection(struct mem_cgroup *root,
1213 struct mem_cgroup *memcg,
1214 unsigned long *min,
1215 unsigned long *low)
1216 {
1217 *min = *low = 0;
1218 }
1219
1220 static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1221 struct mem_cgroup *memcg)
1222 {
1223 }
1224
1225 static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
1226 {
1227 return false;
1228 }
1229
1230 static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
1231 {
1232 return false;
1233 }
1234
1235 static inline int mem_cgroup_charge(struct folio *folio,
1236 struct mm_struct *mm, gfp_t gfp)
1237 {
1238 return 0;
1239 }
1240
1241 static inline int mem_cgroup_swapin_charge_page(struct page *page,
1242 struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
1243 {
1244 return 0;
1245 }
1246
1247 static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
1248 {
1249 }
1250
1251 static inline void mem_cgroup_uncharge(struct folio *folio)
1252 {
1253 }
1254
1255 static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
1256 {
1257 }
1258
1259 static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
1260 {
1261 }
1262
1263 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
1264 struct pglist_data *pgdat)
1265 {
1266 return &pgdat->__lruvec;
1267 }
1268
1269 static inline struct lruvec *folio_lruvec(struct folio *folio)
1270 {
1271 struct pglist_data *pgdat = folio_pgdat(folio);
1272 return &pgdat->__lruvec;
1273 }
1274
1275 static inline
1276 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1277 {
1278 }
1279
1280 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
1281 {
1282 return NULL;
1283 }
1284
1285 static inline bool mm_match_cgroup(struct mm_struct *mm,
1286 struct mem_cgroup *memcg)
1287 {
1288 return true;
1289 }
1290
1291 static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1292 {
1293 return NULL;
1294 }
1295
1296 static inline
1297 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
1298 {
1299 return NULL;
1300 }
1301
1302 static inline void obj_cgroup_put(struct obj_cgroup *objcg)
1303 {
1304 }
1305
1306 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
1307 {
1308 }
1309
1310 static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
1311 {
1312 struct pglist_data *pgdat = folio_pgdat(folio);
1313
1314 spin_lock(&pgdat->__lruvec.lru_lock);
1315 return &pgdat->__lruvec;
1316 }
1317
1318 static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1319 {
1320 struct pglist_data *pgdat = folio_pgdat(folio);
1321
1322 spin_lock_irq(&pgdat->__lruvec.lru_lock);
1323 return &pgdat->__lruvec;
1324 }
1325
1326 static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1327 unsigned long *flagsp)
1328 {
1329 struct pglist_data *pgdat = folio_pgdat(folio);
1330
1331 spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
1332 return &pgdat->__lruvec;
1333 }
1334
1335 static inline struct mem_cgroup *
1336 mem_cgroup_iter(struct mem_cgroup *root,
1337 struct mem_cgroup *prev,
1338 struct mem_cgroup_reclaim_cookie *reclaim)
1339 {
1340 return NULL;
1341 }
1342
1343 static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
1344 struct mem_cgroup *prev)
1345 {
1346 }
1347
1348 static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1349 int (*fn)(struct task_struct *, void *), void *arg)
1350 {
1351 return 0;
1352 }
1353
1354 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
1355 {
1356 return 0;
1357 }
1358
1359 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
1360 {
1361 WARN_ON_ONCE(id);
1362
1363 return NULL;
1364 }
1365
1366 #ifdef CONFIG_SHRINKER_DEBUG
1367 static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
1368 {
1369 return 0;
1370 }
1371
1372 static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
1373 {
1374 return NULL;
1375 }
1376 #endif
1377
1378 static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
1379 {
1380 return NULL;
1381 }
1382
1383 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
1384 {
1385 return NULL;
1386 }
1387
1388 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
1389 {
1390 return true;
1391 }
1392
1393 static inline
1394 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
1395 enum lru_list lru, int zone_idx)
1396 {
1397 return 0;
1398 }
1399
1400 static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1401 {
1402 return 0;
1403 }
1404
1405 static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1406 {
1407 return 0;
1408 }
1409
1410 static inline void
1411 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1412 {
1413 }
1414
1415 static inline void
1416 mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1417 {
1418 }
1419
1420 static inline void lock_page_memcg(struct page *page)
1421 {
1422 }
1423
1424 static inline void unlock_page_memcg(struct page *page)
1425 {
1426 }
1427
1428 static inline void folio_memcg_lock(struct folio *folio)
1429 {
1430 }
1431
1432 static inline void folio_memcg_unlock(struct folio *folio)
1433 {
1434 }
1435
1436 static inline void mem_cgroup_handle_over_high(void)
1437 {
1438 }
1439
1440 static inline void mem_cgroup_enter_user_fault(void)
1441 {
1442 }
1443
1444 static inline void mem_cgroup_exit_user_fault(void)
1445 {
1446 }
1447
1448 static inline bool task_in_memcg_oom(struct task_struct *p)
1449 {
1450 return false;
1451 }
1452
1453 static inline bool mem_cgroup_oom_synchronize(bool wait)
1454 {
1455 return false;
1456 }
1457
1458 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
1459 struct task_struct *victim, struct mem_cgroup *oom_domain)
1460 {
1461 return NULL;
1462 }
1463
1464 static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1465 {
1466 }
1467
1468 static inline void __mod_memcg_state(struct mem_cgroup *memcg,
1469 int idx,
1470 int nr)
1471 {
1472 }
1473
1474 static inline void mod_memcg_state(struct mem_cgroup *memcg,
1475 int idx,
1476 int nr)
1477 {
1478 }
1479
1480 static inline void mod_memcg_page_state(struct page *page,
1481 int idx, int val)
1482 {
1483 }
1484
1485 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
1486 {
1487 return 0;
1488 }
1489
1490 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
1491 enum node_stat_item idx)
1492 {
1493 return node_page_state(lruvec_pgdat(lruvec), idx);
1494 }
1495
1496 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
1497 enum node_stat_item idx)
1498 {
1499 return node_page_state(lruvec_pgdat(lruvec), idx);
1500 }
1501
1502 static inline void mem_cgroup_flush_stats(void)
1503 {
1504 }
1505
1506 static inline void mem_cgroup_flush_stats_delayed(void)
1507 {
1508 }
1509
1510 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
1511 enum node_stat_item idx, int val)
1512 {
1513 }
1514
1515 static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1516 int val)
1517 {
1518 struct page *page = virt_to_head_page(p);
1519
1520 __mod_node_page_state(page_pgdat(page), idx, val);
1521 }
1522
1523 static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
1524 int val)
1525 {
1526 struct page *page = virt_to_head_page(p);
1527
1528 mod_node_page_state(page_pgdat(page), idx, val);
1529 }
1530
1531 static inline void count_memcg_events(struct mem_cgroup *memcg,
1532 enum vm_event_item idx,
1533 unsigned long count)
1534 {
1535 }
1536
1537 static inline void __count_memcg_events(struct mem_cgroup *memcg,
1538 enum vm_event_item idx,
1539 unsigned long count)
1540 {
1541 }
1542
1543 static inline void count_memcg_page_event(struct page *page,
1544 int idx)
1545 {
1546 }
1547
1548 static inline void count_memcg_folio_events(struct folio *folio,
1549 enum vm_event_item idx, unsigned long nr)
1550 {
1551 }
1552
1553 static inline
1554 void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
1555 {
1556 }
1557
1558 static inline void split_page_memcg(struct page *head, unsigned int nr)
1559 {
1560 }
1561
1562 static inline
1563 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1564 gfp_t gfp_mask,
1565 unsigned long *total_scanned)
1566 {
1567 return 0;
1568 }
1569 #endif
1570
1571 static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1572 {
1573 __mod_lruvec_kmem_state(p, idx, 1);
1574 }
1575
1576 static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1577 {
1578 __mod_lruvec_kmem_state(p, idx, -1);
1579 }
1580
1581 static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1582 {
1583 struct mem_cgroup *memcg;
1584
1585 memcg = lruvec_memcg(lruvec);
1586 if (!memcg)
1587 return NULL;
1588 memcg = parent_mem_cgroup(memcg);
1589 if (!memcg)
1590 return NULL;
1591 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1592 }
1593
1594 static inline void unlock_page_lruvec(struct lruvec *lruvec)
1595 {
1596 spin_unlock(&lruvec->lru_lock);
1597 }
1598
1599 static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1600 {
1601 spin_unlock_irq(&lruvec->lru_lock);
1602 }
1603
1604 static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1605 unsigned long flags)
1606 {
1607 spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1608 }
1609
/* Test requires a stable folio->memcg binding, see folio_memcg(). */
static inline bool folio_matches_lruvec(struct folio *folio,
		struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}
1617
/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}
1631
/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
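
/*
 * Illustrative sketch (not from the original header): the relock helpers are
 * meant for batched LRU walks where consecutive folios often share a lruvec,
 * roughly the pattern used when releasing a batch of folios.
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		// operate on folio while its lruvec lock is held
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */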
1645
1646 #ifdef CONFIG_CGROUP_WRITEBACK
1647
1648 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
1649 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
1650 unsigned long *pheadroom, unsigned long *pdirty,
1651 unsigned long *pwriteback);
1652
1653 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
1654 struct bdi_writeback *wb);
1655
1656 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1657 struct bdi_writeback *wb)
1658 {
1659 if (mem_cgroup_disabled())
1660 return;
1661
1662 if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
1663 mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
1664 }
1665
1666 void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
1667
1668 #else
1669
1670 static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
1671 {
1672 return NULL;
1673 }
1674
1675 static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
1676 unsigned long *pfilepages,
1677 unsigned long *pheadroom,
1678 unsigned long *pdirty,
1679 unsigned long *pwriteback)
1680 {
1681 }
1682
1683 static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
1684 struct bdi_writeback *wb)
1685 {
1686 }
1687
1688 static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
1689 {
1690 }
1691
1692 #endif
1693
1694 struct sock;
1695 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
1696 gfp_t gfp_mask);
1697 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
1698 #ifdef CONFIG_MEMCG
1699 extern struct static_key_false memcg_sockets_enabled_key;
1700 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
1701 void mem_cgroup_sk_alloc(struct sock *sk);
1702 void mem_cgroup_sk_free(struct sock *sk);
1703 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1704 {
1705 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
1706 return true;
1707 do {
1708 if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
1709 return true;
1710 } while ((memcg = parent_mem_cgroup(memcg)));
1711 return false;
1712 }
1713
1714 int alloc_shrinker_info(struct mem_cgroup *memcg);
1715 void free_shrinker_info(struct mem_cgroup *memcg);
1716 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
1717 void reparent_shrinker_deferred(struct mem_cgroup *memcg);
1718 #else
1719 #define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
1722 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
1723 {
1724 return false;
1725 }
1726
1727 static inline void set_shrinker_bit(struct mem_cgroup *memcg,
1728 int nid, int shrinker_id)
1729 {
1730 }
1731 #endif
1732
1733 #ifdef CONFIG_MEMCG_KMEM
1734 bool mem_cgroup_kmem_disabled(void);
1735 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
1736 void __memcg_kmem_uncharge_page(struct page *page, int order);
1737
1738 struct obj_cgroup *get_obj_cgroup_from_current(void);
1739 struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
1740
1741 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
1742 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
1743
1744 extern struct static_key_false memcg_kmem_enabled_key;
1745
1746 static inline bool memcg_kmem_enabled(void)
1747 {
1748 return static_branch_likely(&memcg_kmem_enabled_key);
1749 }
1750
1751 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1752 int order)
1753 {
1754 if (memcg_kmem_enabled())
1755 return __memcg_kmem_charge_page(page, gfp, order);
1756 return 0;
1757 }
1758
1759 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1760 {
1761 if (memcg_kmem_enabled())
1762 __memcg_kmem_uncharge_page(page, order);
1763 }
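
/*
 * Illustrative sketch (not from the original header): kernel allocations that
 * bypass the slab allocator can be accounted by charging the page directly.
 * Allocating with __GFP_ACCOUNT does this internally; the explicit calls
 * below show the underlying charge/uncharge pairing.
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */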

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}
1773
1774 struct mem_cgroup *mem_cgroup_from_obj(void *p);
1775 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
1776
1777 static inline void count_objcg_event(struct obj_cgroup *objcg,
1778 enum vm_event_item idx)
1779 {
1780 struct mem_cgroup *memcg;
1781
1782 if (mem_cgroup_kmem_disabled())
1783 return;
1784
1785 rcu_read_lock();
1786 memcg = obj_cgroup_memcg(objcg);
1787 count_memcg_events(memcg, idx, 1);
1788 rcu_read_unlock();
1789 }
1790
1791 #else
1792 static inline bool mem_cgroup_kmem_disabled(void)
1793 {
1794 return true;
1795 }
1796
1797 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1798 int order)
1799 {
1800 return 0;
1801 }
1802
1803 static inline void memcg_kmem_uncharge_page(struct page *page, int order)
1804 {
1805 }
1806
1807 static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
1808 int order)
1809 {
1810 return 0;
1811 }
1812
1813 static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
1814 {
1815 }
1816
1817 static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
1818 {
1819 return NULL;
1820 }
1821
1822 static inline bool memcg_kmem_enabled(void)
1823 {
1824 return false;
1825 }
1826
1827 static inline int memcg_kmem_id(struct mem_cgroup *memcg)
1828 {
1829 return -1;
1830 }
1831
1832 static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
1833 {
1834 return NULL;
1835 }
1836
1837 static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
1838 {
1839 return NULL;
1840 }
1841
1842 static inline void count_objcg_event(struct obj_cgroup *objcg,
1843 enum vm_event_item idx)
1844 {
1845 }
1846
1847 #endif
1848
1849 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1850 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
1851 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
1852 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
1853 #else
1854 static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
1855 {
1856 return true;
1857 }
1858 static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
1859 size_t size)
1860 {
1861 }
1862 static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
1863 size_t size)
1864 {
1865 }
1866 #endif
1867
1868 #endif