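/*
 * mm/swap.c
 *
 * Per-CPU LRU folio batching and the final folio release paths for the
 * Linux VM: batched LRU additions, rotations, (de)activation, lazyfree
 * handling, and reference-dropping helpers such as __folio_put() and
 * release_pages().
 */
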
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

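/* Swap readahead cluster size, as log2 of a page count (see swap_setup()). */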
int page_cluster;

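/*
 * Per-CPU batch of folios whose writeback has completed and which should
 * be rotated to the tail of the inactive list.  The local_lock also
 * disables interrupts, because folio_rotate_reclaimable() can be called
 * from the writeback completion path.
 */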
struct lru_rotate {
	local_lock_t lock;
	struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

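/*
 * The following per-CPU folio batches are protected by cpu_fbatches.lock,
 * which only disables preemption (interrupts stay enabled).  They are
 * drained into the LRU lists in batches to reduce lru_lock contention.
 */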
struct cpu_fbatches {
	local_lock_t lock;
	struct folio_batch lru_add;
	struct folio_batch lru_deactivate_file;
	struct folio_batch lru_deactivate;
	struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
	struct folio_batch activate;
#endif
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

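/*
 * Final cleanup for a folio whose last reference has been dropped:
 * take it off the LRU (if it is still there) and fix up the mlock
 * accounting if the mlocked flag is still set.
 */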
static void __page_cache_release(struct folio *folio)
{
	if (folio_test_lru(folio)) {
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		lruvec_del_folio(lruvec, folio);
		__folio_clear_lru_flags(folio);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}

	if (unlikely(folio_test_mlocked(folio))) {
		long nr_pages = folio_nr_pages(folio);

		__folio_clear_mlocked(folio);
		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	}
}

static void __folio_put_small(struct folio *folio)
{
	__page_cache_release(folio);
	mem_cgroup_uncharge(folio);
	free_unref_page(&folio->page, 0);
}

static void __folio_put_large(struct folio *folio)
{
	/*
	 * __page_cache_release() is meant for THPs, not for hugetlb
	 * folios: hugetlb folios are never placed on the LRU lists and
	 * their cgroup accounting is handled separately, so skip it here.
	 */
	if (!folio_test_hugetlb(folio))
		__page_cache_release(folio);
	destroy_large_folio(folio);
}

void __folio_put(struct folio *folio)
{
	if (unlikely(folio_is_zone_device(folio)))
		free_zone_device_page(&folio->page);
	else if (unlikely(folio_test_large(folio)))
		__folio_put_large(folio);
	else
		__folio_put_small(folio);
}
EXPORT_SYMBOL(__folio_put);

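/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Pages
 * that still have references left are simply removed from the list; the
 * remainder are handed back to the page allocator in one batch.
 */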
void put_pages_list(struct list_head *pages)
{
	struct folio *folio, *next;

	list_for_each_entry_safe(folio, next, pages, lru) {
		if (!folio_put_testzero(folio)) {
			list_del(&folio->lru);
			continue;
		}
		if (folio_test_large(folio)) {
			list_del(&folio->lru);
			__folio_put_large(folio);
			continue;
		}
		/* The LRU flag must be clear, since folio->lru is in use here. */
	}

	free_unref_page_list(pages);
	INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);

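/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned.  This may be fewer than the number
 * requested.  Each page returned must be released with a put_page() call
 * when it is finished with.
 */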
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
{
	int was_unevictable = folio_test_clear_unevictable(folio);
	long nr_pages = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	/*
	 * The mlocked flag is only cleared while the LRU lock is held (or
	 * once all other references to the folio are gone), so the
	 * folio_evictable() check below cannot strand an evictable folio
	 * on the unevictable list.
	 */
	if (folio_evictable(folio)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		folio_clear_active(folio);
		folio_set_unevictable(folio);
		/*
		 * Start with a zero mlock_count: mlock will count the folio
		 * itself when it finds it on the unevictable list, so
		 * erring low avoids double counting.
		 */
		folio->mlock_count = 0;
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	lruvec_add_folio(lruvec, folio);
	trace_mm_lru_insertion(folio);
}

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* Clearing the LRU flag blocks memcg migration while the folio moves. */
		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
			continue;

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		move_fn(lruvec, folio);

		folio_set_lru(folio);
	}

	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_init(fbatch);
}

static void folio_batch_add_and_move(struct folio_batch *fbatch,
		struct folio *folio, move_fn_t move_fn)
{
	if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
	    !lru_cache_disabled())
		return;
	folio_batch_move_lru(fbatch, move_fn);
}

static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}

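/*
 * Writeback has just finished on a folio that was marked for immediate
 * reclaim.  If the folio still looks reclaimable (clean, unlocked, not
 * unevictable and on an LRU list), queue it for rotation to the tail of
 * the inactive list so reclaim finds it quickly.  Interrupts are disabled
 * because this can be called from the writeback completion path.
 */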
void folio_rotate_reclaimable(struct folio *folio)
{
	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
		struct folio_batch *fbatch;
		unsigned long flags;

		folio_get(folio);
		local_lock_irqsave(&lru_rotate.lock, flags);
		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * lru_lock keeps the cost counters and their decay
		 * consistent against concurrent updaters walking the same
		 * lruvec hierarchy.
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record the cost event. */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events: keeping the cost counters as a
		 * floating average weighs recent reclaim cost more heavily
		 * than old activity and prevents the counters from
		 * overflowing.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_folio(struct folio *folio)
{
	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
		      folio_nr_pages(folio));
}

static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
		trace_mm_lru_activate(folio);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, folio_activate_fn);
}

static void folio_activate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

#else
static inline void folio_activate_drain(int cpu)
{
}

static void folio_activate(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio_test_clear_lru(folio)) {
		lruvec = folio_lruvec_lock_irq(folio);
		folio_activate_fn(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		folio_set_lru(folio);
	}
}
#endif

static void __lru_cache_activate_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int i;

	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

	/*
	 * Search backwards on the assumption that the folio being activated
	 * was only just added to this CPU's lru_add batch.  Only the local
	 * batch is examined: a !LRU folio could be in the process of being
	 * released, reclaimed, migrated, or sitting on a remote batch that
	 * is currently being drained, and marking a remote batch's folio
	 * active could race with it being added to the inactive list.
	 */
	for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
		struct folio *batch_folio = fbatch->folios[i];

		if (batch_folio == folio) {
			folio_set_active(folio);
			break;
		}
	}

	local_unlock(&cpu_fbatches.lock);
}

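/*
 * Mark a folio as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated folio is not yet visible, so safe for non-atomic
 * ops, __folio_set_referenced() may be substituted for
 * folio_set_referenced().
 */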
void folio_mark_accessed(struct folio *folio)
{
	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
	} else if (folio_test_unevictable(folio)) {
		/*
		 * Unevictable folios sit on the LRU_UNEVICTABLE list, which
		 * is never scanned or rotated, so marking one accessed
		 * would have no effect.
		 */
	} else if (!folio_test_active(folio)) {
		/*
		 * If the folio is on an LRU list, activate it via the
		 * per-CPU activate batch.  Otherwise it is still sitting in
		 * a folio batch: mark it active there and it will be placed
		 * on the active list when the batch is drained.
		 */
		if (folio_test_lru(folio))
			folio_activate(folio);
		else
			__lru_cache_activate_folio(folio);
		folio_clear_referenced(folio);
		workingset_activation(folio);
	}
	if (folio_test_idle(folio))
		folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);

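/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU.  The decision on which list
 * to use is deferred until the folio batch is drained, which gives the
 * caller a chance to mark the folio active first via
 * folio_mark_accessed().
 */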
void folio_add_lru(struct folio *folio)
{
	struct folio_batch *fbatch;

	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
			folio_test_unevictable(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
	folio_batch_add_and_move(fbatch, folio, lru_add_fn);
	local_unlock(&cpu_fbatches.lock);
}
EXPORT_SYMBOL(folio_add_lru);

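/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to the LRU
 * @vma:   the VMA in which the page is mapped
 *
 * Place @page on the inactive or unevictable LRU list, depending on
 * whether the VMA is VM_LOCKED (and not VM_SPECIAL).
 */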
void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
		mlock_new_page(page);
	else
		lru_cache_add(page);
}

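/*
 * lru_deactivate_file_fn() moves a file folio that could not be
 * invalidated onto the inactive list so it can be reclaimed sooner:
 *
 * 1. active, mapped folio		-> left alone
 * 2. active, dirty/writeback folio	-> inactive, head, set reclaim
 * 3. inactive, mapped folio		-> left alone
 * 4. inactive, dirty/writeback folio	-> inactive, head, set reclaim
 * 5. inactive, clean folio		-> inactive, tail
 * 6. unevictable folio			-> left alone
 *
 * Dirty or writeback folios go to the head of the inactive list so the
 * flusher threads get some time to write them out, which is far more
 * efficient than single-page writeout from reclaim.
 */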
static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
	bool active = folio_test_active(folio);
	long nr_pages = folio_nr_pages(folio);

	if (folio_test_unevictable(folio))
		return;

	/* Some process is still mapping the folio: leave it alone. */
	if (folio_mapped(folio))
		return;

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_clear_referenced(folio);

	if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
		/*
		 * Keep the dirty/writeback folio at the head of the
		 * inactive list and set the reclaim flag so that it is
		 * freed as soon as writeback completes.  Setting the flag
		 * can race with folio_end_writeback(), but the window is
		 * tiny and the consequence is only a missed rotation.
		 */
		lruvec_add_folio(lruvec, folio);
		folio_set_reclaim(folio);
	} else {
		/*
		 * The folio is clean: move it to the tail of the inactive
		 * list so it is among the next folios reclaim looks at.
		 */
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		/*
		 * Lazyfree folios are clean anonymous folios.  Clearing the
		 * swapbacked flag distinguishes them from normal anonymous
		 * folios and lets reclaim discard them without writing to
		 * swap.
		 */
		folio_clear_swapbacked(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

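/*
 * Drain all per-CPU folio batches for @cpu into the LRU lists.  Either
 * @cpu is the current CPU and preemption is already disabled, or @cpu
 * is being hot-unplugged and is already dead.
 */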
void lru_add_drain_cpu(int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
	struct folio_batch *fbatch = &fbatches->lru_add;

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_add_fn);

	fbatch = &per_cpu(lru_rotate.fbatch, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(folio_batch_count(fbatch))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already drained this. */
		local_lock_irqsave(&lru_rotate.lock, flags);
		folio_batch_move_lru(fbatch, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	fbatch = &fbatches->lru_deactivate_file;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_file_fn);

	fbatch = &fbatches->lru_deactivate;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_fn);

	fbatch = &fbatches->lru_lazyfree;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_lazyfree_fn);

	folio_activate_drain(cpu);
}

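/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * Hints to the VM that @folio is a good reclaim candidate, for example
 * because its invalidation failed due to the folio being dirty or under
 * writeback.
 *
 * Context: Caller holds a reference on the folio.
 */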
void deactivate_file_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	/* Deactivating an unevictable folio will not accelerate reclaim. */
	if (folio_test_unevictable(folio))
		return;

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
	local_unlock(&cpu_fbatches.lock);
}

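/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * Move @page to the inactive list if it was on the active list and is
 * not unevictable, to accelerate its reclaim.
 */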
void deactivate_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

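/*
 * mark_page_lazyfree - make an anonymous page lazyfree
 * @page: page to mark
 *
 * Queue a clean, swap-backed anonymous page for the lazyfree batch so
 * that reclaim can discard it without swapping it out.
 */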
void mark_page_lazyfree(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && folio_test_anon(folio) &&
	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	mlock_page_drain_local();
}

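/*
 * In the SMP case this runs from per-CPU workqueue context, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() execute on the same
 * CPU.  In the !SMP case there is only one CPU and the local lock
 * disables preemption, so the same guarantee holds.
 */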
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	invalidate_bh_lrus_cpu();
	mlock_page_drain_local();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&cpu_fbatches.lock);
	mlock_page_drain_local();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

static bool cpu_needs_drain(unsigned int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

	/* Check each batch that a drain on this CPU would flush. */
	return folio_batch_count(&fbatches->lru_add) ||
		data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
		folio_batch_count(&fbatches->lru_deactivate_file) ||
		folio_batch_count(&fbatches->lru_deactivate) ||
		folio_batch_count(&fbatches->lru_lazyfree) ||
		folio_batch_count(&fbatches->activate) ||
		need_mlock_page_drain(cpu) ||
		has_bh_in_lru(cpu, NULL);
}

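/*
 * Flush the folio batches of all CPUs that currently have work pending,
 * and wait for the flushes to complete.  No CPU-hotplug locking is
 * needed: the per-CPU kworkers are shut down before the page_alloc CPU
 * dead callback runs, and taking hotplug locks here could create
 * workqueue lock dependencies.
 */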
static inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global drain generation number.
	 *
	 * (A) A global value of x means that all generations 0 < n <= x
	 * have already been *scheduled* for draining.  This lets
	 * concurrent callers piggyback on a drain that is already underway
	 * instead of queueing redundant work.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee that the folio batch counter stores made by this CPU
	 * are visible to other CPUs before it loads the current drain
	 * generation below.
	 */
	smp_mb();

	/*
	 * (B) Locally cache the global drain generation.  If, once the
	 * mutex is held, the generation has already moved past this
	 * snapshot, another caller has scheduled a drain that also covers
	 * the batches we care about.
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit if a newer generation, from another lru_add_drain_all(),
	 * was already scheduled for draining; see (A) above.
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment the global generation number.
	 *
	 * Pairs with the smp_load_acquire() at (B).  The full barrier
	 * guarantees the new generation is published before this CPU loads
	 * the per-CPU folio batch counters in cpu_needs_drain() below, so
	 * a concurrent caller that exits early at (C) can rely on its
	 * batches being covered by this drain.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (cpu_needs_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif

atomic_t lru_disable_count = ATOMIC_INIT(0);

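/*
 * lru_cache_disable() - disable the per-CPU LRU batches.
 *
 * Called before building up lists of isolated pages (e.g. for migration)
 * so that newly added folios go straight to the LRU lists instead of
 * lingering in per-CPU batches.  Existing batches are drained on all
 * CPUs.  Must be paired with lru_cache_enable().
 */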
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);

	/*
	 * Readers of lru_disable_count are protected by either disabling
	 * preemption or rcu_read_lock(), and synchronize_rcu() waits for
	 * both kinds of critical section.  So once it returns, every CPU
	 * that observed the old count of zero has left its critical
	 * section and will see the batching as disabled.
	 */
	synchronize_rcu_expedited();
#ifdef CONFIG_SMP
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
}

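/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */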
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct folio *folio = page_folio(pages[i]);

		/*
		 * Periodically drop the lruvec lock so that a long run of
		 * pages from the same lruvec does not hold off interrupts
		 * for too long.  lock_batch is only meaningful while
		 * lruvec != NULL.
		 */
		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
			unlock_page_lruvec_irqrestore(lruvec, flags);
			lruvec = NULL;
		}

		if (is_huge_zero_page(&folio->page))
			continue;

		if (folio_is_zone_device(folio)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			if (put_devmap_managed_page(&folio->page))
				continue;
			if (folio_put_testzero(folio))
				free_zone_device_page(&folio->page);
			continue;
		}

		if (!folio_put_testzero(folio))
			continue;

		if (folio_test_large(folio)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			__folio_put_large(folio);
			continue;
		}

		if (folio_test_lru(folio)) {
			struct lruvec *prev_lruvec = lruvec;

			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
								&flags);
			if (prev_lruvec != lruvec)
				lock_batch = 0;

			lruvec_del_folio(lruvec, folio);
			__folio_clear_lru_flags(folio);
		}

		/*
		 * The mlocked flag can still be found set here if truncation
		 * or hole-punching raced with munlock; clear it and fix up
		 * the statistics before freeing.
		 */
		if (unlikely(folio_test_mlocked(folio))) {
			__folio_clear_mlocked(folio);
			zone_stat_sub_folio(folio, NR_MLOCK);
			count_vm_event(UNEVICTABLE_PGCLEARED);
		}

		list_add(&folio->lru, &pages_to_free);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

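/*
 * The pages we are about to release may still be sitting on per-CPU
 * lru_add batches, which would keep them from being freed right away.
 * Drain those batches once per pagevec so cache-warm pages get back to
 * the page allocator promptly.
 */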
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

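/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune.
 *
 * Lookup functions such as find_get_entries() can fill a batch with both
 * folios and exceptional entries (shadow, swap or DAX values).  Compact
 * the batch so that only real folios remain, preserving their order.
 */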
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		if (!xa_is_value(folio))
			fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

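/*
 * Perform any setup for the swap system at boot time.
 */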
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller readahead cluster on small-memory machines. */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;

	/*
	 * Larger clusters tend to waste memory elsewhere in the system,
	 * so don't go beyond this.
	 */
}