#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

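/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * Fastpath wrapper around __filemap_set_wb_err(): the common case of
 * no error is filtered out before making the function call.
 */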
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

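/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed
 * since the given value was sampled.  Returns 0 if no error has occurred,
 * or a negative errno otherwise.
 */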
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

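/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past.  This function provides that sample point.
 */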
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

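/*
 * Wait for any direct I/O in flight and then flush and wait for all dirty
 * pagecache of the inode.  Used to quiesce writes (e.g. before changing
 * attributes); callers must prevent new writes themselves.
 */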
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

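/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode reclaim
 * and LRU management: an inode is considered shrinkable when its cache is
 * empty or holds only value (shadow) entries.  Cache additions are not
 * synchronized against this check, so a false positive is possible and
 * must be tolerated by callers.
 */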
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page cache.
	 * Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty?  Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the head
	 * pointer, so a mapping whose only entry is a value (shadow) entry
	 * holds no pages and can also be shrunk away.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
};

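/**
 * mapping_set_error - record a writeback error in the address_space and
 *			in the superblock
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * Record a writeback error so that it can later be reported to userspace,
 * e.g. when fsync(2) is called.  The error is noted in the errseq_t of both
 * the mapping and its superblock, and additionally in the legacy AS_EIO /
 * AS_ENOSPC mapping flags.
 */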
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in the superblock as well */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
					   gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

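/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */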
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

/*
 * Large folio support currently depends on THP being enabled.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

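/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * folio belongs to.  Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored; this differs from
 * folio_mapping().
 *
 * Do not call this for folios which aren't in the page cache.
 */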
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/*
 * For pages which are in the page cache, return the mapping that the
 * page belongs to.  Pages in the swap cache return NULL.
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (unlikely(folio_test_swapcache(folio)))
		return NULL;
	return folio_mapping(folio);
}

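/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */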
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the folio's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old data.
 * The folio must previously have had data attached and the data must be
 * detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

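/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the folio.
 *
 * Return: Data that was attached to the folio, or NULL if none was attached.
 */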
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040
#define FGP_HEAD		0x00000080
#define FGP_ENTRY		0x00000100
#define FGP_STABLE		0x00000200

struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp);

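/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned with an increased refcount; otherwise %NULL is
 * returned.  A typical (illustrative) caller does:
 *
 *	folio = filemap_get_folio(mapping, index);
 *	if (folio) {
 *		... use the folio ...
 *		folio_put(folio);
 *	}
 */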
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

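/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or %NULL if there is no folio in the cache for this
 * index.
 */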
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

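/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */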
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

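/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to.  If you know
 * the folio is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */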
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg mmap).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return &folio->page;
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the folio locked to prevent it from
 * being moved between the page cache and swap cache while the check runs.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return folio->index == index;
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file.
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless of the subpage index */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}


unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
		unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
		pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
		struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get the file index of the page, in units of PAGE_SIZE.  Tail pages of
 * a transparent huge page don't have ->index initialized, so it is
 * computed from the head page.
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * the head page.
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset of the page in the file, in units of PAGE_SIZE, even for
 * hugetlb pages (whose ->index is in units of the huge page size).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}


/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

/**
 * folio_pgoff - Calculate the logical page offset of this folio.
 * @folio: The folio.
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	if (unlikely(folio_test_hugetlb(folio)))
		return hugetlb_basepage_index(&folio->page);
	return folio->index;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				   struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
				unsigned int flags);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order).  Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked.
 */
static inline int trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

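/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock is primarily held while a folio is being brought
 * uptodate or truncated, so holding it keeps folio->mapping stable.
 *
 * Context: May sleep.  If the folio is already locked, waits for it to
 * become unlocked before taking the lock.
 */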
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/*
 * lock_page() - Lock the folio containing this page.
 *
 * See folio_lock() for a description of what the lock protects.  New code
 * should usually call folio_lock() directly.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired, or a negative errno if a fatal
 * signal was received while waiting.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page, or a negative errno if it
 * was killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	return folio_lock_killable(page_folio(page));
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
				      unsigned int flags)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}

/*
 * These are exported only for folio_wait_locked() and friends and should
 * not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

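/**
 * folio_wait_locked - Wait for a folio to be unlocked.
 * @folio: The folio to wait for.
 *
 * The caller should hold a reference on @folio.  If the folio is locked,
 * sleep until PG_locked is cleared; if it is already unlocked, return
 * immediately.
 */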
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	return folio_wait_locked_killable(page_folio(page));
}

int folio_put_wait_locked(struct folio *folio, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	__folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __must_check folio_write_one(struct folio *folio);
static inline int __must_check write_one_page(struct page *page)
{
	return folio_write_one(page_folio(page));
}

int __set_page_dirty_nobuffers(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void page_endio(struct page *page, bool is_write, int err);

void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a folio's wait queue.
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void delete_from_page_cache(struct page *page);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch);
int try_to_release_page(struct page *page, gfp_t gfp);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

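/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Cheaply check whether the range may have dirty or writeback pages: bail
 * out early if the mapping has no pages or carries neither the DIRTY nor
 * the WRITEBACK tag, and only then walk the range itself.
 *
 * Return: %true if the caller may need to write back the range before
 * proceeding (e.g. before doing O_DIRECT to it), %false otherwise.
 */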
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}

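/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_folio() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */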
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the
 * application has used up enough of the readahead window that we should
 * start pulling in more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, folio, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}

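/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * The folio is locked; unlike readahead_page(), the reference taken by
 * the readahead code is dropped here, so the caller only needs to unlock
 * the folio once all I/O to it has completed.
 *
 * A typical ->readahead() implementation (illustrative) iterates with:
 *
 *	while ((folio = readahead_folio(ractl)) != NULL)
 *		... start I/O against folio ...
 *
 * Return: A pointer to the next folio, or %NULL if we are done.
 */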
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)			\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

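/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */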
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif