0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_PAGEMAP_H
0003 #define _LINUX_PAGEMAP_H
0004 
0005 /*
0006  * Copyright 1995 Linus Torvalds
0007  */
0008 #include <linux/mm.h>
0009 #include <linux/fs.h>
0010 #include <linux/list.h>
0011 #include <linux/highmem.h>
0012 #include <linux/compiler.h>
0013 #include <linux/uaccess.h>
0014 #include <linux/gfp.h>
0015 #include <linux/bitops.h>
0016 #include <linux/hardirq.h> /* for in_interrupt() */
0017 #include <linux/hugetlb_inline.h>
0018 
0019 struct folio_batch;
0020 
0021 unsigned long invalidate_mapping_pages(struct address_space *mapping,
0022                     pgoff_t start, pgoff_t end);
0023 
0024 static inline void invalidate_remote_inode(struct inode *inode)
0025 {
0026     if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
0027         S_ISLNK(inode->i_mode))
0028         invalidate_mapping_pages(inode->i_mapping, 0, -1);
0029 }
0030 int invalidate_inode_pages2(struct address_space *mapping);
0031 int invalidate_inode_pages2_range(struct address_space *mapping,
0032         pgoff_t start, pgoff_t end);
0033 int write_inode_now(struct inode *, int sync);
0034 int filemap_fdatawrite(struct address_space *);
0035 int filemap_flush(struct address_space *);
0036 int filemap_fdatawait_keep_errors(struct address_space *mapping);
0037 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
0038 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
0039         loff_t start_byte, loff_t end_byte);
0040 
0041 static inline int filemap_fdatawait(struct address_space *mapping)
0042 {
0043     return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
0044 }
0045 
0046 bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
0047 int filemap_write_and_wait_range(struct address_space *mapping,
0048         loff_t lstart, loff_t lend);
0049 int __filemap_fdatawrite_range(struct address_space *mapping,
0050         loff_t start, loff_t end, int sync_mode);
0051 int filemap_fdatawrite_range(struct address_space *mapping,
0052         loff_t start, loff_t end);
0053 int filemap_check_errors(struct address_space *mapping);
0054 void __filemap_set_wb_err(struct address_space *mapping, int err);
0055 int filemap_fdatawrite_wbc(struct address_space *mapping,
0056                struct writeback_control *wbc);
0057 
0058 static inline int filemap_write_and_wait(struct address_space *mapping)
0059 {
0060     return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
0061 }
0062 
0063 /**
0064  * filemap_set_wb_err - set a writeback error on an address_space
0065  * @mapping: mapping in which to set writeback error
0066  * @err: error to be set in mapping
0067  *
0068  * When writeback fails in some way, we must record that error so that
0069  * userspace can be informed when fsync and the like are called.  We endeavor
0070  * to report errors on any file that was open at the time of the error.  Some
0071  * internal callers also need to know when writeback errors have occurred.
0072  *
0073  * When a writeback error occurs, most filesystems will want to call
0074  * filemap_set_wb_err to record the error in the mapping so that it will be
0075  * automatically reported whenever fsync is called on the file.
0076  */
0077 static inline void filemap_set_wb_err(struct address_space *mapping, int err)
0078 {
0079     /* Fastpath for common case of no error */
0080     if (unlikely(err))
0081         __filemap_set_wb_err(mapping, err);
0082 }
0083 
0084 /**
0085  * filemap_check_wb_err - has an error occurred since the mark was sampled?
0086  * @mapping: mapping to check for writeback errors
0087  * @since: previously-sampled errseq_t
0088  *
0089  * Grab the errseq_t value from the mapping, and see if it has changed "since"
0090  * the given value was sampled.
0091  *
0092  * If it has then report the latest error set, otherwise return 0.
0093  */
0094 static inline int filemap_check_wb_err(struct address_space *mapping,
0095                     errseq_t since)
0096 {
0097     return errseq_check(&mapping->wb_err, since);
0098 }
0099 
0100 /**
0101  * filemap_sample_wb_err - sample the current errseq_t to test for later errors
0102  * @mapping: mapping to be sampled
0103  *
0104  * Writeback errors are always reported relative to a particular sample point
0105  * in the past. This function provides those sample points.
0106  */
0107 static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
0108 {
0109     return errseq_sample(&mapping->wb_err);
0110 }
0111 
0112 /**
0113  * file_sample_sb_err - sample the current errseq_t to test for later errors
0114  * @file: file pointer to be sampled
0115  *
0116  * Grab the most current superblock-level errseq_t value for the given
0117  * struct file.
0118  */
0119 static inline errseq_t file_sample_sb_err(struct file *file)
0120 {
0121     return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
0122 }
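
/*
 * A minimal sketch of how these helpers pair up in a filesystem's
 * fsync path: sample before writeback, check after.  my_fsync() is
 * hypothetical and elides the usual inode locking.
 *
 *    static int my_fsync(struct file *file, loff_t start, loff_t end,
 *                int datasync)
 *    {
 *        struct address_space *mapping = file->f_mapping;
 *        errseq_t since = filemap_sample_wb_err(mapping);
 *        int err;
 *
 *        err = filemap_write_and_wait_range(mapping, start, end);
 *        if (err)
 *            return err;
 *        return filemap_check_wb_err(mapping, since);
 *    }
 */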
0123 
0124 /*
0125  * Flush file data before changing attributes.  Caller must hold any locks
0126  * required to prevent further writes to this file until we're done setting
0127  * flags.
0128  */
0129 static inline int inode_drain_writes(struct inode *inode)
0130 {
0131     inode_dio_wait(inode);
0132     return filemap_write_and_wait(inode->i_mapping);
0133 }
0134 
0135 static inline bool mapping_empty(struct address_space *mapping)
0136 {
0137     return xa_empty(&mapping->i_pages);
0138 }
0139 
0140 /*
0141  * mapping_shrinkable - test if page cache state allows inode reclaim
0142  * @mapping: the page cache mapping
0143  *
0144  * This checks the mapping's cache state for the purpose of inode
0145  * reclaim and LRU management.
0146  *
0147  * The caller is expected to hold the i_lock, but is not required to
0148  * hold the i_pages lock, which usually protects cache state. That's
0149  * because the i_lock and the list_lru lock that protect the inode and
0150  * its LRU state don't nest inside the irq-safe i_pages lock.
0151  *
0152  * Cache deletions are performed under the i_lock, which ensures that
0153  * when an inode goes empty, it will reliably get queued on the LRU.
0154  *
0155  * Cache additions do not acquire the i_lock and may race with this
0156  * check, in which case we'll report the inode as shrinkable when it
0157  * has cache pages. This is okay: the shrinker also checks the
0158  * refcount and the referenced bit, which will be elevated or set in
0159  * the process of adding new cache pages to an inode.
0160  */
0161 static inline bool mapping_shrinkable(struct address_space *mapping)
0162 {
0163     void *head;
0164 
0165     /*
0166      * On highmem systems, there could be lowmem pressure from the
0167      * inodes before there is highmem pressure from the page
0168      * cache. Make inodes shrinkable regardless of cache state.
0169      */
0170     if (IS_ENABLED(CONFIG_HIGHMEM))
0171         return true;
0172 
0173     /* Cache completely empty? Shrink away. */
0174     head = rcu_access_pointer(mapping->i_pages.xa_head);
0175     if (!head)
0176         return true;
0177 
0178     /*
0179      * The xarray stores single offset-0 entries directly in the
0180      * head pointer, which allows non-resident page cache entries
0181      * to escape the shadow shrinker's list of xarray nodes. The
0182      * inode shrinker needs to pick them up under memory pressure.
0183      */
0184     if (!xa_is_node(head) && xa_is_value(head))
0185         return true;
0186 
0187     return false;
0188 }
0189 
0190 /*
0191  * Bits in mapping->flags.
0192  */
0193 enum mapping_flags {
0194     AS_EIO      = 0,    /* IO error on async write */
0195     AS_ENOSPC   = 1,    /* ENOSPC on async write */
0196     AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
0197     AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
0198     AS_EXITING  = 4,    /* final truncate in progress */
0199     /* writeback related tags are not used */
0200     AS_NO_WRITEBACK_TAGS = 5,
0201     AS_LARGE_FOLIO_SUPPORT = 6,
0202 };
0203 
0204 /**
0205  * mapping_set_error - record a writeback error in the address_space
0206  * @mapping: the mapping in which an error should be set
0207  * @error: the error to set in the mapping
0208  *
0209  * When writeback fails in some way, we must record that error so that
0210  * userspace can be informed when fsync and the like are called.  We endeavor
0211  * to report errors on any file that was open at the time of the error.  Some
0212  * internal callers also need to know when writeback errors have occurred.
0213  *
0214  * When a writeback error occurs, most filesystems will want to call
0215  * mapping_set_error to record the error in the mapping so that it can be
0216  * reported when the application calls fsync(2).
0217  */
0218 static inline void mapping_set_error(struct address_space *mapping, int error)
0219 {
0220     if (likely(!error))
0221         return;
0222 
0223     /* Record in wb_err for checkers using errseq_t based tracking */
0224     __filemap_set_wb_err(mapping, error);
0225 
0226     /* Record it in superblock */
0227     if (mapping->host)
0228         errseq_set(&mapping->host->i_sb->s_wb_err, error);
0229 
0230     /* Record it in flags for now, for legacy callers */
0231     if (error == -ENOSPC)
0232         set_bit(AS_ENOSPC, &mapping->flags);
0233     else
0234         set_bit(AS_EIO, &mapping->flags);
0235 }
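
/*
 * Sketch: a filesystem typically calls mapping_set_error() from its
 * writeback completion path.  my_write_done() is hypothetical; only
 * mapping_set_error() and folio_end_writeback() come from this header.
 *
 *    static void my_write_done(struct folio *folio, int error)
 *    {
 *        if (error)
 *            mapping_set_error(folio->mapping, error);
 *        folio_end_writeback(folio);
 *    }
 */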
0236 
0237 static inline void mapping_set_unevictable(struct address_space *mapping)
0238 {
0239     set_bit(AS_UNEVICTABLE, &mapping->flags);
0240 }
0241 
0242 static inline void mapping_clear_unevictable(struct address_space *mapping)
0243 {
0244     clear_bit(AS_UNEVICTABLE, &mapping->flags);
0245 }
0246 
0247 static inline bool mapping_unevictable(struct address_space *mapping)
0248 {
0249     return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
0250 }
0251 
0252 static inline void mapping_set_exiting(struct address_space *mapping)
0253 {
0254     set_bit(AS_EXITING, &mapping->flags);
0255 }
0256 
0257 static inline int mapping_exiting(struct address_space *mapping)
0258 {
0259     return test_bit(AS_EXITING, &mapping->flags);
0260 }
0261 
0262 static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
0263 {
0264     set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
0265 }
0266 
0267 static inline int mapping_use_writeback_tags(struct address_space *mapping)
0268 {
0269     return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
0270 }
0271 
0272 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
0273 {
0274     return mapping->gfp_mask;
0275 }
0276 
0277 /* Restricts the given gfp_mask to what the mapping allows. */
0278 static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
0279         gfp_t gfp_mask)
0280 {
0281     return mapping_gfp_mask(mapping) & gfp_mask;
0282 }
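
/*
 * Example (sketch): a caller that must not recurse into the filesystem
 * can intersect the mapping's mask with GFP_NOFS, keeping only the
 * flags both sides allow:
 *
 *    gfp_t gfp = mapping_gfp_constraint(mapping, GFP_NOFS);
 */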
0283 
0284 /*
0285  * This is non-atomic.  Only to be used before the mapping is activated.
0286  * Probably needs a barrier...
0287  */
0288 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
0289 {
0290     m->gfp_mask = mask;
0291 }
0292 
0293 /**
0294  * mapping_set_large_folios() - Indicate the file supports large folios.
0295  * @mapping: The file.
0296  *
0297  * The filesystem should call this function in its inode constructor to
0298  * indicate that the VFS can use large folios to cache the contents of
0299  * the file.
0300  *
0301  * Context: This should not be called while the inode is active as it
0302  * is non-atomic.
0303  */
0304 static inline void mapping_set_large_folios(struct address_space *mapping)
0305 {
0306     __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
0307 }
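
/*
 * Sketch: a filesystem opts in from its inode initialisation path.
 * my_setup_inode() is hypothetical.
 *
 *    static void my_setup_inode(struct inode *inode)
 *    {
 *        mapping_set_large_folios(inode->i_mapping);
 *    }
 */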
0308 
0309 /*
0310  * Large folio support currently depends on THP.  These dependencies are
0311  * being worked on but are not yet fixed.
0312  */
0313 static inline bool mapping_large_folio_support(struct address_space *mapping)
0314 {
0315     return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
0316         test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
0317 }
0318 
0319 static inline int filemap_nr_thps(struct address_space *mapping)
0320 {
0321 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
0322     return atomic_read(&mapping->nr_thps);
0323 #else
0324     return 0;
0325 #endif
0326 }
0327 
0328 static inline void filemap_nr_thps_inc(struct address_space *mapping)
0329 {
0330 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
0331     if (!mapping_large_folio_support(mapping))
0332         atomic_inc(&mapping->nr_thps);
0333 #else
0334     WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
0335 #endif
0336 }
0337 
0338 static inline void filemap_nr_thps_dec(struct address_space *mapping)
0339 {
0340 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
0341     if (!mapping_large_folio_support(mapping))
0342         atomic_dec(&mapping->nr_thps);
0343 #else
0344     WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
0345 #endif
0346 }
0347 
0348 struct address_space *page_mapping(struct page *);
0349 struct address_space *folio_mapping(struct folio *);
0350 struct address_space *swapcache_mapping(struct folio *);
0351 
0352 /**
0353  * folio_file_mapping - Find the mapping this folio belongs to.
0354  * @folio: The folio.
0355  *
0356  * For folios which are in the page cache, return the mapping that this
0357  * page belongs to.  Folios in the swap cache return the mapping of the
0358  * swap file or swap device where the data is stored.  This is different
0359  * from the mapping returned by folio_mapping().  The only reason to
0360  * use it is if, like NFS, you return 0 from ->activate_swapfile.
0361  *
0362  * Do not call this for folios which aren't in the page cache or swap cache.
0363  */
0364 static inline struct address_space *folio_file_mapping(struct folio *folio)
0365 {
0366     if (unlikely(folio_test_swapcache(folio)))
0367         return swapcache_mapping(folio);
0368 
0369     return folio->mapping;
0370 }
0371 
0372 static inline struct address_space *page_file_mapping(struct page *page)
0373 {
0374     return folio_file_mapping(page_folio(page));
0375 }
0376 
0377 /*
0378  * For file cache pages, return the address_space, otherwise return NULL
0379  */
0380 static inline struct address_space *page_mapping_file(struct page *page)
0381 {
0382     struct folio *folio = page_folio(page);
0383 
0384     if (unlikely(folio_test_swapcache(folio)))
0385         return NULL;
0386     return folio_mapping(folio);
0387 }
0388 
0389 /**
0390  * folio_inode - Get the host inode for this folio.
0391  * @folio: The folio.
0392  *
0393  * For folios which are in the page cache, return the inode that this folio
0394  * belongs to.
0395  *
0396  * Do not call this for folios which aren't in the page cache.
0397  */
0398 static inline struct inode *folio_inode(struct folio *folio)
0399 {
0400     return folio->mapping->host;
0401 }
0402 
0403 /**
0404  * folio_attach_private - Attach private data to a folio.
0405  * @folio: Folio to attach data to.
0406  * @data: Data to attach to folio.
0407  *
0408  * Attaching private data to a folio increments the folio's reference count.
0409  * The data must be detached before the folio will be freed.
0410  */
0411 static inline void folio_attach_private(struct folio *folio, void *data)
0412 {
0413     folio_get(folio);
0414     folio->private = data;
0415     folio_set_private(folio);
0416 }
0417 
0418 /**
0419  * folio_change_private - Change private data on a folio.
0420  * @folio: Folio to change the data on.
0421  * @data: Data to set on the folio.
0422  *
0423  * Change the private data attached to a folio and return the old
0424  * data.  The folio must previously have had data attached and the data
0425  * must be detached before the folio will be freed.
0426  *
0427  * Return: Data that was previously attached to the folio.
0428  */
0429 static inline void *folio_change_private(struct folio *folio, void *data)
0430 {
0431     void *old = folio_get_private(folio);
0432 
0433     folio->private = data;
0434     return old;
0435 }
0436 
0437 /**
0438  * folio_detach_private - Detach private data from a folio.
0439  * @folio: Folio to detach data from.
0440  *
0441  * Removes the data that was previously attached to the folio and decrements
0442  * the refcount on the folio.
0443  *
0444  * Return: Data that was attached to the folio.
0445  */
0446 static inline void *folio_detach_private(struct folio *folio)
0447 {
0448     void *data = folio_get_private(folio);
0449 
0450     if (!folio_test_private(folio))
0451         return NULL;
0452     folio_clear_private(folio);
0453     folio->private = NULL;
0454     folio_put(folio);
0455 
0456     return data;
0457 }
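
/*
 * Sketch of the attach/detach lifecycle for per-folio filesystem state.
 * struct my_state is hypothetical; note the reference taken by
 * folio_attach_private() is dropped by folio_detach_private().
 *
 *    struct my_state *s = kzalloc(sizeof(*s), GFP_NOFS);
 *
 *    folio_attach_private(folio, s);
 *    ...
 *    s = folio_detach_private(folio);
 *    kfree(s);
 */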
0458 
0459 static inline void attach_page_private(struct page *page, void *data)
0460 {
0461     folio_attach_private(page_folio(page), data);
0462 }
0463 
0464 static inline void *detach_page_private(struct page *page)
0465 {
0466     return folio_detach_private(page_folio(page));
0467 }
0468 
0469 #ifdef CONFIG_NUMA
0470 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
0471 #else
0472 static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
0473 {
0474     return folio_alloc(gfp, order);
0475 }
0476 #endif
0477 
0478 static inline struct page *__page_cache_alloc(gfp_t gfp)
0479 {
0480     return &filemap_alloc_folio(gfp, 0)->page;
0481 }
0482 
0483 static inline struct page *page_cache_alloc(struct address_space *x)
0484 {
0485     return __page_cache_alloc(mapping_gfp_mask(x));
0486 }
0487 
0488 static inline gfp_t readahead_gfp_mask(struct address_space *x)
0489 {
0490     return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
0491 }
0492 
0493 typedef int filler_t(struct file *, struct folio *);
0494 
0495 pgoff_t page_cache_next_miss(struct address_space *mapping,
0496                  pgoff_t index, unsigned long max_scan);
0497 pgoff_t page_cache_prev_miss(struct address_space *mapping,
0498                  pgoff_t index, unsigned long max_scan);
0499 
0500 #define FGP_ACCESSED        0x00000001
0501 #define FGP_LOCK        0x00000002
0502 #define FGP_CREAT       0x00000004
0503 #define FGP_WRITE       0x00000008
0504 #define FGP_NOFS        0x00000010
0505 #define FGP_NOWAIT      0x00000020
0506 #define FGP_FOR_MMAP        0x00000040
0507 #define FGP_HEAD        0x00000080
0508 #define FGP_ENTRY       0x00000100
0509 #define FGP_STABLE      0x00000200
0510 
0511 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
0512         int fgp_flags, gfp_t gfp);
0513 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
0514         int fgp_flags, gfp_t gfp);
0515 
0516 /**
0517  * filemap_get_folio - Find and get a folio.
0518  * @mapping: The address_space to search.
0519  * @index: The page index.
0520  *
0521  * Looks up the page cache entry at @mapping & @index.  If a folio is
0522  * present, it is returned with an increased refcount.
0523  *
0524  * Otherwise, %NULL is returned.
0525  */
0526 static inline struct folio *filemap_get_folio(struct address_space *mapping,
0527                     pgoff_t index)
0528 {
0529     return __filemap_get_folio(mapping, index, 0, 0);
0530 }
0531 
0532 /**
0533  * filemap_lock_folio - Find and lock a folio.
0534  * @mapping: The address_space to search.
0535  * @index: The page index.
0536  *
0537  * Looks up the page cache entry at @mapping & @index.  If a folio is
0538  * present, it is returned locked with an increased refcount.
0539  *
0540  * Context: May sleep.
0541  * Return: A folio or %NULL if there is no folio in the cache for this
0542  * index.  Will not return a shadow, swap or DAX entry.
0543  */
0544 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
0545                     pgoff_t index)
0546 {
0547     return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
0548 }
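
/*
 * Typical lookup pattern (sketch): take a locked reference, use the
 * folio, then release both the lock and the reference.
 *
 *    struct folio *folio = filemap_lock_folio(mapping, index);
 *
 *    if (folio) {
 *        ... read or modify the folio ...
 *        folio_unlock(folio);
 *        folio_put(folio);
 *    }
 */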
0549 
0550 /**
0551  * find_get_page - find and get a page reference
0552  * @mapping: the address_space to search
0553  * @offset: the page index
0554  *
0555  * Looks up the page cache slot at @mapping & @offset.  If there is a
0556  * page cache page, it is returned with an increased refcount.
0557  *
0558  * Otherwise, %NULL is returned.
0559  */
0560 static inline struct page *find_get_page(struct address_space *mapping,
0561                     pgoff_t offset)
0562 {
0563     return pagecache_get_page(mapping, offset, 0, 0);
0564 }
0565 
0566 static inline struct page *find_get_page_flags(struct address_space *mapping,
0567                     pgoff_t offset, int fgp_flags)
0568 {
0569     return pagecache_get_page(mapping, offset, fgp_flags, 0);
0570 }
0571 
0572 /**
0573  * find_lock_page - locate, pin and lock a pagecache page
0574  * @mapping: the address_space to search
0575  * @index: the page index
0576  *
0577  * Looks up the page cache entry at @mapping & @index.  If there is a
0578  * page cache page, it is returned locked and with an increased
0579  * refcount.
0580  *
0581  * Context: May sleep.
0582  * Return: A struct page or %NULL if there is no page in the cache for this
0583  * index.
0584  */
0585 static inline struct page *find_lock_page(struct address_space *mapping,
0586                     pgoff_t index)
0587 {
0588     return pagecache_get_page(mapping, index, FGP_LOCK, 0);
0589 }
0590 
0591 /**
0592  * find_or_create_page - locate or add a pagecache page
0593  * @mapping: the page's address_space
0594  * @index: the page's index into the mapping
0595  * @gfp_mask: page allocation mode
0596  *
0597  * Looks up the page cache slot at @mapping & @index.  If there is a
0598  * page cache page, it is returned locked and with an increased
0599  * refcount.
0600  *
0601  * If the page is not present, a new page is allocated using @gfp_mask
0602  * and added to the page cache and the VM's LRU list.  The page is
0603  * returned locked and with an increased refcount.
0604  *
0605  * On memory exhaustion, %NULL is returned.
0606  *
0607  * find_or_create_page() may sleep, even if @gfp_mask specifies an
0608  * atomic allocation!
0609  */
0610 static inline struct page *find_or_create_page(struct address_space *mapping,
0611                     pgoff_t index, gfp_t gfp_mask)
0612 {
0613     return pagecache_get_page(mapping, index,
0614                     FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
0615                     gfp_mask);
0616 }
0617 
0618 /**
0619  * grab_cache_page_nowait - returns locked page at given index in given cache
0620  * @mapping: target address_space
0621  * @index: the page index
0622  *
0623  * Same as grab_cache_page(), but do not wait if the page is unavailable.
0624  * This is intended for speculative data generators, where the data can
0625  * be regenerated if the page couldn't be grabbed.  This routine should
0626  * be safe to call while holding the lock for another page.
0627  *
0628  * Clear __GFP_FS when allocating the page to avoid recursion into the fs
0629  * and deadlock against the caller's locked page.
0630  */
0631 static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
0632                 pgoff_t index)
0633 {
0634     return pagecache_get_page(mapping, index,
0635             FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
0636             mapping_gfp_mask(mapping));
0637 }
0638 
0639 #define swapcache_index(folio)  __page_file_index(&(folio)->page)
0640 
0641 /**
0642  * folio_index - File index of a folio.
0643  * @folio: The folio.
0644  *
0645  * For a folio which is either in the page cache or the swap cache,
0646  * return its index within the address_space it belongs to.  If you know
0647  * the page is definitely in the page cache, you can look at the folio's
0648  * index directly.
0649  *
0650  * Return: The index (offset in units of pages) of a folio in its file.
0651  */
0652 static inline pgoff_t folio_index(struct folio *folio)
0653 {
0654     if (unlikely(folio_test_swapcache(folio)))
0655         return swapcache_index(folio);
0656     return folio->index;
0657 }
0658 
0659 /**
0660  * folio_next_index - Get the index of the next folio.
0661  * @folio: The current folio.
0662  *
0663  * Return: The index of the folio which follows this folio in the file.
0664  */
0665 static inline pgoff_t folio_next_index(struct folio *folio)
0666 {
0667     return folio->index + folio_nr_pages(folio);
0668 }
0669 
0670 /**
0671  * folio_file_page - The page for a particular index.
0672  * @folio: The folio which contains this index.
0673  * @index: The index we want to look up.
0674  *
0675  * Sometimes after looking up a folio in the page cache, we need to
0676  * obtain the specific page for an index (eg a page fault).
0677  *
0678  * Return: The page containing the file data for this index.
0679  */
0680 static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
0681 {
0682     /* HugeTLBfs indexes the page cache in units of hpage_size */
0683     if (folio_test_hugetlb(folio))
0684         return &folio->page;
0685     return folio_page(folio, index & (folio_nr_pages(folio) - 1));
0686 }
0687 
0688 /**
0689  * folio_contains - Does this folio contain this index?
0690  * @folio: The folio.
0691  * @index: The page index within the file.
0692  *
0693  * Context: The caller should have the page locked in order to prevent
0694  * (eg) shmem from moving the page between the page cache and swap cache
0695  * and changing its index in the middle of the operation.
0696  * Return: true or false.
0697  */
0698 static inline bool folio_contains(struct folio *folio, pgoff_t index)
0699 {
0700     /* HugeTLBfs indexes the page cache in units of hpage_size */
0701     if (folio_test_hugetlb(folio))
0702         return folio->index == index;
0703     return index - folio_index(folio) < folio_nr_pages(folio);
0704 }
0705 
0706 /*
0707  * Given the page we found in the page cache, return the page corresponding
0708  * to this index in the file
0709  */
0710 static inline struct page *find_subpage(struct page *head, pgoff_t index)
0711 {
0712     /* HugeTLBfs wants the head page regardless */
0713     if (PageHuge(head))
0714         return head;
0715 
0716     return head + (index & (thp_nr_pages(head) - 1));
0717 }
0718 
0719 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
0720         pgoff_t end, struct folio_batch *fbatch);
0721 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
0722                    unsigned int nr_pages, struct page **pages);
0723 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
0724             pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
0725             struct page **pages);
0726 static inline unsigned find_get_pages_tag(struct address_space *mapping,
0727             pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
0728             struct page **pages)
0729 {
0730     return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
0731                     nr_pages, pages);
0732 }
0733 
0734 struct page *grab_cache_page_write_begin(struct address_space *mapping,
0735             pgoff_t index);
0736 
0737 /*
0738  * Returns locked page at given index in given cache, creating it if needed.
0739  */
0740 static inline struct page *grab_cache_page(struct address_space *mapping,
0741                                 pgoff_t index)
0742 {
0743     return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
0744 }
0745 
0746 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
0747         filler_t *filler, struct file *file);
0748 struct page *read_cache_page(struct address_space *, pgoff_t index,
0749         filler_t *filler, struct file *file);
0750 extern struct page * read_cache_page_gfp(struct address_space *mapping,
0751                 pgoff_t index, gfp_t gfp_mask);
0752 
0753 static inline struct page *read_mapping_page(struct address_space *mapping,
0754                 pgoff_t index, struct file *file)
0755 {
0756     return read_cache_page(mapping, index, NULL, file);
0757 }
0758 
0759 static inline struct folio *read_mapping_folio(struct address_space *mapping,
0760                 pgoff_t index, struct file *file)
0761 {
0762     return read_cache_folio(mapping, index, NULL, file);
0763 }
0764 
0765 /*
0766  * Get index of the page within radix-tree (but not for hugetlb pages).
0767  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
0768  */
0769 static inline pgoff_t page_to_index(struct page *page)
0770 {
0771     struct page *head;
0772 
0773     if (likely(!PageTransTail(page)))
0774         return page->index;
0775 
0776     head = compound_head(page);
0777     /*
0778      *  We don't initialize ->index for tail pages: calculate based on
0779      *  head page
0780      */
0781     return head->index + page - head;
0782 }
0783 
0784 extern pgoff_t hugetlb_basepage_index(struct page *page);
0785 
0786 /*
0787  * Get the offset in PAGE_SIZE (even for hugetlb pages).
0788  * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
0789  */
0790 static inline pgoff_t page_to_pgoff(struct page *page)
0791 {
0792     if (unlikely(PageHuge(page)))
0793         return hugetlb_basepage_index(page);
0794     return page_to_index(page);
0795 }
0796 
0797 /*
0798  * Return byte-offset into filesystem object for page.
0799  */
0800 static inline loff_t page_offset(struct page *page)
0801 {
0802     return ((loff_t)page->index) << PAGE_SHIFT;
0803 }
0804 
0805 static inline loff_t page_file_offset(struct page *page)
0806 {
0807     return ((loff_t)page_index(page)) << PAGE_SHIFT;
0808 }
0809 
0810 /**
0811  * folio_pos - Returns the byte position of this folio in its file.
0812  * @folio: The folio.
0813  */
0814 static inline loff_t folio_pos(struct folio *folio)
0815 {
0816     return page_offset(&folio->page);
0817 }
0818 
0819 /**
0820  * folio_file_pos - Returns the byte position of this folio in its file.
0821  * @folio: The folio.
0822  *
0823  * This differs from folio_pos() for folios which belong to a swap file.
0824  * NFS is the only filesystem today which needs to use folio_file_pos().
0825  */
0826 static inline loff_t folio_file_pos(struct folio *folio)
0827 {
0828     return page_file_offset(&folio->page);
0829 }
0830 
0831 /*
0832  * Get the offset in PAGE_SIZE (even for hugetlb folios).
0833  * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
0834  */
0835 static inline pgoff_t folio_pgoff(struct folio *folio)
0836 {
0837     if (unlikely(folio_test_hugetlb(folio)))
0838         return hugetlb_basepage_index(&folio->page);
0839     return folio->index;
0840 }
0841 
0842 extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
0843                      unsigned long address);
0844 
0845 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
0846                     unsigned long address)
0847 {
0848     pgoff_t pgoff;
0849     if (unlikely(is_vm_hugetlb_page(vma)))
0850         return linear_hugepage_index(vma, address);
0851     pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
0852     pgoff += vma->vm_pgoff;
0853     return pgoff;
0854 }
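
/*
 * Worked example: for a VMA with vm_start = 0x7f0000001000 and
 * vm_pgoff = 16, a fault at address 0x7f0000003000 lies two pages into
 * the VMA, so linear_page_index() returns 16 + 2 = 18.
 */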
0855 
0856 struct wait_page_key {
0857     struct folio *folio;
0858     int bit_nr;
0859     int page_match;
0860 };
0861 
0862 struct wait_page_queue {
0863     struct folio *folio;
0864     int bit_nr;
0865     wait_queue_entry_t wait;
0866 };
0867 
0868 static inline bool wake_page_match(struct wait_page_queue *wait_page,
0869                   struct wait_page_key *key)
0870 {
0871     if (wait_page->folio != key->folio)
0872         return false;
0873     key->page_match = 1;
0874 
0875     if (wait_page->bit_nr != key->bit_nr)
0876         return false;
0877 
0878     return true;
0879 }
0880 
0881 void __folio_lock(struct folio *folio);
0882 int __folio_lock_killable(struct folio *folio);
0883 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
0884                 unsigned int flags);
0885 void unlock_page(struct page *page);
0886 void folio_unlock(struct folio *folio);
0887 
0888 /**
0889  * folio_trylock() - Attempt to lock a folio.
0890  * @folio: The folio to attempt to lock.
0891  *
0892  * Sometimes it is undesirable to wait for a folio to be unlocked (eg
0893  * when the locks are being taken in the wrong order, or if making
0894  * progress through a batch of folios is more important than processing
0895  * them in order).  Usually folio_lock() is the correct function to call.
0896  *
0897  * Context: Any context.
0898  * Return: Whether the lock was successfully acquired.
0899  */
0900 static inline bool folio_trylock(struct folio *folio)
0901 {
0902     return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
0903 }
0904 
0905 /*
0906  * Return true if the page was successfully locked
0907  */
0908 static inline int trylock_page(struct page *page)
0909 {
0910     return folio_trylock(page_folio(page));
0911 }
0912 
0913 /**
0914  * folio_lock() - Lock this folio.
0915  * @folio: The folio to lock.
0916  *
0917  * The folio lock protects against many things, probably more than it
0918  * should.  It is primarily held while a folio is being brought uptodate,
0919  * either from its backing file or from swap.  It is also held while a
0920  * folio is being truncated from its address_space, so holding the lock
0921  * is sufficient to keep folio->mapping stable.
0922  *
0923  * The folio lock is also held while write() is modifying the page to
0924  * provide POSIX atomicity guarantees (as long as the write does not
0925  * cross a page boundary).  Other modifications to the data in the folio
0926  * do not hold the folio lock and can race with writes, eg DMA and stores
0927  * to mapped pages.
0928  *
0929  * Context: May sleep.  If you need to acquire the locks of two or
0930  * more folios, they must be in order of ascending index, if they are
0931  * in the same address_space.  If they are in different address_spaces,
0932  * acquire the lock of the folio which belongs to the address_space which
0933  * has the lowest address in memory first.
0934  */
0935 static inline void folio_lock(struct folio *folio)
0936 {
0937     might_sleep();
0938     if (!folio_trylock(folio))
0939         __folio_lock(folio);
0940 }
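
/*
 * Sketch of the ordering rule above for two folios in the same file:
 * lock the lower index first (swap() is from the core headers).
 *
 *    if (folio_a->index > folio_b->index)
 *        swap(folio_a, folio_b);
 *    folio_lock(folio_a);
 *    folio_lock(folio_b);
 */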
0941 
0942 /**
0943  * lock_page() - Lock the folio containing this page.
0944  * @page: The page to lock.
0945  *
0946  * See folio_lock() for a description of what the lock protects.
0947  * This is a legacy function and new code should probably use folio_lock()
0948  * instead.
0949  *
0950  * Context: May sleep.  Pages in the same folio share a lock, so do not
0951  * attempt to lock two pages which share a folio.
0952  */
0953 static inline void lock_page(struct page *page)
0954 {
0955     struct folio *folio;
0956     might_sleep();
0957 
0958     folio = page_folio(page);
0959     if (!folio_trylock(folio))
0960         __folio_lock(folio);
0961 }
0962 
0963 /**
0964  * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
0965  * @folio: The folio to lock.
0966  *
0967  * Attempts to lock the folio, like folio_lock(), except that the sleep
0968  * to acquire the lock is interruptible by a fatal signal.
0969  *
0970  * Context: May sleep; see folio_lock().
0971  * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
0972  */
0973 static inline int folio_lock_killable(struct folio *folio)
0974 {
0975     might_sleep();
0976     if (!folio_trylock(folio))
0977         return __folio_lock_killable(folio);
0978     return 0;
0979 }
0980 
0981 /*
0982  * lock_page_killable is like lock_page but can be interrupted by fatal
0983  * signals.  It returns 0 if it locked the page and -EINTR if it was
0984  * killed while waiting.
0985  */
0986 static inline int lock_page_killable(struct page *page)
0987 {
0988     return folio_lock_killable(page_folio(page));
0989 }
0990 
0991 /*
0992  * lock_page_or_retry - Lock the page, unless this would block and the
0993  * caller indicated that it can handle a retry.
0994  *
0995  * Return value and mmap_lock implications depend on flags; see
0996  * __folio_lock_or_retry().
0997  */
0998 static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
0999                      unsigned int flags)
1000 {
1001     struct folio *folio;
1002     might_sleep();
1003 
1004     folio = page_folio(page);
1005     return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
1006 }
1007 
1008 /*
1009  * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
1010  * and should not be used directly.
1011  */
1012 void folio_wait_bit(struct folio *folio, int bit_nr);
1013 int folio_wait_bit_killable(struct folio *folio, int bit_nr);
1014 
1015 /* 
1016  * Wait for a folio to be unlocked.
1017  *
1018  * This must be called with the caller "holding" the folio,
1019  * ie with increased folio reference count so that the folio won't
1020  * go away during the wait.
1021  */
1022 static inline void folio_wait_locked(struct folio *folio)
1023 {
1024     if (folio_test_locked(folio))
1025         folio_wait_bit(folio, PG_locked);
1026 }
1027 
1028 static inline int folio_wait_locked_killable(struct folio *folio)
1029 {
1030     if (!folio_test_locked(folio))
1031         return 0;
1032     return folio_wait_bit_killable(folio, PG_locked);
1033 }
1034 
1035 static inline void wait_on_page_locked(struct page *page)
1036 {
1037     folio_wait_locked(page_folio(page));
1038 }
1039 
1040 static inline int wait_on_page_locked_killable(struct page *page)
1041 {
1042     return folio_wait_locked_killable(page_folio(page));
1043 }
1044 
1045 int folio_put_wait_locked(struct folio *folio, int state);
1046 void wait_on_page_writeback(struct page *page);
1047 void folio_wait_writeback(struct folio *folio);
1048 int folio_wait_writeback_killable(struct folio *folio);
1049 void end_page_writeback(struct page *page);
1050 void folio_end_writeback(struct folio *folio);
1051 void wait_for_stable_page(struct page *page);
1052 void folio_wait_stable(struct folio *folio);
1053 void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
1054 static inline void __set_page_dirty(struct page *page,
1055         struct address_space *mapping, int warn)
1056 {
1057     __folio_mark_dirty(page_folio(page), mapping, warn);
1058 }
1059 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
1060 void __folio_cancel_dirty(struct folio *folio);
1061 static inline void folio_cancel_dirty(struct folio *folio)
1062 {
1063     /* Avoid atomic ops, locking, etc. when not actually needed. */
1064     if (folio_test_dirty(folio))
1065         __folio_cancel_dirty(folio);
1066 }
1067 bool folio_clear_dirty_for_io(struct folio *folio);
1068 bool clear_page_dirty_for_io(struct page *page);
1069 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
1070 int __must_check folio_write_one(struct folio *folio);
1071 static inline int __must_check write_one_page(struct page *page)
1072 {
1073     return folio_write_one(page_folio(page));
1074 }
1075 
1076 int __set_page_dirty_nobuffers(struct page *page);
1077 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
1078 
1079 #ifdef CONFIG_MIGRATION
1080 int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
1081         struct folio *src, enum migrate_mode mode);
1082 #else
1083 #define filemap_migrate_folio NULL
1084 #endif
1085 void page_endio(struct page *page, bool is_write, int err);
1086 
1087 void folio_end_private_2(struct folio *folio);
1088 void folio_wait_private_2(struct folio *folio);
1089 int folio_wait_private_2_killable(struct folio *folio);
1090 
1091 /*
1092  * Add an arbitrary waiter to a page's wait queue
1093  */
1094 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
1095 
1096 /*
1097  * Fault in userspace address range.
1098  */
1099 size_t fault_in_writeable(char __user *uaddr, size_t size);
1100 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
1101 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
1102 size_t fault_in_readable(const char __user *uaddr, size_t size);
1103 
1104 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
1105         pgoff_t index, gfp_t gfp);
1106 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1107         pgoff_t index, gfp_t gfp);
1108 void filemap_remove_folio(struct folio *folio);
1109 void delete_from_page_cache(struct page *page);
1110 void __filemap_remove_folio(struct folio *folio, void *shadow);
1111 void replace_page_cache_page(struct page *old, struct page *new);
1112 void delete_from_page_cache_batch(struct address_space *mapping,
1113                   struct folio_batch *fbatch);
1114 int try_to_release_page(struct page *page, gfp_t gfp);
1115 bool filemap_release_folio(struct folio *folio, gfp_t gfp);
1116 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
1117         int whence);
1118 
1119 /* Must be non-static for BPF error injection */
1120 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
1121         pgoff_t index, gfp_t gfp, void **shadowp);
1122 
1123 bool filemap_range_has_writeback(struct address_space *mapping,
1124                  loff_t start_byte, loff_t end_byte);
1125 
1126 /**
1127  * filemap_range_needs_writeback - check if range potentially needs writeback
1128  * @mapping:           address space within which to check
1129  * @start_byte:        offset in bytes where the range starts
1130  * @end_byte:          offset in bytes where the range ends (inclusive)
1131  *
1132  * Find at least one page in the range supplied, usually used to check if
1133  * direct writing in this range will trigger a writeback. Used by O_DIRECT
1134  * read/write with IOCB_NOWAIT, to see if the caller needs to do
1135  * filemap_write_and_wait_range() before proceeding.
1136  *
1137  * Return: %true if the caller should do filemap_write_and_wait_range() before
1138  * doing O_DIRECT to a page in this range, %false otherwise.
1139  */
1140 static inline bool filemap_range_needs_writeback(struct address_space *mapping,
1141                          loff_t start_byte,
1142                          loff_t end_byte)
1143 {
1144     if (!mapping->nrpages)
1145         return false;
1146     if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
1147         !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
1148         return false;
1149     return filemap_range_has_writeback(mapping, start_byte, end_byte);
1150 }
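
/*
 * Sketch of the intended use in a direct I/O path, for a request
 * covering [pos, pos + count):
 *
 *    if (iocb->ki_flags & IOCB_NOWAIT) {
 *        if (filemap_range_needs_writeback(mapping, pos,
 *                          pos + count - 1))
 *            return -EAGAIN;
 *    } else {
 *        err = filemap_write_and_wait_range(mapping, pos,
 *                           pos + count - 1);
 *        if (err)
 *            return err;
 *    }
 */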
1151 
1152 /**
1153  * struct readahead_control - Describes a readahead request.
1154  *
1155  * A readahead request is for consecutive pages.  Filesystems which
1156  * implement the ->readahead method should call readahead_page() or
1157  * readahead_page_batch() in a loop and attempt to start I/O against
1158  * each page in the request.
1159  *
1160  * Most of the fields in this struct are private and should be accessed
1161  * by the functions below.
1162  *
1163  * @file: The file, used primarily by network filesystems for authentication.
1164  *    May be NULL if invoked internally by the filesystem.
1165  * @mapping: Readahead this filesystem object.
1166  * @ra: File readahead state.  May be NULL.
1167  */
1168 struct readahead_control {
1169     struct file *file;
1170     struct address_space *mapping;
1171     struct file_ra_state *ra;
1172 /* private: use the readahead_* accessors instead */
1173     pgoff_t _index;
1174     unsigned int _nr_pages;
1175     unsigned int _batch_count;
1176 };
1177 
1178 #define DEFINE_READAHEAD(ractl, f, r, m, i)             \
1179     struct readahead_control ractl = {              \
1180         .file = f,                      \
1181         .mapping = m,                       \
1182         .ra = r,                        \
1183         ._index = i,                        \
1184     }
1185 
1186 #define VM_READAHEAD_PAGES  (SZ_128K / PAGE_SIZE)
1187 
1188 void page_cache_ra_unbounded(struct readahead_control *,
1189         unsigned long nr_to_read, unsigned long lookahead_count);
1190 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
1191 void page_cache_async_ra(struct readahead_control *, struct folio *,
1192         unsigned long req_count);
1193 void readahead_expand(struct readahead_control *ractl,
1194               loff_t new_start, size_t new_len);
1195 
1196 /**
1197  * page_cache_sync_readahead - generic file readahead
1198  * @mapping: address_space which holds the pagecache and I/O vectors
1199  * @ra: file_ra_state which holds the readahead state
1200  * @file: Used by the filesystem for authentication.
1201  * @index: Index of first page to be read.
1202  * @req_count: Total number of pages being read by the caller.
1203  *
1204  * page_cache_sync_readahead() should be called when a cache miss happened:
1205  * it will submit the read.  The readahead logic may decide to piggyback more
1206  * pages onto the read request if access patterns suggest it will improve
1207  * performance.
1208  */
1209 static inline
1210 void page_cache_sync_readahead(struct address_space *mapping,
1211         struct file_ra_state *ra, struct file *file, pgoff_t index,
1212         unsigned long req_count)
1213 {
1214     DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1215     page_cache_sync_ra(&ractl, req_count);
1216 }
1217 
1218 /**
1219  * page_cache_async_readahead - file readahead for marked pages
1220  * @mapping: address_space which holds the pagecache and I/O vectors
1221  * @ra: file_ra_state which holds the readahead state
1222  * @file: Used by the filesystem for authentication.
1223  * @folio: The folio at @index which triggered the readahead call.
1224  * @index: Index of first page to be read.
1225  * @req_count: Total number of pages being read by the caller.
1226  *
1227  * page_cache_async_readahead() should be called when a page is used which
1228  * is marked as PageReadahead; this is a marker to suggest that the application
1229  * has used up enough of the readahead window that we should start pulling in
1230  * more pages.
1231  */
1232 static inline
1233 void page_cache_async_readahead(struct address_space *mapping,
1234         struct file_ra_state *ra, struct file *file,
1235         struct folio *folio, pgoff_t index, unsigned long req_count)
1236 {
1237     DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1238     page_cache_async_ra(&ractl, folio, req_count);
1239 }
1240 
1241 static inline struct folio *__readahead_folio(struct readahead_control *ractl)
1242 {
1243     struct folio *folio;
1244 
1245     BUG_ON(ractl->_batch_count > ractl->_nr_pages);
1246     ractl->_nr_pages -= ractl->_batch_count;
1247     ractl->_index += ractl->_batch_count;
1248 
1249     if (!ractl->_nr_pages) {
1250         ractl->_batch_count = 0;
1251         return NULL;
1252     }
1253 
1254     folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
1255     VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1256     ractl->_batch_count = folio_nr_pages(folio);
1257 
1258     return folio;
1259 }
1260 
1261 /**
1262  * readahead_page - Get the next page to read.
1263  * @ractl: The current readahead request.
1264  *
1265  * Context: The page is locked and has an elevated refcount.  The caller
0266  * should decrease the refcount once the page has been submitted for I/O
1267  * and unlock the page once all I/O to that page has completed.
1268  * Return: A pointer to the next page, or %NULL if we are done.
1269  */
1270 static inline struct page *readahead_page(struct readahead_control *ractl)
1271 {
1272     struct folio *folio = __readahead_folio(ractl);
1273 
1274     return &folio->page;
1275 }
1276 
1277 /**
1278  * readahead_folio - Get the next folio to read.
1279  * @ractl: The current readahead request.
1280  *
1281  * Context: The folio is locked.  The caller should unlock the folio once
1282  * all I/O to that folio has completed.
1283  * Return: A pointer to the next folio, or %NULL if we are done.
1284  */
1285 static inline struct folio *readahead_folio(struct readahead_control *ractl)
1286 {
1287     struct folio *folio = __readahead_folio(ractl);
1288 
1289     if (folio)
1290         folio_put(folio);
1291     return folio;
1292 }
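
/*
 * Sketch of a ->readahead() implementation built on readahead_folio();
 * my_start_read() is hypothetical and is expected to unlock the folio
 * when I/O completes.
 *
 *    static void my_readahead(struct readahead_control *ractl)
 *    {
 *        struct folio *folio;
 *
 *        while ((folio = readahead_folio(ractl)))
 *            my_start_read(folio);
 *    }
 */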
1293 
1294 static inline unsigned int __readahead_batch(struct readahead_control *rac,
1295         struct page **array, unsigned int array_sz)
1296 {
1297     unsigned int i = 0;
1298     XA_STATE(xas, &rac->mapping->i_pages, 0);
1299     struct page *page;
1300 
1301     BUG_ON(rac->_batch_count > rac->_nr_pages);
1302     rac->_nr_pages -= rac->_batch_count;
1303     rac->_index += rac->_batch_count;
1304     rac->_batch_count = 0;
1305 
1306     xas_set(&xas, rac->_index);
1307     rcu_read_lock();
1308     xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
1309         if (xas_retry(&xas, page))
1310             continue;
1311         VM_BUG_ON_PAGE(!PageLocked(page), page);
1312         VM_BUG_ON_PAGE(PageTail(page), page);
1313         array[i++] = page;
1314         rac->_batch_count += thp_nr_pages(page);
1315         if (i == array_sz)
1316             break;
1317     }
1318     rcu_read_unlock();
1319 
1320     return i;
1321 }
1322 
1323 /**
1324  * readahead_page_batch - Get a batch of pages to read.
1325  * @rac: The current readahead request.
1326  * @array: An array of pointers to struct page.
1327  *
1328  * Context: The pages are locked and have an elevated refcount.  The caller
1329  * should decrease the refcount on each page once it has been submitted for
1330  * I/O and unlock each page once all I/O to it has completed.
1331  * Return: The number of pages placed in the array.  0 indicates the request
1332  * is complete.
1333  */
1334 #define readahead_page_batch(rac, array)                \
1335     __readahead_batch(rac, array, ARRAY_SIZE(array))
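
/*
 * Sketch: draining a request in fixed-size batches.  my_submit() is
 * hypothetical; each page must still be unlocked when its I/O ends.
 *
 *    struct page *pages[16];
 *    unsigned int i, nr;
 *
 *    while ((nr = readahead_page_batch(rac, pages)))
 *        for (i = 0; i < nr; i++)
 *            my_submit(pages[i]);
 */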
1336 
1337 /**
1338  * readahead_pos - The byte offset into the file of this readahead request.
1339  * @rac: The readahead request.
1340  */
1341 static inline loff_t readahead_pos(struct readahead_control *rac)
1342 {
1343     return (loff_t)rac->_index * PAGE_SIZE;
1344 }
1345 
1346 /**
1347  * readahead_length - The number of bytes in this readahead request.
1348  * @rac: The readahead request.
1349  */
1350 static inline size_t readahead_length(struct readahead_control *rac)
1351 {
1352     return rac->_nr_pages * PAGE_SIZE;
1353 }
1354 
1355 /**
1356  * readahead_index - The index of the first page in this readahead request.
1357  * @rac: The readahead request.
1358  */
1359 static inline pgoff_t readahead_index(struct readahead_control *rac)
1360 {
1361     return rac->_index;
1362 }
1363 
1364 /**
1365  * readahead_count - The number of pages in this readahead request.
1366  * @rac: The readahead request.
1367  */
1368 static inline unsigned int readahead_count(struct readahead_control *rac)
1369 {
1370     return rac->_nr_pages;
1371 }
1372 
1373 /**
1374  * readahead_batch_length - The number of bytes in the current batch.
1375  * @rac: The readahead request.
1376  */
1377 static inline size_t readahead_batch_length(struct readahead_control *rac)
1378 {
1379     return rac->_batch_count * PAGE_SIZE;
1380 }
1381 
1382 static inline unsigned long dir_pages(struct inode *inode)
1383 {
1384     return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
1385                    PAGE_SHIFT;
1386 }
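
/*
 * Example: with 4KiB pages (PAGE_SHIFT == 12), an inode with
 * i_size = 10000 gives dir_pages() = (10000 + 4095) >> 12 = 3.
 */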
1387 
1388 /**
1389  * folio_mkwrite_check_truncate - check if folio was truncated
1390  * @folio: the folio to check
1391  * @inode: the inode to check the folio against
1392  *
1393  * Return: the number of bytes in the folio up to EOF,
1394  * or -EFAULT if the folio was truncated.
1395  */
1396 static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
1397                           struct inode *inode)
1398 {
1399     loff_t size = i_size_read(inode);
1400     pgoff_t index = size >> PAGE_SHIFT;
1401     size_t offset = offset_in_folio(folio, size);
1402 
1403     if (!folio->mapping)
1404         return -EFAULT;
1405 
1406     /* folio is wholly inside EOF */
1407     if (folio_next_index(folio) - 1 < index)
1408         return folio_size(folio);
1409     /* folio is wholly past EOF */
1410     if (folio->index > index || !offset)
1411         return -EFAULT;
1412     /* folio is partially inside EOF */
1413     return offset;
1414 }
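
/*
 * Sketch of use from a ->page_mkwrite() handler, with the folio locked
 * by the caller:
 *
 *    ssize_t len = folio_mkwrite_check_truncate(folio, inode);
 *
 *    if (len < 0)
 *        return VM_FAULT_NOPAGE;
 *    ... at most the first len bytes of the folio are within EOF ...
 */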
1415 
1416 /**
1417  * page_mkwrite_check_truncate - check if page was truncated
1418  * @page: the page to check
1419  * @inode: the inode to check the page against
1420  *
1421  * Returns the number of bytes in the page up to EOF,
1422  * or -EFAULT if the page was truncated.
1423  */
1424 static inline int page_mkwrite_check_truncate(struct page *page,
1425                           struct inode *inode)
1426 {
1427     loff_t size = i_size_read(inode);
1428     pgoff_t index = size >> PAGE_SHIFT;
1429     int offset = offset_in_page(size);
1430 
1431     if (page->mapping != inode->i_mapping)
1432         return -EFAULT;
1433 
1434     /* page is wholly inside EOF */
1435     if (page->index < index)
1436         return PAGE_SIZE;
1437     /* page is wholly past EOF */
1438     if (page->index > index || !offset)
1439         return -EFAULT;
1440     /* page is partially inside EOF */
1441     return offset;
1442 }
1443 
1444 /**
1445  * i_blocks_per_folio - How many blocks fit in this folio.
1446  * @inode: The inode which contains the blocks.
1447  * @folio: The folio.
1448  *
1449  * If the block size is larger than the size of this folio, return zero.
1450  *
1451  * Context: The caller should hold a refcount on the folio to prevent it
1452  * from being split.
1453  * Return: The number of filesystem blocks covered by this folio.
1454  */
1455 static inline
1456 unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
1457 {
1458     return folio_size(folio) >> inode->i_blkbits;
1459 }
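
/*
 * Example: a 16KiB folio over a filesystem with 512-byte blocks
 * (i_blkbits == 9) covers 16384 >> 9 = 32 blocks.
 */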
1460 
1461 static inline
1462 unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
1463 {
1464     return i_blocks_per_folio(inode, page_folio(page));
1465 }
1466 #endif /* _LINUX_PAGEMAP_H */