0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  linux/mm/filemap.c
0004  *
0005  * Copyright (C) 1994-1999  Linus Torvalds
0006  */
0007 
0008 /*
0009  * This file handles the generic file mmap semantics used by
0010  * most "normal" filesystems (but you don't /have/ to use this:
0011  * the NFS filesystem used to do this differently, for example)
0012  */
0013 #include <linux/export.h>
0014 #include <linux/compiler.h>
0015 #include <linux/dax.h>
0016 #include <linux/fs.h>
0017 #include <linux/sched/signal.h>
0018 #include <linux/uaccess.h>
0019 #include <linux/capability.h>
0020 #include <linux/kernel_stat.h>
0021 #include <linux/gfp.h>
0022 #include <linux/mm.h>
0023 #include <linux/swap.h>
0024 #include <linux/swapops.h>
0025 #include <linux/mman.h>
0026 #include <linux/pagemap.h>
0027 #include <linux/file.h>
0028 #include <linux/uio.h>
0029 #include <linux/error-injection.h>
0030 #include <linux/hash.h>
0031 #include <linux/writeback.h>
0032 #include <linux/backing-dev.h>
0033 #include <linux/pagevec.h>
0034 #include <linux/security.h>
0035 #include <linux/cpuset.h>
0036 #include <linux/hugetlb.h>
0037 #include <linux/memcontrol.h>
0038 #include <linux/shmem_fs.h>
0039 #include <linux/rmap.h>
0040 #include <linux/delayacct.h>
0041 #include <linux/psi.h>
0042 #include <linux/ramfs.h>
0043 #include <linux/page_idle.h>
0044 #include <linux/migrate.h>
0045 #include <asm/pgalloc.h>
0046 #include <asm/tlbflush.h>
0047 #include "internal.h"
0048 
0049 #define CREATE_TRACE_POINTS
0050 #include <trace/events/filemap.h>
0051 
0052 /*
0053  * FIXME: remove all knowledge of the buffer layer from the core VM
0054  */
0055 #include <linux/buffer_head.h> /* for try_to_free_buffers */
0056 
0057 #include <asm/mman.h>
0058 
0059 /*
0060  * Shared mappings implemented 30.11.1994. It's not fully working yet,
0061  * though.
0062  *
0063  * Shared mappings now work. 15.8.1995  Bruno.
0064  *
0065  * finished 'unifying' the page and buffer cache and SMP-threaded the
0066  * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
0067  *
0068  * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
0069  */
0070 
0071 /*
0072  * Lock ordering:
0073  *
0074  *  ->i_mmap_rwsem      (truncate_pagecache)
0075  *    ->private_lock        (__free_pte->block_dirty_folio)
0076  *      ->swap_lock     (exclusive_swap_page, others)
0077  *        ->i_pages lock
0078  *
0079  *  ->i_rwsem
0080  *    ->invalidate_lock     (acquired by fs in truncate path)
0081  *      ->i_mmap_rwsem      (truncate->unmap_mapping_range)
0082  *
0083  *  ->mmap_lock
0084  *    ->i_mmap_rwsem
0085  *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
0086  *        ->i_pages lock    (arch-dependent flush_dcache_mmap_lock)
0087  *
0088  *  ->mmap_lock
0089  *    ->invalidate_lock     (filemap_fault)
0090  *      ->lock_page     (filemap_fault, access_process_vm)
0091  *
0092  *  ->i_rwsem           (generic_perform_write)
0093  *    ->mmap_lock       (fault_in_readable->do_page_fault)
0094  *
0095  *  bdi->wb.list_lock
0096  *    sb_lock           (fs/fs-writeback.c)
0097  *    ->i_pages lock        (__sync_single_inode)
0098  *
0099  *  ->i_mmap_rwsem
0100  *    ->anon_vma.lock       (vma_adjust)
0101  *
0102  *  ->anon_vma.lock
0103  *    ->page_table_lock or pte_lock (anon_vma_prepare and various)
0104  *
0105  *  ->page_table_lock or pte_lock
0106  *    ->swap_lock       (try_to_unmap_one)
0107  *    ->private_lock        (try_to_unmap_one)
0108  *    ->i_pages lock        (try_to_unmap_one)
0109  *    ->lruvec->lru_lock    (follow_page->mark_page_accessed)
0110  *    ->lruvec->lru_lock    (check_pte_range->isolate_lru_page)
0111  *    ->private_lock        (page_remove_rmap->set_page_dirty)
0112  *    ->i_pages lock        (page_remove_rmap->set_page_dirty)
0113  *    bdi.wb->list_lock     (page_remove_rmap->set_page_dirty)
0114  *    ->inode->i_lock       (page_remove_rmap->set_page_dirty)
0115  *    ->memcg->move_lock    (page_remove_rmap->lock_page_memcg)
0116  *    bdi.wb->list_lock     (zap_pte_range->set_page_dirty)
0117  *    ->inode->i_lock       (zap_pte_range->set_page_dirty)
0118  *    ->private_lock        (zap_pte_range->block_dirty_folio)
0119  *
0120  * ->i_mmap_rwsem
0121  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
0122  */
0123 
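/*
 * Illustrative sketch, not part of filemap.c: a truncate-to-zero style
 * path that honours the ordering documented above.  The function name
 * is hypothetical; the lock helpers are the real ones, shown only to
 * make the i_rwsem -> invalidate_lock -> i_mmap_rwsem order concrete.
 */
static void truncate_lock_order_sketch(struct inode *inode)
{
    struct address_space *mapping = inode->i_mapping;

    inode_lock(inode);                  /* ->i_rwsem */
    filemap_invalidate_lock(mapping);   /* ->invalidate_lock */
    /* unmap_mapping_range() takes ->i_mmap_rwsem internally */
    unmap_mapping_range(mapping, 0, 0, 1);
    filemap_invalidate_unlock(mapping);
    inode_unlock(inode);
}
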
0124 static void page_cache_delete(struct address_space *mapping,
0125                    struct folio *folio, void *shadow)
0126 {
0127     XA_STATE(xas, &mapping->i_pages, folio->index);
0128     long nr = 1;
0129 
0130     mapping_set_update(&xas, mapping);
0131 
0132     /* hugetlb pages are represented by a single entry in the xarray */
0133     if (!folio_test_hugetlb(folio)) {
0134         xas_set_order(&xas, folio->index, folio_order(folio));
0135         nr = folio_nr_pages(folio);
0136     }
0137 
0138     VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
0139 
0140     xas_store(&xas, shadow);
0141     xas_init_marks(&xas);
0142 
0143     folio->mapping = NULL;
0144     /* Leave page->index set: truncation lookup relies upon it */
0145     mapping->nrpages -= nr;
0146 }
0147 
0148 static void filemap_unaccount_folio(struct address_space *mapping,
0149         struct folio *folio)
0150 {
0151     long nr;
0152 
0153     VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
0154     if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
0155         pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
0156              current->comm, folio_pfn(folio));
0157         dump_page(&folio->page, "still mapped when deleted");
0158         dump_stack();
0159         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
0160 
0161         if (mapping_exiting(mapping) && !folio_test_large(folio)) {
0162             int mapcount = page_mapcount(&folio->page);
0163 
0164             if (folio_ref_count(folio) >= mapcount + 2) {
0165                 /*
0166                  * All vmas have already been torn down, so it's
0167                  * a good bet that actually the page is unmapped
0168                  * and we'd rather not leak it: if we're wrong,
0169                  * another bad page check should catch it later.
0170                  */
0171                 page_mapcount_reset(&folio->page);
0172                 folio_ref_sub(folio, mapcount);
0173             }
0174         }
0175     }
0176 
0177     /* hugetlb folios do not participate in page cache accounting. */
0178     if (folio_test_hugetlb(folio))
0179         return;
0180 
0181     nr = folio_nr_pages(folio);
0182 
0183     __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
0184     if (folio_test_swapbacked(folio)) {
0185         __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
0186         if (folio_test_pmd_mappable(folio))
0187             __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
0188     } else if (folio_test_pmd_mappable(folio)) {
0189         __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
0190         filemap_nr_thps_dec(mapping);
0191     }
0192 
0193     /*
0194      * At this point the folio must have been written back or cleaned
0195      * by truncate.  A dirty folio here signals a bug and, on ordinary
0196      * filesystems, the loss of unwritten data.
0197      *
0198      * It is harmless on in-memory filesystems like tmpfs, though; and
0199      * it can occur when a driver that did get_user_pages() sets the
0200      * page dirty before putting it, while the inode is being evicted.
0201      *
0202      * The code below fixes the dirty accounting after removing the
0203      * folio entirely, but leaves the dirty flag set: that has no
0204      * effect on a truncated folio and will in any case be cleared
0205      * before the folio is returned to the buddy allocator.
0206      */
0207     if (WARN_ON_ONCE(folio_test_dirty(folio) &&
0208              mapping_can_writeback(mapping)))
0209         folio_account_cleaned(folio, inode_to_wb(mapping->host));
0210 }
0211 
0212 /*
0213  * Delete a page from the page cache and free it. Caller has to make
0214  * sure the page is locked and that nobody else uses it - or that usage
0215  * is safe.  The caller must hold the i_pages lock.
0216  */
0217 void __filemap_remove_folio(struct folio *folio, void *shadow)
0218 {
0219     struct address_space *mapping = folio->mapping;
0220 
0221     trace_mm_filemap_delete_from_page_cache(folio);
0222     filemap_unaccount_folio(mapping, folio);
0223     page_cache_delete(mapping, folio, shadow);
0224 }
0225 
0226 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
0227 {
0228     void (*free_folio)(struct folio *);
0229     int refs = 1;
0230 
0231     free_folio = mapping->a_ops->free_folio;
0232     if (free_folio)
0233         free_folio(folio);
0234 
0235     if (folio_test_large(folio) && !folio_test_hugetlb(folio))
0236         refs = folio_nr_pages(folio);
0237     folio_put_refs(folio, refs);
0238 }
0239 
0240 /**
0241  * filemap_remove_folio - Remove folio from page cache.
0242  * @folio: The folio.
0243  *
0244  * This must be called only on folios that are locked and have been
0245  * verified to be in the page cache.  It will never put the folio into
0246  * the free list because the caller has a reference on the page.
0247  */
0248 void filemap_remove_folio(struct folio *folio)
0249 {
0250     struct address_space *mapping = folio->mapping;
0251 
0252     BUG_ON(!folio_test_locked(folio));
0253     spin_lock(&mapping->host->i_lock);
0254     xa_lock_irq(&mapping->i_pages);
0255     __filemap_remove_folio(folio, NULL);
0256     xa_unlock_irq(&mapping->i_pages);
0257     if (mapping_shrinkable(mapping))
0258         inode_add_lru(mapping->host);
0259     spin_unlock(&mapping->host->i_lock);
0260 
0261     filemap_free_folio(mapping, folio);
0262 }
0263 
0264 /*
0265  * page_cache_delete_batch - delete several folios from page cache
0266  * @mapping: the mapping to which folios belong
0267  * @fbatch: batch of folios to delete
0268  *
0269  * The function walks over mapping->i_pages and removes folios passed in
0270  * @fbatch from the mapping. The function expects @fbatch to be sorted
0271  * by page index and is optimised for it to be dense.
0272  * It tolerates holes in @fbatch (mapping entries at those indices are not
0273  * modified).
0274  *
0275  * The function expects the i_pages lock to be held.
0276  */
0277 static void page_cache_delete_batch(struct address_space *mapping,
0278                  struct folio_batch *fbatch)
0279 {
0280     XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
0281     long total_pages = 0;
0282     int i = 0;
0283     struct folio *folio;
0284 
0285     mapping_set_update(&xas, mapping);
0286     xas_for_each(&xas, folio, ULONG_MAX) {
0287         if (i >= folio_batch_count(fbatch))
0288             break;
0289 
0290         /* A swap/dax/shadow entry got inserted? Skip it. */
0291         if (xa_is_value(folio))
0292             continue;
0293         /*
0294          * A page got inserted in our range? Skip it. We have our
0295          * pages locked so they are protected from being removed.
0296          * If we see a page whose index is higher than ours, it
0297          * means our page has been removed, which shouldn't be
0298          * possible because we're holding the page lock.
0299          */
0300         if (folio != fbatch->folios[i]) {
0301             VM_BUG_ON_FOLIO(folio->index >
0302                     fbatch->folios[i]->index, folio);
0303             continue;
0304         }
0305 
0306         WARN_ON_ONCE(!folio_test_locked(folio));
0307 
0308         folio->mapping = NULL;
0309         /* Leave folio->index set: truncation lookup relies on it */
0310 
0311         i++;
0312         xas_store(&xas, NULL);
0313         total_pages += folio_nr_pages(folio);
0314     }
0315     mapping->nrpages -= total_pages;
0316 }
0317 
0318 void delete_from_page_cache_batch(struct address_space *mapping,
0319                   struct folio_batch *fbatch)
0320 {
0321     int i;
0322 
0323     if (!folio_batch_count(fbatch))
0324         return;
0325 
0326     spin_lock(&mapping->host->i_lock);
0327     xa_lock_irq(&mapping->i_pages);
0328     for (i = 0; i < folio_batch_count(fbatch); i++) {
0329         struct folio *folio = fbatch->folios[i];
0330 
0331         trace_mm_filemap_delete_from_page_cache(folio);
0332         filemap_unaccount_folio(mapping, folio);
0333     }
0334     page_cache_delete_batch(mapping, fbatch);
0335     xa_unlock_irq(&mapping->i_pages);
0336     if (mapping_shrinkable(mapping))
0337         inode_add_lru(mapping->host);
0338     spin_unlock(&mapping->host->i_lock);
0339 
0340     for (i = 0; i < folio_batch_count(fbatch); i++)
0341         filemap_free_folio(mapping, fbatch->folios[i]);
0342 }
0343 
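/*
 * Illustrative sketch, not part of filemap.c, modelled on the truncate
 * path in mm/truncate.c: @fbatch must already contain locked folios
 * from @mapping, sorted by index, with a reference held on each.  The
 * folios are unlocked and released only after the batched delete.
 */
static void drop_folio_batch_sketch(struct address_space *mapping,
                    struct folio_batch *fbatch)
{
    unsigned int i;

    delete_from_page_cache_batch(mapping, fbatch);
    for (i = 0; i < folio_batch_count(fbatch); i++)
        folio_unlock(fbatch->folios[i]);
    folio_batch_release(fbatch);
}
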
0344 int filemap_check_errors(struct address_space *mapping)
0345 {
0346     int ret = 0;
0347     /* Check for outstanding write errors */
0348     if (test_bit(AS_ENOSPC, &mapping->flags) &&
0349         test_and_clear_bit(AS_ENOSPC, &mapping->flags))
0350         ret = -ENOSPC;
0351     if (test_bit(AS_EIO, &mapping->flags) &&
0352         test_and_clear_bit(AS_EIO, &mapping->flags))
0353         ret = -EIO;
0354     return ret;
0355 }
0356 EXPORT_SYMBOL(filemap_check_errors);
0357 
0358 static int filemap_check_and_keep_errors(struct address_space *mapping)
0359 {
0360     /* Check for outstanding write errors */
0361     if (test_bit(AS_EIO, &mapping->flags))
0362         return -EIO;
0363     if (test_bit(AS_ENOSPC, &mapping->flags))
0364         return -ENOSPC;
0365     return 0;
0366 }
0367 
0368 /**
0369  * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
0370  * @mapping:    address space structure to write
0371  * @wbc:    the writeback_control controlling the writeout
0372  *
0373  * Call writepages on the mapping using the provided wbc to control the
0374  * writeout.
0375  *
0376  * Return: %0 on success, negative error code otherwise.
0377  */
0378 int filemap_fdatawrite_wbc(struct address_space *mapping,
0379                struct writeback_control *wbc)
0380 {
0381     int ret;
0382 
0383     if (!mapping_can_writeback(mapping) ||
0384         !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
0385         return 0;
0386 
0387     wbc_attach_fdatawrite_inode(wbc, mapping->host);
0388     ret = do_writepages(mapping, wbc);
0389     wbc_detach_inode(wbc);
0390     return ret;
0391 }
0392 EXPORT_SYMBOL(filemap_fdatawrite_wbc);
0393 
0394 /**
0395  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
0396  * @mapping:    address space structure to write
0397  * @start:  offset in bytes where the range starts
0398  * @end:    offset in bytes where the range ends (inclusive)
0399  * @sync_mode:  enable synchronous operation
0400  *
0401  * Start writeback against all of a mapping's dirty pages that lie
0402  * within the byte offsets <start, end> inclusive.
0403  *
0404  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
0405  * opposed to a regular memory cleansing writeback.  The difference between
0406  * these two operations is that if a dirty page/buffer is encountered, it must
0407  * be waited upon, and not just skipped over.
0408  *
0409  * Return: %0 on success, negative error code otherwise.
0410  */
0411 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
0412                 loff_t end, int sync_mode)
0413 {
0414     struct writeback_control wbc = {
0415         .sync_mode = sync_mode,
0416         .nr_to_write = LONG_MAX,
0417         .range_start = start,
0418         .range_end = end,
0419     };
0420 
0421     return filemap_fdatawrite_wbc(mapping, &wbc);
0422 }
0423 
0424 static inline int __filemap_fdatawrite(struct address_space *mapping,
0425     int sync_mode)
0426 {
0427     return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
0428 }
0429 
0430 int filemap_fdatawrite(struct address_space *mapping)
0431 {
0432     return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
0433 }
0434 EXPORT_SYMBOL(filemap_fdatawrite);
0435 
0436 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
0437                 loff_t end)
0438 {
0439     return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
0440 }
0441 EXPORT_SYMBOL(filemap_fdatawrite_range);
0442 
0443 /**
0444  * filemap_flush - mostly a non-blocking flush
0445  * @mapping:    target address_space
0446  *
0447  * This is a mostly non-blocking flush.  Not suitable for data-integrity
0448  * purposes - I/O may not be started against all dirty pages.
0449  *
0450  * Return: %0 on success, negative error code otherwise.
0451  */
0452 int filemap_flush(struct address_space *mapping)
0453 {
0454     return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
0455 }
0456 EXPORT_SYMBOL(filemap_flush);
0457 
0458 /**
0459  * filemap_range_has_page - check if a page exists in range.
0460  * @mapping:           address space within which to check
0461  * @start_byte:        offset in bytes where the range starts
0462  * @end_byte:          offset in bytes where the range ends (inclusive)
0463  *
0464  * Find at least one page in the supplied range.  This is usually used
0465  * to check whether direct writes in the range will trigger writeback.
0466  *
0467  * Return: %true if at least one page exists in the specified range,
0468  * %false otherwise.
0469  */
0470 bool filemap_range_has_page(struct address_space *mapping,
0471                loff_t start_byte, loff_t end_byte)
0472 {
0473     struct page *page;
0474     XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
0475     pgoff_t max = end_byte >> PAGE_SHIFT;
0476 
0477     if (end_byte < start_byte)
0478         return false;
0479 
0480     rcu_read_lock();
0481     for (;;) {
0482         page = xas_find(&xas, max);
0483         if (xas_retry(&xas, page))
0484             continue;
0485         /* Shadow entries don't count */
0486         if (xa_is_value(page))
0487             continue;
0488         /*
0489          * We don't need to try to pin this page; we're about to
0490          * release the RCU lock anyway.  It is enough to know that
0491          * there was a page here recently.
0492          */
0493         break;
0494     }
0495     rcu_read_unlock();
0496 
0497     return page != NULL;
0498 }
0499 EXPORT_SYMBOL(filemap_range_has_page);
0500 
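/*
 * Illustrative sketch, not part of filemap.c: IOCB_NOWAIT direct-I/O
 * paths use this check to bail out with -EAGAIN rather than block on
 * flushing cached pages.  The function name is hypothetical.
 */
static ssize_t dio_nowait_check_sketch(struct kiocb *iocb, size_t count)
{
    struct address_space *mapping = iocb->ki_filp->f_mapping;
    loff_t pos = iocb->ki_pos;

    if ((iocb->ki_flags & IOCB_NOWAIT) &&
        filemap_range_has_page(mapping, pos, pos + count - 1))
        return -EAGAIN;
    return 0;
}
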
0501 static void __filemap_fdatawait_range(struct address_space *mapping,
0502                      loff_t start_byte, loff_t end_byte)
0503 {
0504     pgoff_t index = start_byte >> PAGE_SHIFT;
0505     pgoff_t end = end_byte >> PAGE_SHIFT;
0506     struct pagevec pvec;
0507     int nr_pages;
0508 
0509     if (end_byte < start_byte)
0510         return;
0511 
0512     pagevec_init(&pvec);
0513     while (index <= end) {
0514         unsigned i;
0515 
0516         nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
0517                 end, PAGECACHE_TAG_WRITEBACK);
0518         if (!nr_pages)
0519             break;
0520 
0521         for (i = 0; i < nr_pages; i++) {
0522             struct page *page = pvec.pages[i];
0523 
0524             wait_on_page_writeback(page);
0525             ClearPageError(page);
0526         }
0527         pagevec_release(&pvec);
0528         cond_resched();
0529     }
0530 }
0531 
0532 /**
0533  * filemap_fdatawait_range - wait for writeback to complete
0534  * @mapping:        address space structure to wait for
0535  * @start_byte:     offset in bytes where the range starts
0536  * @end_byte:       offset in bytes where the range ends (inclusive)
0537  *
0538  * Walk the list of under-writeback pages of the given address space
0539  * in the given range and wait for all of them.  Check error status of
0540  * the address space and return it.
0541  *
0542  * Since the error status of the address space is cleared by this function,
0543  * callers are responsible for checking the return value and handling and/or
0544  * reporting the error.
0545  *
0546  * Return: error status of the address space.
0547  */
0548 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
0549                 loff_t end_byte)
0550 {
0551     __filemap_fdatawait_range(mapping, start_byte, end_byte);
0552     return filemap_check_errors(mapping);
0553 }
0554 EXPORT_SYMBOL(filemap_fdatawait_range);
0555 
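/*
 * Illustrative sketch, not part of filemap.c: the split write/wait API
 * lets a caller kick off writeback early, overlap other work with the
 * I/O, and block only once the data must be stable.  The function name
 * is hypothetical.
 */
static int flush_range_sketch(struct address_space *mapping,
                  loff_t start, loff_t end)
{
    int err;

    err = filemap_fdatawrite_range(mapping, start, end);
    if (err)
        return err;
    /* ... unrelated work can proceed while the I/O is in flight ... */
    return filemap_fdatawait_range(mapping, start, end);
}
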
0556 /**
0557  * filemap_fdatawait_range_keep_errors - wait for writeback to complete
0558  * @mapping:        address space structure to wait for
0559  * @start_byte:     offset in bytes where the range starts
0560  * @end_byte:       offset in bytes where the range ends (inclusive)
0561  *
0562  * Walk the list of under-writeback pages of the given address space in the
0563  * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
0564  * this function does not clear error status of the address space.
0565  *
0566  * Use this function if the caller does not handle errors itself.  Expected
0567  * call sites are system-wide / filesystem-wide data flushers, e.g. sync(2)
0568  * and fsfreeze(8).
0569  */
0570 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
0571         loff_t start_byte, loff_t end_byte)
0572 {
0573     __filemap_fdatawait_range(mapping, start_byte, end_byte);
0574     return filemap_check_and_keep_errors(mapping);
0575 }
0576 EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
0577 
0578 /**
0579  * file_fdatawait_range - wait for writeback to complete
0580  * @file:       file pointing to address space structure to wait for
0581  * @start_byte:     offset in bytes where the range starts
0582  * @end_byte:       offset in bytes where the range ends (inclusive)
0583  *
0584  * Walk the list of under-writeback pages of the address space that file
0585  * refers to, in the given range and wait for all of them.  Check error
0586  * status of the address space vs. the file->f_wb_err cursor and return it.
0587  *
0588  * Since the error status of the file is advanced by this function,
0589  * callers are responsible for checking the return value and handling and/or
0590  * reporting the error.
0591  *
0592  * Return: error status of the address space vs. the file->f_wb_err cursor.
0593  */
0594 int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
0595 {
0596     struct address_space *mapping = file->f_mapping;
0597 
0598     __filemap_fdatawait_range(mapping, start_byte, end_byte);
0599     return file_check_and_advance_wb_err(file);
0600 }
0601 EXPORT_SYMBOL(file_fdatawait_range);
0602 
0603 /**
0604  * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
0605  * @mapping: address space structure to wait for
0606  *
0607  * Walk the list of under-writeback pages of the given address space
0608  * and wait for all of them.  Unlike filemap_fdatawait(), this function
0609  * does not clear error status of the address space.
0610  *
0611  * Use this function if the caller does not handle errors itself.  Expected
0612  * call sites are system-wide / filesystem-wide data flushers, e.g. sync(2)
0613  * and fsfreeze(8).
0614  *
0615  * Return: error status of the address space.
0616  */
0617 int filemap_fdatawait_keep_errors(struct address_space *mapping)
0618 {
0619     __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
0620     return filemap_check_and_keep_errors(mapping);
0621 }
0622 EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
0623 
0624 /* Returns true if writeback might be needed or already in progress. */
0625 static bool mapping_needs_writeback(struct address_space *mapping)
0626 {
0627     return mapping->nrpages;
0628 }
0629 
0630 bool filemap_range_has_writeback(struct address_space *mapping,
0631                  loff_t start_byte, loff_t end_byte)
0632 {
0633     XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
0634     pgoff_t max = end_byte >> PAGE_SHIFT;
0635     struct page *page;
0636 
0637     if (end_byte < start_byte)
0638         return false;
0639 
0640     rcu_read_lock();
0641     xas_for_each(&xas, page, max) {
0642         if (xas_retry(&xas, page))
0643             continue;
0644         if (xa_is_value(page))
0645             continue;
0646         if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
0647             break;
0648     }
0649     rcu_read_unlock();
0650     return page != NULL;
0651 }
0652 EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
0653 
0654 /**
0655  * filemap_write_and_wait_range - write out & wait on a file range
0656  * @mapping:    the address_space for the pages
0657  * @lstart: offset in bytes where the range starts
0658  * @lend:   offset in bytes where the range ends (inclusive)
0659  *
0660  * Write out and wait upon file offsets lstart->lend, inclusive.
0661  *
0662  * Note that @lend is inclusive (describes the last byte to be written) so
0663  * that this function can be used to write to the very end-of-file (end = -1).
0664  *
0665  * Return: error status of the address space.
0666  */
0667 int filemap_write_and_wait_range(struct address_space *mapping,
0668                  loff_t lstart, loff_t lend)
0669 {
0670     int err = 0, err2;
0671 
0672     if (mapping_needs_writeback(mapping)) {
0673         err = __filemap_fdatawrite_range(mapping, lstart, lend,
0674                          WB_SYNC_ALL);
0675         /*
0676          * Even if the above returned an error, some pages may still
0677          * have been written (e.g. on -ENOSPC), so we wait for them.
0678          * -EIO is the special case: it may indicate that something
0679          * far worse (e.g. a bug) has happened, so we avoid waiting.
0680          */
0681         if (err != -EIO)
0682             __filemap_fdatawait_range(mapping, lstart, lend);
0683     }
0684     err2 = filemap_check_errors(mapping);
0685     if (!err)
0686         err = err2;
0687     return err;
0688 }
0689 EXPORT_SYMBOL(filemap_write_and_wait_range);
0690 
0691 void __filemap_set_wb_err(struct address_space *mapping, int err)
0692 {
0693     errseq_t eseq = errseq_set(&mapping->wb_err, err);
0694 
0695     trace_filemap_set_wb_err(mapping, eseq);
0696 }
0697 EXPORT_SYMBOL(__filemap_set_wb_err);
0698 
0699 /**
0700  * file_check_and_advance_wb_err - report the wb error (if any) that was
0701  *                 previously reported and advance wb_err to the current one
0702  * @file: struct file on which the error is being reported
0703  *
0704  * When userland calls fsync (or something like nfsd does the equivalent), we
0705  * want to report any writeback errors that occurred since the last fsync (or
0706  * since the file was opened if there haven't been any).
0707  *
0708  * Grab the wb_err from the mapping. If it matches what we have in the file,
0709  * then just quickly return 0. The file is all caught up.
0710  *
0711  * If it doesn't match, then take the mapping value, set the "seen" flag in
0712  * it and try to swap it into place. If it works, or another task beat us
0713  * to it with the new value, then update the f_wb_err and return the error
0714  * portion. The error at this point must be reported via proper channels
0715  * (a'la fsync, or NFS COMMIT operation, etc.).
0716  * (à la fsync, the NFS COMMIT operation, etc.).
0717  * While we handle mapping->wb_err with atomic operations, the f_wb_err
0718  * value is protected by the f_lock since we must ensure that it reflects
0719  * the latest value swapped in for this file descriptor.
0720  *
0721  * Return: %0 on success, negative error code otherwise.
0722  */
0723 int file_check_and_advance_wb_err(struct file *file)
0724 {
0725     int err = 0;
0726     errseq_t old = READ_ONCE(file->f_wb_err);
0727     struct address_space *mapping = file->f_mapping;
0728 
0729     /* Locklessly handle the common case where nothing has changed */
0730     if (errseq_check(&mapping->wb_err, old)) {
0731         /* Something changed, must use slow path */
0732         spin_lock(&file->f_lock);
0733         old = file->f_wb_err;
0734         err = errseq_check_and_advance(&mapping->wb_err,
0735                         &file->f_wb_err);
0736         trace_file_check_and_advance_wb_err(file, old);
0737         spin_unlock(&file->f_lock);
0738     }
0739 
0740     /*
0741      * We're mostly using this function as a drop-in replacement for
0742      * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
0743      * that the legacy code would have had on these flags.
0744      */
0745     clear_bit(AS_EIO, &mapping->flags);
0746     clear_bit(AS_ENOSPC, &mapping->flags);
0747     return err;
0748 }
0749 EXPORT_SYMBOL(file_check_and_advance_wb_err);
0750 
0751 /**
0752  * file_write_and_wait_range - write out & wait on a file range
0753  * @file:   file pointing to address_space with pages
0754  * @lstart: offset in bytes where the range starts
0755  * @lend:   offset in bytes where the range ends (inclusive)
0756  *
0757  * Write out and wait upon file offsets lstart->lend, inclusive.
0758  *
0759  * Note that @lend is inclusive (describes the last byte to be written) so
0760  * that this function can be used to write to the very end-of-file (end = -1).
0761  *
0762  * After writing out and waiting on the data, we check and advance the
0763  * f_wb_err cursor to the latest value, and return any errors detected there.
0764  *
0765  * Return: %0 on success, negative error code otherwise.
0766  */
0767 int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
0768 {
0769     int err = 0, err2;
0770     struct address_space *mapping = file->f_mapping;
0771 
0772     if (mapping_needs_writeback(mapping)) {
0773         err = __filemap_fdatawrite_range(mapping, lstart, lend,
0774                          WB_SYNC_ALL);
0775         /* See comment of filemap_write_and_wait() */
0776         if (err != -EIO)
0777             __filemap_fdatawait_range(mapping, lstart, lend);
0778     }
0779     err2 = file_check_and_advance_wb_err(file);
0780     if (!err)
0781         err = err2;
0782     return err;
0783 }
0784 EXPORT_SYMBOL(file_write_and_wait_range);
0785 
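/*
 * Illustrative sketch, not part of filemap.c: a minimal ->fsync()
 * built on file_write_and_wait_range().  The function name and the
 * empty metadata step are assumptions; a real filesystem flushes its
 * own metadata under the inode lock at that point.
 */
static int fsync_sketch(struct file *file, loff_t start, loff_t end,
            int datasync)
{
    struct inode *inode = file_inode(file);
    int err;

    err = file_write_and_wait_range(file, start, end);
    if (err)
        return err;

    inode_lock(inode);
    /* flush filesystem metadata for @inode here */
    inode_unlock(inode);
    return 0;
}
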
0786 /**
0787  * replace_page_cache_page - replace a pagecache page with a new one
0788  * @old:    page to be replaced
0789  * @new:    page to replace with
0790  *
0791  * This function replaces a page in the pagecache with a new one.  On
0792  * success it acquires the pagecache reference for the new page and
0793  * drops it for the old page.  Both the old and new pages must be
0794  * locked.  This function does not add the new page to the LRU, the
0795  * caller must do that.
0796  *
0797  * The remove + add is atomic.  This function cannot fail.
0798  */
0799 void replace_page_cache_page(struct page *old, struct page *new)
0800 {
0801     struct folio *fold = page_folio(old);
0802     struct folio *fnew = page_folio(new);
0803     struct address_space *mapping = old->mapping;
0804     void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
0805     pgoff_t offset = old->index;
0806     XA_STATE(xas, &mapping->i_pages, offset);
0807 
0808     VM_BUG_ON_PAGE(!PageLocked(old), old);
0809     VM_BUG_ON_PAGE(!PageLocked(new), new);
0810     VM_BUG_ON_PAGE(new->mapping, new);
0811 
0812     get_page(new);
0813     new->mapping = mapping;
0814     new->index = offset;
0815 
0816     mem_cgroup_migrate(fold, fnew);
0817 
0818     xas_lock_irq(&xas);
0819     xas_store(&xas, new);
0820 
0821     old->mapping = NULL;
0822     /* hugetlb pages do not participate in page cache accounting. */
0823     if (!PageHuge(old))
0824         __dec_lruvec_page_state(old, NR_FILE_PAGES);
0825     if (!PageHuge(new))
0826         __inc_lruvec_page_state(new, NR_FILE_PAGES);
0827     if (PageSwapBacked(old))
0828         __dec_lruvec_page_state(old, NR_SHMEM);
0829     if (PageSwapBacked(new))
0830         __inc_lruvec_page_state(new, NR_SHMEM);
0831     xas_unlock_irq(&xas);
0832     if (free_folio)
0833         free_folio(fold);
0834     folio_put(fold);
0835 }
0836 EXPORT_SYMBOL_GPL(replace_page_cache_page);
0837 
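/*
 * Illustrative sketch, not part of filemap.c: after replacing a cached
 * page the caller performs the LRU insertion that this function leaves
 * to it.  Both pages must already be locked, as required above.
 */
static void replace_and_lru_sketch(struct page *old, struct page *new)
{
    replace_page_cache_page(old, new);
    folio_add_lru(page_folio(new));
}
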
0838 noinline int __filemap_add_folio(struct address_space *mapping,
0839         struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
0840 {
0841     XA_STATE(xas, &mapping->i_pages, index);
0842     int huge = folio_test_hugetlb(folio);
0843     bool charged = false;
0844     long nr = 1;
0845 
0846     VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
0847     VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
0848     mapping_set_update(&xas, mapping);
0849 
0850     if (!huge) {
0851         int error = mem_cgroup_charge(folio, NULL, gfp);
0852         VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
0853         if (error)
0854             return error;
0855         charged = true;
0856         xas_set_order(&xas, index, folio_order(folio));
0857         nr = folio_nr_pages(folio);
0858     }
0859 
0860     gfp &= GFP_RECLAIM_MASK;
0861     folio_ref_add(folio, nr);
0862     folio->mapping = mapping;
0863     folio->index = xas.xa_index;
0864 
0865     do {
0866         unsigned int order = xa_get_order(xas.xa, xas.xa_index);
0867         void *entry, *old = NULL;
0868 
0869         if (order > folio_order(folio))
0870             xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
0871                     order, gfp);
0872         xas_lock_irq(&xas);
0873         xas_for_each_conflict(&xas, entry) {
0874             old = entry;
0875             if (!xa_is_value(entry)) {
0876                 xas_set_err(&xas, -EEXIST);
0877                 goto unlock;
0878             }
0879         }
0880 
0881         if (old) {
0882             if (shadowp)
0883                 *shadowp = old;
0884             /* entry may have been split before we acquired lock */
0885             order = xa_get_order(xas.xa, xas.xa_index);
0886             if (order > folio_order(folio)) {
0887                 /* How to handle large swap entries? */
0888                 BUG_ON(shmem_mapping(mapping));
0889                 xas_split(&xas, old, order);
0890                 xas_reset(&xas);
0891             }
0892         }
0893 
0894         xas_store(&xas, folio);
0895         if (xas_error(&xas))
0896             goto unlock;
0897 
0898         mapping->nrpages += nr;
0899 
0900         /* hugetlb pages do not participate in page cache accounting */
0901         if (!huge) {
0902             __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
0903             if (folio_test_pmd_mappable(folio))
0904                 __lruvec_stat_mod_folio(folio,
0905                         NR_FILE_THPS, nr);
0906         }
0907 unlock:
0908         xas_unlock_irq(&xas);
0909     } while (xas_nomem(&xas, gfp));
0910 
0911     if (xas_error(&xas))
0912         goto error;
0913 
0914     trace_mm_filemap_add_to_page_cache(folio);
0915     return 0;
0916 error:
0917     if (charged)
0918         mem_cgroup_uncharge(folio);
0919     folio->mapping = NULL;
0920     /* Leave page->index set: truncation relies upon it */
0921     folio_put_refs(folio, nr);
0922     return xas_error(&xas);
0923 }
0924 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
0925 
0926 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
0927                 pgoff_t index, gfp_t gfp)
0928 {
0929     void *shadow = NULL;
0930     int ret;
0931 
0932     __folio_set_locked(folio);
0933     ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
0934     if (unlikely(ret))
0935         __folio_clear_locked(folio);
0936     else {
0937         /*
0938          * The folio might have been evicted from cache only
0939          * recently, in which case it should be activated like
0940          * any other repeatedly accessed folio.
0941          * The exception is folios getting rewritten; evicting other
0942          * data from the working set, only to cache data that will
0943          * get overwritten with something else, is a waste of memory.
0944          */
0945         WARN_ON_ONCE(folio_test_active(folio));
0946         if (!(gfp & __GFP_WRITE) && shadow)
0947             workingset_refault(folio, shadow);
0948         folio_add_lru(folio);
0949     }
0950     return ret;
0951 }
0952 EXPORT_SYMBOL_GPL(filemap_add_folio);
0953 
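/*
 * Illustrative sketch, not part of filemap.c: the common
 * allocate-then-insert pattern, as used by readahead-style callers.
 * The function name is hypothetical.  On success the folio is returned
 * locked and already on the LRU; the caller unlocks it once it is
 * up to date.
 */
static struct folio *add_new_folio_sketch(struct address_space *mapping,
                      pgoff_t index)
{
    gfp_t gfp = mapping_gfp_mask(mapping);
    struct folio *folio;
    int err;

    folio = filemap_alloc_folio(gfp, 0);
    if (!folio)
        return ERR_PTR(-ENOMEM);

    err = filemap_add_folio(mapping, folio, index, gfp);
    if (err) {
        folio_put(folio);
        return ERR_PTR(err);    /* -EEXIST if already cached */
    }
    return folio;
}
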
0954 #ifdef CONFIG_NUMA
0955 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
0956 {
0957     int n;
0958     struct folio *folio;
0959 
0960     if (cpuset_do_page_mem_spread()) {
0961         unsigned int cpuset_mems_cookie;
0962         do {
0963             cpuset_mems_cookie = read_mems_allowed_begin();
0964             n = cpuset_mem_spread_node();
0965             folio = __folio_alloc_node(gfp, order, n);
0966         } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
0967 
0968         return folio;
0969     }
0970     return folio_alloc(gfp, order);
0971 }
0972 EXPORT_SYMBOL(filemap_alloc_folio);
0973 #endif
0974 
0975 /*
0976  * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
0977  *
0978  * Exclusively lock the invalidate_lock of each passed mapping that is not NULL.
0979  *
0980  * @mapping1: the first mapping to lock
0981  * @mapping2: the second mapping to lock
0982  */
0983 void filemap_invalidate_lock_two(struct address_space *mapping1,
0984                  struct address_space *mapping2)
0985 {
0986     if (mapping1 > mapping2)
0987         swap(mapping1, mapping2);
0988     if (mapping1)
0989         down_write(&mapping1->invalidate_lock);
0990     if (mapping2 && mapping1 != mapping2)
0991         down_write_nested(&mapping2->invalidate_lock, 1);
0992 }
0993 EXPORT_SYMBOL(filemap_invalidate_lock_two);
0994 
0995 /*
0996  * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
0997  *
0998  * Unlock the exclusive invalidate_lock of each passed mapping that is not NULL.
0999  *
1000  * @mapping1: the first mapping to unlock
1001  * @mapping2: the second mapping to unlock
1002  */
1003 void filemap_invalidate_unlock_two(struct address_space *mapping1,
1004                    struct address_space *mapping2)
1005 {
1006     if (mapping1)
1007         up_write(&mapping1->invalidate_lock);
1008     if (mapping2 && mapping1 != mapping2)
1009         up_write(&mapping2->invalidate_lock);
1010 }
1011 EXPORT_SYMBOL(filemap_invalidate_unlock_two);
1012 
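/*
 * Illustrative sketch, not part of filemap.c: serialising an operation
 * that invalidates pages of two files at once.  The helpers order the
 * two locks by address internally, so the caller need not care which
 * mapping is which.
 */
static void cross_file_invalidate_sketch(struct file *src, struct file *dst)
{
    filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);
    /* ... operate on both mappings ... */
    filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
}
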
1013 /*
1014  * In order to wait for pages to become available there must be
1015  * waitqueues associated with pages.  We use a hash table of
1016  * waitqueues whose bucket discipline is to keep all waiters on
1017  * the same queue and wake them all when any of the pages becomes
1018  * available, with the woken contexts re-checking that the page
1019  * they care about really did become available.  This saves space
1020  * at the cost of "thundering herd" phenomena during rare hash
1021  * collisions.
1022  */
1023 #define PAGE_WAIT_TABLE_BITS 8
1024 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1025 static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1026 
1027 static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1028 {
1029     return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1030 }
1031 
1032 void __init pagecache_init(void)
1033 {
1034     int i;
1035 
1036     for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1037         init_waitqueue_head(&folio_wait_table[i]);
1038 
1039     page_writeback_init();
1040 }
1041 
1042 /*
1043  * The page wait code treats the "wait->flags" somewhat unusually, because
1044  * we have multiple different kinds of waits, not just the usual "exclusive"
1045  * one.
1046  *
1047  * We have:
1048  *
1049  *  (a) no special bits set:
1050  *
1051  *  We're just waiting for the bit to be released, and when a waker
1052  *  calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1053  *  and remove it from the wait queue.
1054  *
1055  *  Simple and straightforward.
1056  *
1057  *  (b) WQ_FLAG_EXCLUSIVE:
1058  *
1059  *  The waiter is waiting to get the lock, and only one waiter should
1060  *  be woken up to avoid any thundering herd behavior. We'll set the
1061  *  WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1062  *
1063  *  This is the traditional exclusive wait.
1064  *
1065  *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1066  *
1067  *  The waiter is waiting to get the bit, and additionally wants the
1068  *  lock to be transferred to it for fair lock behavior. If the lock
1069  *  cannot be taken, we stop walking the wait queue without waking
1070  *  the waiter.
1071  *
1072  *  This is the "fair lock handoff" case, and in addition to setting
1073  *  WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1074  *  that it now has the lock.
1075  */
1076 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1077 {
1078     unsigned int flags;
1079     struct wait_page_key *key = arg;
1080     struct wait_page_queue *wait_page
1081         = container_of(wait, struct wait_page_queue, wait);
1082 
1083     if (!wake_page_match(wait_page, key))
1084         return 0;
1085 
1086     /*
1087      * If it's a lock handoff wait, we get the bit for it, and
1088      * stop walking (and do not wake it up) if we can't.
1089      */
1090     flags = wait->flags;
1091     if (flags & WQ_FLAG_EXCLUSIVE) {
1092         if (test_bit(key->bit_nr, &key->folio->flags))
1093             return -1;
1094         if (flags & WQ_FLAG_CUSTOM) {
1095             if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1096                 return -1;
1097             flags |= WQ_FLAG_DONE;
1098         }
1099     }
1100 
1101     /*
1102      * We are holding the wait-queue lock, but the waiter that
1103      * is waiting for this will be checking the flags without
1104      * any locking.
1105      *
1106      * So update the flags atomically, and wake up the waiter
1107      * afterwards to avoid any races. This store-release pairs
1108      * with the load-acquire in folio_wait_bit_common().
1109      */
1110     smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1111     wake_up_state(wait->private, mode);
1112 
1113     /*
1114      * Ok, we have successfully done what we're waiting for,
1115      * and we can unconditionally remove the wait entry.
1116      *
1117      * Note that this pairs with the "finish_wait()" in the
1118      * waiter, and has to be the absolute last thing we do.
1119      * After this list_del_init(&wait->entry) the wait entry
1120      * might be de-allocated and the process might even have
1121      * exited.
1122      */
1123     list_del_init_careful(&wait->entry);
1124     return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1125 }
1126 
1127 static void folio_wake_bit(struct folio *folio, int bit_nr)
1128 {
1129     wait_queue_head_t *q = folio_waitqueue(folio);
1130     struct wait_page_key key;
1131     unsigned long flags;
1132     wait_queue_entry_t bookmark;
1133 
1134     key.folio = folio;
1135     key.bit_nr = bit_nr;
1136     key.page_match = 0;
1137 
1138     bookmark.flags = 0;
1139     bookmark.private = NULL;
1140     bookmark.func = NULL;
1141     INIT_LIST_HEAD(&bookmark.entry);
1142 
1143     spin_lock_irqsave(&q->lock, flags);
1144     __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1145 
1146     while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1147         /*
1148          * Take a breather from holding the lock, and
1149          * allow waiters that finish waking up asynchronously
1150          * to acquire the lock and remove themselves
1151          * from the wait queue.
1152          */
1153         spin_unlock_irqrestore(&q->lock, flags);
1154         cpu_relax();
1155         spin_lock_irqsave(&q->lock, flags);
1156         __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1157     }
1158 
1159     /*
1160      * It's possible to miss clearing waiters here, when we woke our page
1161      * waiters, but the hashed waitqueue has waiters for other pages on it.
1162      * That's okay, it's a rare case. The next waker will clear it.
1163      *
1164      * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1165      * other), the flag may be cleared in the course of freeing the page;
1166      * but that is not required for correctness.
1167      */
1168     if (!waitqueue_active(q) || !key.page_match)
1169         folio_clear_waiters(folio);
1170 
1171     spin_unlock_irqrestore(&q->lock, flags);
1172 }
1173 
1174 static void folio_wake(struct folio *folio, int bit)
1175 {
1176     if (!folio_test_waiters(folio))
1177         return;
1178     folio_wake_bit(folio, bit);
1179 }
1180 
1181 /*
1182  * A choice of three behaviors for folio_wait_bit_common():
1183  */
1184 enum behavior {
1185     EXCLUSIVE,  /* Hold ref to page and take the bit when woken, like
1186              * __folio_lock() waiting on then setting PG_locked.
1187              */
1188     SHARED,     /* Hold ref to page and check the bit when woken, like
1189              * folio_wait_writeback() waiting on PG_writeback.
1190              */
1191     DROP,       /* Drop ref to page before wait, no check when woken,
1192              * like folio_put_wait_locked() on PG_locked.
1193              */
1194 };
1195 
1196 /*
1197  * Attempt to check (or get) the folio flag, and mark us done
1198  * if successful.
1199  */
1200 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1201                     struct wait_queue_entry *wait)
1202 {
1203     if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1204         if (test_and_set_bit(bit_nr, &folio->flags))
1205             return false;
1206     } else if (test_bit(bit_nr, &folio->flags))
1207         return false;
1208 
1209     wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1210     return true;
1211 }
1212 
1213 /* How many times do we accept lock stealing from under a waiter? */
1214 int sysctl_page_lock_unfairness = 5;
1215 
1216 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1217         int state, enum behavior behavior)
1218 {
1219     wait_queue_head_t *q = folio_waitqueue(folio);
1220     int unfairness = sysctl_page_lock_unfairness;
1221     struct wait_page_queue wait_page;
1222     wait_queue_entry_t *wait = &wait_page.wait;
1223     bool thrashing = false;
1224     bool delayacct = false;
1225     unsigned long pflags;
1226 
1227     if (bit_nr == PG_locked &&
1228         !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1229         if (!folio_test_swapbacked(folio)) {
1230             delayacct_thrashing_start();
1231             delayacct = true;
1232         }
1233         psi_memstall_enter(&pflags);
1234         thrashing = true;
1235     }
1236 
1237     init_wait(wait);
1238     wait->func = wake_page_function;
1239     wait_page.folio = folio;
1240     wait_page.bit_nr = bit_nr;
1241 
1242 repeat:
1243     wait->flags = 0;
1244     if (behavior == EXCLUSIVE) {
1245         wait->flags = WQ_FLAG_EXCLUSIVE;
1246         if (--unfairness < 0)
1247             wait->flags |= WQ_FLAG_CUSTOM;
1248     }
1249 
1250     /*
1251      * Do one last check whether we can get the
1252      * page bit synchronously.
1253      *
1254      * Do the folio_set_waiters() marking before that
1255      * to let any waker we _just_ missed know they
1256      * need to wake us up (otherwise they'll never
1257      * even go to the slow case that looks at the
1258      * page queue), and add ourselves to the wait
1259      * queue if we need to sleep.
1260      *
1261      * This part needs to be done under the queue
1262      * lock to avoid races.
1263      */
1264     spin_lock_irq(&q->lock);
1265     folio_set_waiters(folio);
1266     if (!folio_trylock_flag(folio, bit_nr, wait))
1267         __add_wait_queue_entry_tail(q, wait);
1268     spin_unlock_irq(&q->lock);
1269 
1270     /*
1271      * From now on, all the logic will be based on
1272      * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1273      * see whether the page bit testing has already
1274      * been done by the wake function.
1275      *
1276      * We can drop our reference to the folio.
1277      */
1278     if (behavior == DROP)
1279         folio_put(folio);
1280 
1281     /*
1282      * Note that until the "finish_wait()", or until
1283      * we see the WQ_FLAG_WOKEN flag, we need to
1284      * be very careful with the 'wait->flags', because
1285      * we may race with a waker that sets them.
1286      */
1287     for (;;) {
1288         unsigned int flags;
1289 
1290         set_current_state(state);
1291 
1292         /* Loop until we've been woken or interrupted */
1293         flags = smp_load_acquire(&wait->flags);
1294         if (!(flags & WQ_FLAG_WOKEN)) {
1295             if (signal_pending_state(state, current))
1296                 break;
1297 
1298             io_schedule();
1299             continue;
1300         }
1301 
1302         /* If we were non-exclusive, we're done */
1303         if (behavior != EXCLUSIVE)
1304             break;
1305 
1306         /* If the waker got the lock for us, we're done */
1307         if (flags & WQ_FLAG_DONE)
1308             break;
1309 
1310         /*
1311          * Otherwise, if we're getting the lock, we need to
1312          * try to get it ourselves.
1313          *
1314          * And if that fails, we'll have to retry this all.
1315          */
1316         if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1317             goto repeat;
1318 
1319         wait->flags |= WQ_FLAG_DONE;
1320         break;
1321     }
1322 
1323     /*
1324      * If a signal happened, this 'finish_wait()' may remove the last
1325      * waiter from the wait-queues, but the folio waiters bit will remain
1326      * set. That's ok. The next wakeup will take care of it, and trying
1327      * to do it here would be difficult and prone to races.
1328      */
1329     finish_wait(q, wait);
1330 
1331     if (thrashing) {
1332         if (delayacct)
1333             delayacct_thrashing_end();
1334         psi_memstall_leave(&pflags);
1335     }
1336 
1337     /*
1338      * NOTE! The wait->flags weren't stable until we've done the
1339      * 'finish_wait()', and we could have exited the loop above due
1340      * to a signal, and had a wakeup event happen after the signal
1341      * test but before the 'finish_wait()'.
1342      *
1343      * So only after the finish_wait() can we reliably determine
1344      * if we got woken up or not, so we can now figure out the final
1345      * return value based on that state without races.
1346      *
1347      * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1348      * waiter, but an exclusive one requires WQ_FLAG_DONE.
1349      */
1350     if (behavior == EXCLUSIVE)
1351         return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1352 
1353     return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1354 }
1355 
1356 #ifdef CONFIG_MIGRATION
1357 /**
1358  * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1359  * @entry: migration swap entry.
1360  * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required
1361  *        for pte entries, pass NULL for pmd entries.
1362  * @ptl: already locked ptl. This function will drop the lock.
1363  *
1364  * Wait for a migration entry referencing the given page to be removed. This is
1365  * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1366  * this can be called without taking a reference on the page. Instead this
1367  * should be called while holding the ptl for the migration entry referencing
1368  * the page.
1369  *
1370  * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock().
1371  *
1372  * This follows the same logic as folio_wait_bit_common() so see the comments
1373  * there.
1374  */
1375 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
1376                 spinlock_t *ptl)
1377 {
1378     struct wait_page_queue wait_page;
1379     wait_queue_entry_t *wait = &wait_page.wait;
1380     bool thrashing = false;
1381     bool delayacct = false;
1382     unsigned long pflags;
1383     wait_queue_head_t *q;
1384     struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1385 
1386     q = folio_waitqueue(folio);
1387     if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1388         if (!folio_test_swapbacked(folio)) {
1389             delayacct_thrashing_start();
1390             delayacct = true;
1391         }
1392         psi_memstall_enter(&pflags);
1393         thrashing = true;
1394     }
1395 
1396     init_wait(wait);
1397     wait->func = wake_page_function;
1398     wait_page.folio = folio;
1399     wait_page.bit_nr = PG_locked;
1400     wait->flags = 0;
1401 
1402     spin_lock_irq(&q->lock);
1403     folio_set_waiters(folio);
1404     if (!folio_trylock_flag(folio, PG_locked, wait))
1405         __add_wait_queue_entry_tail(q, wait);
1406     spin_unlock_irq(&q->lock);
1407 
1408     /*
1409      * If a migration entry exists for the page the migration path must hold
1410      * a valid reference to the page, and it must take the ptl to remove the
1411      * migration entry. So the page is valid until the ptl is dropped.
1412      */
1413     if (ptep)
1414         pte_unmap_unlock(ptep, ptl);
1415     else
1416         spin_unlock(ptl);
1417 
1418     for (;;) {
1419         unsigned int flags;
1420 
1421         set_current_state(TASK_UNINTERRUPTIBLE);
1422 
1423         /* Loop until we've been woken or interrupted */
1424         flags = smp_load_acquire(&wait->flags);
1425         if (!(flags & WQ_FLAG_WOKEN)) {
1426             if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1427                 break;
1428 
1429             io_schedule();
1430             continue;
1431         }
1432         break;
1433     }
1434 
1435     finish_wait(q, wait);
1436 
1437     if (thrashing) {
1438         if (delayacct)
1439             delayacct_thrashing_end();
1440         psi_memstall_leave(&pflags);
1441     }
1442 }
1443 #endif
1444 
1445 void folio_wait_bit(struct folio *folio, int bit_nr)
1446 {
1447     folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1448 }
1449 EXPORT_SYMBOL(folio_wait_bit);
1450 
1451 int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1452 {
1453     return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1454 }
1455 EXPORT_SYMBOL(folio_wait_bit_killable);
1456 
1457 /**
1458  * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1459  * @folio: The folio to wait for.
1460  * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1461  *
1462  * The caller should hold a reference on @folio.  They expect the page to
1463  * become unlocked relatively soon, but do not wish to hold up migration
1464  * (for example) by holding the reference while waiting for the folio to
1465  * come unlocked.  After this function returns, the caller should not
1466  * dereference @folio.
1467  *
1468  * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1469  */
1470 int folio_put_wait_locked(struct folio *folio, int state)
1471 {
1472     return folio_wait_bit_common(folio, PG_locked, state, DROP);
1473 }
1474 
1475 /**
1476  * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1477  * @folio: Folio defining the wait queue of interest
1478  * @waiter: Waiter to add to the queue
1479  *
1480  * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1481  */
1482 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1483 {
1484     wait_queue_head_t *q = folio_waitqueue(folio);
1485     unsigned long flags;
1486 
1487     spin_lock_irqsave(&q->lock, flags);
1488     __add_wait_queue_entry_tail(q, waiter);
1489     folio_set_waiters(folio);
1490     spin_unlock_irqrestore(&q->lock, flags);
1491 }
1492 EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1493 
1494 #ifndef clear_bit_unlock_is_negative_byte
1495 
1496 /*
1497  * PG_waiters is the high bit in the same byte as PG_lock.
1498  *
1499  * On x86 (and on many other architectures), we can clear PG_lock and
1500  * test the sign bit at the same time. But if the architecture does
1501  * not support that special operation, we just do this all by hand
1502  * instead.
1503  *
1504  * The read of PG_waiters has to be after (or concurrently with) PG_locked
1505  * being cleared, but a memory barrier should be unnecessary since it is
1506  * in the same byte as PG_locked.
1507  */
1508 static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1509 {
1510     clear_bit_unlock(nr, mem);
1511     /* smp_mb__after_atomic(); */
1512     return test_bit(PG_waiters, mem);
1513 }
1514 
1515 #endif
1516 
1517 /**
1518  * folio_unlock - Unlock a locked folio.
1519  * @folio: The folio.
1520  *
1521  * Unlocks the folio and wakes up any thread sleeping on the page lock.
1522  *
1523  * Context: May be called from interrupt or process context.  May not be
1524  * called from NMI context.
1525  */
1526 void folio_unlock(struct folio *folio)
1527 {
1528     /* Bit 7 allows x86 to check the byte's sign bit */
1529     BUILD_BUG_ON(PG_waiters != 7);
1530     BUILD_BUG_ON(PG_locked > 7);
1531     VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1532     if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
1533         folio_wake_bit(folio, PG_locked);
1534 }
1535 EXPORT_SYMBOL(folio_unlock);
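/*
 * Illustrative sketch (not part of this file): the usual pairing.  A caller
 * that already holds a reference locks the folio, checks that it was not
 * truncated in the meantime, does its work and unlocks:
 *
 *      folio_lock(folio);
 *      if (folio->mapping == mapping)
 *              folio_mark_dirty(folio);
 *      folio_unlock(folio);
 *      folio_put(folio);
 */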
1536 
1537 /**
1538  * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1539  * @folio: The folio.
1540  *
1541  * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1542  * it.  The folio reference held for PG_private_2 being set is released.
1543  *
1544  * This is, for example, used when a netfs folio is being written to a local
1545  * disk cache, thereby allowing writes to the cache for the same folio to be
1546  * serialised.
1547  */
1548 void folio_end_private_2(struct folio *folio)
1549 {
1550     VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1551     clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1552     folio_wake_bit(folio, PG_private_2);
1553     folio_put(folio);
1554 }
1555 EXPORT_SYMBOL(folio_end_private_2);
1556 
1557 /**
1558  * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1559  * @folio: The folio to wait on.
1560  *
1561  * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1562  */
1563 void folio_wait_private_2(struct folio *folio)
1564 {
1565     while (folio_test_private_2(folio))
1566         folio_wait_bit(folio, PG_private_2);
1567 }
1568 EXPORT_SYMBOL(folio_wait_private_2);
1569 
1570 /**
1571  * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1572  * @folio: The folio to wait on.
1573  *
1574  * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1575  * fatal signal is received by the calling task.
1576  *
1577  * Return:
1578  * - 0 if successful.
1579  * - -EINTR if a fatal signal was encountered.
1580  */
1581 int folio_wait_private_2_killable(struct folio *folio)
1582 {
1583     int ret = 0;
1584 
1585     while (folio_test_private_2(folio)) {
1586         ret = folio_wait_bit_killable(folio, PG_private_2);
1587         if (ret < 0)
1588             break;
1589     }
1590 
1591     return ret;
1592 }
1593 EXPORT_SYMBOL(folio_wait_private_2_killable);
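/*
 * Illustrative sketch (not part of this file): a network filesystem that
 * sets PG_private_2 while a folio is being copied to its local cache would
 * typically wait for that copy to finish before starting a new write,
 * bailing out if a fatal signal arrives:
 *
 *      ret = folio_wait_private_2_killable(folio);
 *      if (ret < 0)
 *              return ret;
 */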
1594 
1595 /**
1596  * folio_end_writeback - End writeback against a folio.
1597  * @folio: The folio.
1598  */
1599 void folio_end_writeback(struct folio *folio)
1600 {
1601     /*
1602      * folio_test_clear_reclaim() could be used here but it is an
1603      * atomic operation and overkill in this particular case. Failing
1604      * to shuffle a folio marked for immediate reclaim is too mild
1605      * a gain to justify taking an atomic operation penalty at the
1606      * end of every folio writeback.
1607      */
1608     if (folio_test_reclaim(folio)) {
1609         folio_clear_reclaim(folio);
1610         folio_rotate_reclaimable(folio);
1611     }
1612 
1613     /*
1614      * Writeback does not hold a folio reference of its own, relying
1615      * on truncation to wait for the clearing of PG_writeback.
1616      * But here we must make sure that the folio is not freed and
1617      * reused before the folio_wake().
1618      */
1619     folio_get(folio);
1620     if (!__folio_end_writeback(folio))
1621         BUG();
1622 
1623     smp_mb__after_atomic();
1624     folio_wake(folio, PG_writeback);
1625     acct_reclaim_writeback(folio);
1626     folio_put(folio);
1627 }
1628 EXPORT_SYMBOL(folio_end_writeback);
1629 
1630 /*
1631  * After completing I/O on a page, call this routine to update the page
1632  * flags appropriately
1633  */
1634 void page_endio(struct page *page, bool is_write, int err)
1635 {
1636     if (!is_write) {
1637         if (!err) {
1638             SetPageUptodate(page);
1639         } else {
1640             ClearPageUptodate(page);
1641             SetPageError(page);
1642         }
1643         unlock_page(page);
1644     } else {
1645         if (err) {
1646             struct address_space *mapping;
1647 
1648             SetPageError(page);
1649             mapping = page_mapping(page);
1650             if (mapping)
1651                 mapping_set_error(mapping, err);
1652         }
1653         end_page_writeback(page);
1654     }
1655 }
1656 EXPORT_SYMBOL_GPL(page_endio);
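/*
 * Illustrative sketch (not part of this file): a simple per-page bio
 * completion handler can funnel its status through page_endio().  The
 * handler name is hypothetical; the bio helpers are the usual ones from
 * <linux/bio.h>:
 *
 *      static void example_end_bio(struct bio *bio)
 *      {
 *              int err = blk_status_to_errno(bio->bi_status);
 *              struct bio_vec *bvec;
 *              struct bvec_iter_all iter_all;
 *
 *              bio_for_each_segment_all(bvec, bio, iter_all)
 *                      page_endio(bvec->bv_page,
 *                                 op_is_write(bio_op(bio)), err);
 *              bio_put(bio);
 *      }
 */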
1657 
1658 /**
1659  * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1660  * @folio: The folio to lock
1661  */
1662 void __folio_lock(struct folio *folio)
1663 {
1664     folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1665                 EXCLUSIVE);
1666 }
1667 EXPORT_SYMBOL(__folio_lock);
1668 
1669 int __folio_lock_killable(struct folio *folio)
1670 {
1671     return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1672                     EXCLUSIVE);
1673 }
1674 EXPORT_SYMBOL_GPL(__folio_lock_killable);
1675 
1676 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1677 {
1678     struct wait_queue_head *q = folio_waitqueue(folio);
1679     int ret = 0;
1680 
1681     wait->folio = folio;
1682     wait->bit_nr = PG_locked;
1683 
1684     spin_lock_irq(&q->lock);
1685     __add_wait_queue_entry_tail(q, &wait->wait);
1686     folio_set_waiters(folio);
1687     ret = !folio_trylock(folio);
1688     /*
1689      * If we were successful now, we know we're still on the
1690      * waitqueue as we're still under the lock. This means it's
1691      * safe to remove and return success, we know the callback
1692      * isn't going to trigger.
1693      */
1694     if (!ret)
1695         __remove_wait_queue(q, &wait->wait);
1696     else
1697         ret = -EIOCBQUEUED;
1698     spin_unlock_irq(&q->lock);
1699     return ret;
1700 }
1701 
1702 /*
1703  * Return values:
1704  * true - folio is locked; mmap_lock is still held.
1705  * false - folio is not locked.
1706  *     mmap_lock has been released (mmap_read_unlock(), unless flags had both
1707  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1708  *     which case mmap_lock is still held.
1709  *
1710  * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
1711  * with the folio locked and the mmap_lock unperturbed.
1712  */
1713 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
1714              unsigned int flags)
1715 {
1716     if (fault_flag_allow_retry_first(flags)) {
1717         /*
1718          * CAUTION! In this case, mmap_lock is not released
1719          * even though we return false.
1720          */
1721         if (flags & FAULT_FLAG_RETRY_NOWAIT)
1722             return false;
1723 
1724         mmap_read_unlock(mm);
1725         if (flags & FAULT_FLAG_KILLABLE)
1726             folio_wait_locked_killable(folio);
1727         else
1728             folio_wait_locked(folio);
1729         return false;
1730     }
1731     if (flags & FAULT_FLAG_KILLABLE) {
1732         bool ret;
1733 
1734         ret = __folio_lock_killable(folio);
1735         if (ret) {
1736             mmap_read_unlock(mm);
1737             return false;
1738         }
1739     } else {
1740         __folio_lock(folio);
1741     }
1742 
1743     return true;
1744 }
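/*
 * Illustrative sketch (not part of this file): fault handlers normally go
 * through the folio_lock_or_retry() wrapper in <linux/pagemap.h> and must
 * honour the return value, because on the false path the mmap_lock may
 * already have been dropped:
 *
 *      if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
 *              folio_put(folio);
 *              return VM_FAULT_RETRY;
 *      }
 */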
1745 
1746 /**
1747  * page_cache_next_miss() - Find the next gap in the page cache.
1748  * @mapping: Mapping.
1749  * @index: Index.
1750  * @max_scan: Maximum range to search.
1751  *
1752  * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1753  * gap with the lowest index.
1754  *
1755  * This function may be called under the rcu_read_lock.  However, this will
1756  * not atomically search a snapshot of the cache at a single point in time.
1757  * For example, if a gap is created at index 5, then subsequently a gap is
1758  * created at index 10, page_cache_next_miss covering both indices may
1759  * return 10 if called under the rcu_read_lock.
1760  *
1761  * Return: The index of the gap if found, otherwise an index outside the
1762  * range specified (in which case 'return - index >= max_scan' will be true).
1763  * In the rare case of index wrap-around, 0 will be returned.
1764  */
1765 pgoff_t page_cache_next_miss(struct address_space *mapping,
1766                  pgoff_t index, unsigned long max_scan)
1767 {
1768     XA_STATE(xas, &mapping->i_pages, index);
1769 
1770     while (max_scan--) {
1771         void *entry = xas_next(&xas);
1772         if (!entry || xa_is_value(entry))
1773             break;
1774         if (xas.xa_index == 0)
1775             break;
1776     }
1777 
1778     return xas.xa_index;
1779 }
1780 EXPORT_SYMBOL(page_cache_next_miss);
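/*
 * Illustrative sketch (not part of this file): readahead-style code can use
 * this to check whether the next @max_scan pages are already cached before
 * issuing any I/O.  start_readahead_at() is a hypothetical stand-in for
 * whatever the caller does with the first missing index:
 *
 *      pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);
 *
 *      if (gap - index >= max_scan)
 *              return;
 *      start_readahead_at(gap);
 */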
1781 
1782 /**
1783  * page_cache_prev_miss() - Find the previous gap in the page cache.
1784  * @mapping: Mapping.
1785  * @index: Index.
1786  * @max_scan: Maximum range to search.
1787  *
1788  * Search the range [max(index - max_scan + 1, 0), index] for the
1789  * gap with the highest index.
1790  *
1791  * This function may be called under the rcu_read_lock.  However, this will
1792  * not atomically search a snapshot of the cache at a single point in time.
1793  * For example, if a gap is created at index 10, then subsequently a gap is
1794  * created at index 5, page_cache_prev_miss() covering both indices may
1795  * return 5 if called under the rcu_read_lock.
1796  *
1797  * Return: The index of the gap if found, otherwise an index outside the
1798  * range specified (in which case 'index - return >= max_scan' will be true).
1799  * In the rare case of wrap-around, ULONG_MAX will be returned.
1800  */
1801 pgoff_t page_cache_prev_miss(struct address_space *mapping,
1802                  pgoff_t index, unsigned long max_scan)
1803 {
1804     XA_STATE(xas, &mapping->i_pages, index);
1805 
1806     while (max_scan--) {
1807         void *entry = xas_prev(&xas);
1808         if (!entry || xa_is_value(entry))
1809             break;
1810         if (xas.xa_index == ULONG_MAX)
1811             break;
1812     }
1813 
1814     return xas.xa_index;
1815 }
1816 EXPORT_SYMBOL(page_cache_prev_miss);
1817 
1818 /*
1819  * Lockless page cache protocol:
1820  * On the lookup side:
1821  * 1. Load the folio from i_pages
1822  * 2. Increment the refcount if it's not zero
1823  * 3. If the folio is not found by xas_reload(), put the refcount and retry
1824  *
1825  * On the removal side:
1826  * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1827  * B. Remove the page from i_pages
1828  * C. Return the page to the page allocator
1829  *
1830  * This means that any page may have its reference count temporarily
1831  * increased by a speculative page cache (or fast GUP) lookup as it can
1832  * be allocated by another user before the RCU grace period expires.
1833  * Because the refcount temporarily acquired here may end up being the
1834  * last refcount on the page, any page allocation must be freeable by
1835  * folio_put().
1836  */
1837 
1838 /*
1839  * mapping_get_entry - Get a page cache entry.
1840  * @mapping: the address_space to search
1841  * @index: The page cache index.
1842  *
1843  * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1844  * it is returned with an increased refcount.  If it is a shadow entry
1845  * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1846  * it is returned without further action.
1847  *
1848  * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1849  */
1850 static void *mapping_get_entry(struct address_space *mapping, pgoff_t index)
1851 {
1852     XA_STATE(xas, &mapping->i_pages, index);
1853     struct folio *folio;
1854 
1855     rcu_read_lock();
1856 repeat:
1857     xas_reset(&xas);
1858     folio = xas_load(&xas);
1859     if (xas_retry(&xas, folio))
1860         goto repeat;
1861     /*
1862      * A shadow entry of a recently evicted page, or a swap entry from
1863      * shmem/tmpfs.  Return it without attempting to raise page count.
1864      */
1865     if (!folio || xa_is_value(folio))
1866         goto out;
1867 
1868     if (!folio_try_get_rcu(folio))
1869         goto repeat;
1870 
1871     if (unlikely(folio != xas_reload(&xas))) {
1872         folio_put(folio);
1873         goto repeat;
1874     }
1875 out:
1876     rcu_read_unlock();
1877 
1878     return folio;
1879 }
1880 
1881 /**
1882  * __filemap_get_folio - Find and get a reference to a folio.
1883  * @mapping: The address_space to search.
1884  * @index: The page index.
1885  * @fgp_flags: %FGP flags modify how the folio is returned.
1886  * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1887  *
1888  * Looks up the page cache entry at @mapping & @index.
1889  *
1890  * @fgp_flags can be zero or more of these flags:
1891  *
1892  * * %FGP_ACCESSED - The folio will be marked accessed.
1893  * * %FGP_LOCK - The folio is returned locked.
1894  * * %FGP_ENTRY - If there is a shadow / swap / DAX entry, return it
1895  *   instead of allocating a new folio to replace it.
1896  * * %FGP_CREAT - If no page is present then a new page is allocated using
1897  *   @gfp and added to the page cache and the VM's LRU list.
1898  *   The page is returned locked and with an increased refcount.
1899  * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1900  *   page is already in cache.  If the page was allocated, unlock it before
1901  *   returning so the caller can do the same dance.
1902  * * %FGP_WRITE - The page will be written to by the caller.
1903  * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
1904  * * %FGP_NOWAIT - Don't get blocked by page lock.
1905  * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
1906  *
1907  * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1908  * if the %GFP flags specified for %FGP_CREAT are atomic.
1909  *
1910  * If there is a page cache page, it is returned with an increased refcount.
1911  *
1912  * Return: The found folio or %NULL otherwise.
1913  */
1914 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1915         int fgp_flags, gfp_t gfp)
1916 {
1917     struct folio *folio;
1918 
1919 repeat:
1920     folio = mapping_get_entry(mapping, index);
1921     if (xa_is_value(folio)) {
1922         if (fgp_flags & FGP_ENTRY)
1923             return folio;
1924         folio = NULL;
1925     }
1926     if (!folio)
1927         goto no_page;
1928 
1929     if (fgp_flags & FGP_LOCK) {
1930         if (fgp_flags & FGP_NOWAIT) {
1931             if (!folio_trylock(folio)) {
1932                 folio_put(folio);
1933                 return NULL;
1934             }
1935         } else {
1936             folio_lock(folio);
1937         }
1938 
1939         /* Has the page been truncated? */
1940         if (unlikely(folio->mapping != mapping)) {
1941             folio_unlock(folio);
1942             folio_put(folio);
1943             goto repeat;
1944         }
1945         VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1946     }
1947 
1948     if (fgp_flags & FGP_ACCESSED)
1949         folio_mark_accessed(folio);
1950     else if (fgp_flags & FGP_WRITE) {
1951         /* Clear idle flag for buffer write */
1952         if (folio_test_idle(folio))
1953             folio_clear_idle(folio);
1954     }
1955 
1956     if (fgp_flags & FGP_STABLE)
1957         folio_wait_stable(folio);
1958 no_page:
1959     if (!folio && (fgp_flags & FGP_CREAT)) {
1960         int err;
1961         if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1962             gfp |= __GFP_WRITE;
1963         if (fgp_flags & FGP_NOFS)
1964             gfp &= ~__GFP_FS;
1965         if (fgp_flags & FGP_NOWAIT) {
1966             gfp &= ~GFP_KERNEL;
1967             gfp |= GFP_NOWAIT | __GFP_NOWARN;
1968         }
1969 
1970         folio = filemap_alloc_folio(gfp, 0);
1971         if (!folio)
1972             return NULL;
1973 
1974         if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1975             fgp_flags |= FGP_LOCK;
1976 
1977         /* Init accessed so we avoid an atomic mark_page_accessed() later */
1978         if (fgp_flags & FGP_ACCESSED)
1979             __folio_set_referenced(folio);
1980 
1981         err = filemap_add_folio(mapping, folio, index, gfp);
1982         if (unlikely(err)) {
1983             folio_put(folio);
1984             folio = NULL;
1985             if (err == -EEXIST)
1986                 goto repeat;
1987         }
1988 
1989         /*
1990          * filemap_add_folio locks the page, and for mmap
1991          * we expect an unlocked page.
1992          */
1993         if (folio && (fgp_flags & FGP_FOR_MMAP))
1994             folio_unlock(folio);
1995     }
1996 
1997     return folio;
1998 }
1999 EXPORT_SYMBOL(__filemap_get_folio);
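/*
 * Illustrative sketch (not part of this file): a buffered-write style
 * caller that wants the folio covering @pos locked, creating it if it is
 * not yet cached, might do (error handling trimmed):
 *
 *      struct folio *folio;
 *
 *      folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
 *                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *                      mapping_gfp_mask(mapping));
 *      if (!folio)
 *              return -ENOMEM;
 *      ... copy data into the folio ...
 *      folio_mark_dirty(folio);
 *      folio_unlock(folio);
 *      folio_put(folio);
 */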
2000 
2001 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
2002         xa_mark_t mark)
2003 {
2004     struct folio *folio;
2005 
2006 retry:
2007     if (mark == XA_PRESENT)
2008         folio = xas_find(xas, max);
2009     else
2010         folio = xas_find_marked(xas, max, mark);
2011 
2012     if (xas_retry(xas, folio))
2013         goto retry;
2014     /*
2015      * A shadow entry of a recently evicted page, a swap
2016      * entry from shmem/tmpfs or a DAX entry.  Return it
2017      * without attempting to raise page count.
2018      */
2019     if (!folio || xa_is_value(folio))
2020         return folio;
2021 
2022     if (!folio_try_get_rcu(folio))
2023         goto reset;
2024 
2025     if (unlikely(folio != xas_reload(xas))) {
2026         folio_put(folio);
2027         goto reset;
2028     }
2029 
2030     return folio;
2031 reset:
2032     xas_reset(xas);
2033     goto retry;
2034 }
2035 
2036 /**
2037  * find_get_entries - gang pagecache lookup
2038  * @mapping:    The address_space to search
2039  * @start:  The starting page cache index
2040  * @end:    The final page index (inclusive).
2041  * @fbatch: Where the resulting entries are placed.
2042  * @indices:    The cache indices corresponding to the entries in @fbatch
2043  *
2044  * find_get_entries() will search for and return a batch of entries in
2045  * the mapping.  The entries are placed in @fbatch.  find_get_entries()
2046  * takes a reference on any actual folios it returns.
2047  *
2048  * The entries have ascending indexes.  The indices may not be consecutive
2049  * due to not-present entries or large folios.
2050  *
2051  * Any shadow entries of evicted folios, or swap entries from
2052  * shmem/tmpfs, are included in the returned array.
2053  *
2054  * Return: The number of entries which were found.
2055  */
2056 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
2057         pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2058 {
2059     XA_STATE(xas, &mapping->i_pages, start);
2060     struct folio *folio;
2061 
2062     rcu_read_lock();
2063     while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2064         indices[fbatch->nr] = xas.xa_index;
2065         if (!folio_batch_add(fbatch, folio))
2066             break;
2067     }
2068     rcu_read_unlock();
2069 
2070     return folio_batch_count(fbatch);
2071 }
2072 
2073 /**
2074  * find_lock_entries - Find a batch of pagecache entries.
2075  * @mapping:    The address_space to search.
2076  * @start:  The starting page cache index.
2077  * @end:    The final page index (inclusive).
2078  * @fbatch: Where the resulting entries are placed.
2079  * @indices:    The cache indices of the entries in @fbatch.
2080  *
2081  * find_lock_entries() will return a batch of entries from @mapping.
2082  * Swap, shadow and DAX entries are included.  Folios are returned
2083  * locked and with an incremented refcount.  Folios which are locked
2084  * by somebody else or under writeback are skipped.  Folios which are
2085  * partially outside the range are not returned.
2086  *
2087  * The entries have ascending indexes.  The indices may not be consecutive
2088  * due to not-present entries, large folios, folios which could not be
2089  * locked or folios under writeback.
2090  *
2091  * Return: The number of entries which were found.
2092  */
2093 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
2094         pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2095 {
2096     XA_STATE(xas, &mapping->i_pages, start);
2097     struct folio *folio;
2098 
2099     rcu_read_lock();
2100     while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2101         if (!xa_is_value(folio)) {
2102             if (folio->index < start)
2103                 goto put;
2104             if (folio->index + folio_nr_pages(folio) - 1 > end)
2105                 goto put;
2106             if (!folio_trylock(folio))
2107                 goto put;
2108             if (folio->mapping != mapping ||
2109                 folio_test_writeback(folio))
2110                 goto unlock;
2111             VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2112                     folio);
2113         }
2114         indices[fbatch->nr] = xas.xa_index;
2115         if (!folio_batch_add(fbatch, folio))
2116             break;
2117         continue;
2118 unlock:
2119         folio_unlock(folio);
2120 put:
2121         folio_put(folio);
2122     }
2123     rcu_read_unlock();
2124 
2125     return folio_batch_count(fbatch);
2126 }
2127 
2128 /**
2129  * filemap_get_folios - Get a batch of folios
2130  * @mapping:    The address_space to search
2131  * @start:  The starting page index
2132  * @end:    The final page index (inclusive)
2133  * @fbatch: The batch to fill.
2134  *
2135  * Search for and return a batch of folios in the mapping starting at
2136  * index @start and up to index @end (inclusive).  The folios are returned
2137  * in @fbatch with an elevated reference count.
2138  *
2139  * The first folio may start before @start; if it does, it will contain
2140  * @start.  The final folio may extend beyond @end; if it does, it will
2141  * contain @end.  The folios have ascending indices.  There may be gaps
2142  * between the folios if there are indices which have no folio in the
2143  * page cache.  If folios are added to or removed from the page cache
2144  * while this is running, they may or may not be found by this call.
2145  *
2146  * Return: The number of folios which were found.
2147  * We also update @start to index the next folio for the traversal.
2148  */
2149 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2150         pgoff_t end, struct folio_batch *fbatch)
2151 {
2152     XA_STATE(xas, &mapping->i_pages, *start);
2153     struct folio *folio;
2154 
2155     rcu_read_lock();
2156     while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2157         /* Skip over shadow, swap and DAX entries */
2158         if (xa_is_value(folio))
2159             continue;
2160         if (!folio_batch_add(fbatch, folio)) {
2161             unsigned long nr = folio_nr_pages(folio);
2162 
2163             if (folio_test_hugetlb(folio))
2164                 nr = 1;
2165             *start = folio->index + nr;
2166             goto out;
2167         }
2168     }
2169 
2170     /*
2171      * We come here when there is no page beyond @end. We take care to not
2172      * overflow the index @start as it confuses some of the callers. This
2173      * breaks the iteration when there is a page at index -1 but that is
2174      * already broken anyway.
2175      */
2176     if (end == (pgoff_t)-1)
2177         *start = (pgoff_t)-1;
2178     else
2179         *start = end + 1;
2180 out:
2181     rcu_read_unlock();
2182 
2183     return folio_batch_count(fbatch);
2184 }
2185 EXPORT_SYMBOL(filemap_get_folios);
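/*
 * Illustrative sketch (not part of this file): the usual way to visit every
 * folio in a range is to call filemap_get_folios() repeatedly and release
 * each batch with folio_batch_release(), which drops the references taken
 * above.  do_something() is a hypothetical per-folio action:
 *
 *      struct folio_batch fbatch;
 *      pgoff_t index = first;
 *      unsigned int i;
 *
 *      folio_batch_init(&fbatch);
 *      while (filemap_get_folios(mapping, &index, last, &fbatch)) {
 *              for (i = 0; i < folio_batch_count(&fbatch); i++)
 *                      do_something(fbatch.folios[i]);
 *              folio_batch_release(&fbatch);
 *              cond_resched();
 *      }
 */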
2186 
2187 static inline
2188 bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
2189 {
2190     if (!folio_test_large(folio) || folio_test_hugetlb(folio))
2191         return false;
2192     if (index >= max)
2193         return false;
2194     return index < folio->index + folio_nr_pages(folio) - 1;
2195 }
2196 
2197 /**
2198  * find_get_pages_contig - gang contiguous pagecache lookup
2199  * @mapping:    The address_space to search
2200  * @index:  The starting page index
2201  * @nr_pages:   The maximum number of pages
2202  * @pages:  Where the resulting pages are placed
2203  *
2204  * find_get_pages_contig() works exactly like find_get_pages_range(),
2205  * except that the returned pages are guaranteed to have contiguous
2206  * indices.
2207  *
2208  * Return: the number of pages which were found.
2209  */
2210 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
2211                    unsigned int nr_pages, struct page **pages)
2212 {
2213     XA_STATE(xas, &mapping->i_pages, index);
2214     struct folio *folio;
2215     unsigned int ret = 0;
2216 
2217     if (unlikely(!nr_pages))
2218         return 0;
2219 
2220     rcu_read_lock();
2221     for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2222         if (xas_retry(&xas, folio))
2223             continue;
2224         /*
2225          * If the entry has been swapped out, we can stop looking.
2226          * No current caller is looking for DAX entries.
2227          */
2228         if (xa_is_value(folio))
2229             break;
2230 
2231         if (!folio_try_get_rcu(folio))
2232             goto retry;
2233 
2234         if (unlikely(folio != xas_reload(&xas)))
2235             goto put_page;
2236 
2237 again:
2238         pages[ret] = folio_file_page(folio, xas.xa_index);
2239         if (++ret == nr_pages)
2240             break;
2241         if (folio_more_pages(folio, xas.xa_index, ULONG_MAX)) {
2242             xas.xa_index++;
2243             folio_ref_inc(folio);
2244             goto again;
2245         }
2246         continue;
2247 put_page:
2248         folio_put(folio);
2249 retry:
2250         xas_reset(&xas);
2251     }
2252     rcu_read_unlock();
2253     return ret;
2254 }
2255 EXPORT_SYMBOL(find_get_pages_contig);
2256 
2257 /**
2258  * find_get_pages_range_tag - Find and return head pages matching @tag.
2259  * @mapping:    the address_space to search
2260  * @index:  the starting page index
2261  * @end:    The final page index (inclusive)
2262  * @tag:    the tag index
2263  * @nr_pages:   the maximum number of pages
2264  * @pages:  where the resulting pages are placed
2265  *
2266  * Like find_get_pages_range(), except we only return head pages which are
2267  * tagged with @tag.  @index is updated to the index immediately after the
2268  * last page we return, ready for the next iteration.
2269  *
2270  * Return: the number of pages which were found.
2271  */
2272 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
2273             pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
2274             struct page **pages)
2275 {
2276     XA_STATE(xas, &mapping->i_pages, *index);
2277     struct folio *folio;
2278     unsigned ret = 0;
2279 
2280     if (unlikely(!nr_pages))
2281         return 0;
2282 
2283     rcu_read_lock();
2284     while ((folio = find_get_entry(&xas, end, tag))) {
2285         /*
2286          * Shadow entries should never be tagged, but this iteration
2287          * is lockless so there is a window for page reclaim to evict
2288          * a page we saw tagged.  Skip over it.
2289          */
2290         if (xa_is_value(folio))
2291             continue;
2292 
2293         pages[ret] = &folio->page;
2294         if (++ret == nr_pages) {
2295             *index = folio->index + folio_nr_pages(folio);
2296             goto out;
2297         }
2298     }
2299 
2300     /*
2301      * We come here when we got to @end. We take care to not overflow the
2302      * index @index as it confuses some of the callers. This breaks the
2303      * iteration when there is a page at index -1 but that is already
2304      * broken anyway.
2305      */
2306     if (end == (pgoff_t)-1)
2307         *index = (pgoff_t)-1;
2308     else
2309         *index = end + 1;
2310 out:
2311     rcu_read_unlock();
2312 
2313     return ret;
2314 }
2315 EXPORT_SYMBOL(find_get_pages_range_tag);
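/*
 * Illustrative sketch (not part of this file): a writeback-style walk over
 * the dirty pages in a range.  write_one() is a hypothetical per-page
 * action; the references taken above are dropped with put_page():
 *
 *      struct page *pages[16];
 *      pgoff_t index = start;
 *      unsigned int i, nr;
 *
 *      while ((nr = find_get_pages_range_tag(mapping, &index, end,
 *                              PAGECACHE_TAG_DIRTY, 16, pages))) {
 *              for (i = 0; i < nr; i++) {
 *                      write_one(pages[i]);
 *                      put_page(pages[i]);
 *              }
 *      }
 */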
2316 
2317 /*
2318  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2319  * a _large_ part of the i/o request. Imagine the worst scenario:
2320  *
2321  *      ---R__________________________________________B__________
2322  *         ^ reading here                             ^ bad block (assume 4k)
2323  *
2324  * read(R) => miss => readahead(R...B) => media error => frustrating retries
2325  * => failing the whole request => read(R) => read(R+1) =>
2326  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2327  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2328  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2329  *
2330  * It is going insane. Fix it by quickly scaling down the readahead size.
2331  */
2332 static void shrink_readahead_size_eio(struct file_ra_state *ra)
2333 {
2334     ra->ra_pages /= 4;
2335 }
2336 
2337 /*
2338  * filemap_get_read_batch - Get a batch of folios for read
2339  *
2340  * Get a batch of folios which represent a contiguous range of bytes in
2341  * the file.  No exceptional entries will be returned.  If @index is in
2342  * the middle of a folio, the entire folio will be returned.  The last
2343  * folio in the batch may have the readahead flag set or the uptodate flag
2344  * clear so that the caller can take the appropriate action.
2345  */
2346 static void filemap_get_read_batch(struct address_space *mapping,
2347         pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2348 {
2349     XA_STATE(xas, &mapping->i_pages, index);
2350     struct folio *folio;
2351 
2352     rcu_read_lock();
2353     for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2354         if (xas_retry(&xas, folio))
2355             continue;
2356         if (xas.xa_index > max || xa_is_value(folio))
2357             break;
2358         if (xa_is_sibling(folio))
2359             break;
2360         if (!folio_try_get_rcu(folio))
2361             goto retry;
2362 
2363         if (unlikely(folio != xas_reload(&xas)))
2364             goto put_folio;
2365 
2366         if (!folio_batch_add(fbatch, folio))
2367             break;
2368         if (!folio_test_uptodate(folio))
2369             break;
2370         if (folio_test_readahead(folio))
2371             break;
2372         xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
2373         continue;
2374 put_folio:
2375         folio_put(folio);
2376 retry:
2377         xas_reset(&xas);
2378     }
2379     rcu_read_unlock();
2380 }
2381 
2382 static int filemap_read_folio(struct file *file, filler_t filler,
2383         struct folio *folio)
2384 {
2385     int error;
2386 
2387     /*
2388      * A previous I/O error may have been due to temporary failures,
2389      * e.g. multipath errors.  PG_error will be set again if read_folio
2390      * fails.
2391      */
2392     folio_clear_error(folio);
2393     /* Start the actual read. The read will unlock the page. */
2394     error = filler(file, folio);
2395     if (error)
2396         return error;
2397 
2398     error = folio_wait_locked_killable(folio);
2399     if (error)
2400         return error;
2401     if (folio_test_uptodate(folio))
2402         return 0;
2403     if (file)
2404         shrink_readahead_size_eio(&file->f_ra);
2405     return -EIO;
2406 }
2407 
2408 static bool filemap_range_uptodate(struct address_space *mapping,
2409         loff_t pos, struct iov_iter *iter, struct folio *folio)
2410 {
2411     int count;
2412 
2413     if (folio_test_uptodate(folio))
2414         return true;
2415     /* pipes can't handle partially uptodate pages */
2416     if (iov_iter_is_pipe(iter))
2417         return false;
2418     if (!mapping->a_ops->is_partially_uptodate)
2419         return false;
2420     if (mapping->host->i_blkbits >= folio_shift(folio))
2421         return false;
2422 
2423     count = iter->count;
2424     if (folio_pos(folio) > pos) {
2425         count -= folio_pos(folio) - pos;
2426         pos = 0;
2427     } else {
2428         pos -= folio_pos(folio);
2429     }
2430 
2431     return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2432 }
2433 
2434 static int filemap_update_page(struct kiocb *iocb,
2435         struct address_space *mapping, struct iov_iter *iter,
2436         struct folio *folio)
2437 {
2438     int error;
2439 
2440     if (iocb->ki_flags & IOCB_NOWAIT) {
2441         if (!filemap_invalidate_trylock_shared(mapping))
2442             return -EAGAIN;
2443     } else {
2444         filemap_invalidate_lock_shared(mapping);
2445     }
2446 
2447     if (!folio_trylock(folio)) {
2448         error = -EAGAIN;
2449         if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2450             goto unlock_mapping;
2451         if (!(iocb->ki_flags & IOCB_WAITQ)) {
2452             filemap_invalidate_unlock_shared(mapping);
2453             /*
2454              * This is where we usually end up waiting for a
2455              * previously submitted readahead to finish.
2456              */
2457             folio_put_wait_locked(folio, TASK_KILLABLE);
2458             return AOP_TRUNCATED_PAGE;
2459         }
2460         error = __folio_lock_async(folio, iocb->ki_waitq);
2461         if (error)
2462             goto unlock_mapping;
2463     }
2464 
2465     error = AOP_TRUNCATED_PAGE;
2466     if (!folio->mapping)
2467         goto unlock;
2468 
2469     error = 0;
2470     if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, folio))
2471         goto unlock;
2472 
2473     error = -EAGAIN;
2474     if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2475         goto unlock;
2476 
2477     error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2478             folio);
2479     goto unlock_mapping;
2480 unlock:
2481     folio_unlock(folio);
2482 unlock_mapping:
2483     filemap_invalidate_unlock_shared(mapping);
2484     if (error == AOP_TRUNCATED_PAGE)
2485         folio_put(folio);
2486     return error;
2487 }
2488 
2489 static int filemap_create_folio(struct file *file,
2490         struct address_space *mapping, pgoff_t index,
2491         struct folio_batch *fbatch)
2492 {
2493     struct folio *folio;
2494     int error;
2495 
2496     folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2497     if (!folio)
2498         return -ENOMEM;
2499 
2500     /*
2501      * Protect against truncate / hole punch. Grabbing invalidate_lock
2502      * here assures we cannot instantiate and bring uptodate new
2503      * pagecache folios after evicting page cache during truncate
2504      * and before actually freeing blocks.  Note that we could
2505      * release invalidate_lock after inserting the folio into
2506      * the page cache as the locked folio would then be enough to
2507      * synchronize with hole punching. But there are code paths
2508      * such as filemap_update_page() filling in partially uptodate
2509      * pages or ->readahead() that need to hold invalidate_lock
2510      * while mapping blocks for IO so let's hold the lock here as
2511      * well to keep locking rules simple.
2512      */
2513     filemap_invalidate_lock_shared(mapping);
2514     error = filemap_add_folio(mapping, folio, index,
2515             mapping_gfp_constraint(mapping, GFP_KERNEL));
2516     if (error == -EEXIST)
2517         error = AOP_TRUNCATED_PAGE;
2518     if (error)
2519         goto error;
2520 
2521     error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2522     if (error)
2523         goto error;
2524 
2525     filemap_invalidate_unlock_shared(mapping);
2526     folio_batch_add(fbatch, folio);
2527     return 0;
2528 error:
2529     filemap_invalidate_unlock_shared(mapping);
2530     folio_put(folio);
2531     return error;
2532 }
2533 
2534 static int filemap_readahead(struct kiocb *iocb, struct file *file,
2535         struct address_space *mapping, struct folio *folio,
2536         pgoff_t last_index)
2537 {
2538     DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2539 
2540     if (iocb->ki_flags & IOCB_NOIO)
2541         return -EAGAIN;
2542     page_cache_async_ra(&ractl, folio, last_index - folio->index);
2543     return 0;
2544 }
2545 
2546 static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter,
2547         struct folio_batch *fbatch)
2548 {
2549     struct file *filp = iocb->ki_filp;
2550     struct address_space *mapping = filp->f_mapping;
2551     struct file_ra_state *ra = &filp->f_ra;
2552     pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2553     pgoff_t last_index;
2554     struct folio *folio;
2555     int err = 0;
2556 
2557     last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE);
2558 retry:
2559     if (fatal_signal_pending(current))
2560         return -EINTR;
2561 
2562     filemap_get_read_batch(mapping, index, last_index, fbatch);
2563     if (!folio_batch_count(fbatch)) {
2564         if (iocb->ki_flags & IOCB_NOIO)
2565             return -EAGAIN;
2566         page_cache_sync_readahead(mapping, ra, filp, index,
2567                 last_index - index);
2568         filemap_get_read_batch(mapping, index, last_index, fbatch);
2569     }
2570     if (!folio_batch_count(fbatch)) {
2571         if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2572             return -EAGAIN;
2573         err = filemap_create_folio(filp, mapping,
2574                 iocb->ki_pos >> PAGE_SHIFT, fbatch);
2575         if (err == AOP_TRUNCATED_PAGE)
2576             goto retry;
2577         return err;
2578     }
2579 
2580     folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2581     if (folio_test_readahead(folio)) {
2582         err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2583         if (err)
2584             goto err;
2585     }
2586     if (!folio_test_uptodate(folio)) {
2587         if ((iocb->ki_flags & IOCB_WAITQ) &&
2588             folio_batch_count(fbatch) > 1)
2589             iocb->ki_flags |= IOCB_NOWAIT;
2590         err = filemap_update_page(iocb, mapping, iter, folio);
2591         if (err)
2592             goto err;
2593     }
2594 
2595     return 0;
2596 err:
2597     if (err < 0)
2598         folio_put(folio);
2599     if (likely(--fbatch->nr))
2600         return 0;
2601     if (err == AOP_TRUNCATED_PAGE)
2602         goto retry;
2603     return err;
2604 }
2605 
2606 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2607 {
2608     unsigned int shift = folio_shift(folio);
2609 
2610     return (pos1 >> shift == pos2 >> shift);
2611 }
2612 
2613 /**
2614  * filemap_read - Read data from the page cache.
2615  * @iocb: The iocb to read.
2616  * @iter: Destination for the data.
2617  * @already_read: Number of bytes already read by the caller.
2618  *
2619  * Copies data from the page cache.  If the data is not currently present,
2620  * uses the readahead and read_folio address_space operations to fetch it.
2621  *
2622  * Return: Total number of bytes copied, including those already read by
2623  * the caller.  If an error happens before any bytes are copied, returns
2624  * a negative error number.
2625  */
2626 ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2627         ssize_t already_read)
2628 {
2629     struct file *filp = iocb->ki_filp;
2630     struct file_ra_state *ra = &filp->f_ra;
2631     struct address_space *mapping = filp->f_mapping;
2632     struct inode *inode = mapping->host;
2633     struct folio_batch fbatch;
2634     int i, error = 0;
2635     bool writably_mapped;
2636     loff_t isize, end_offset;
2637 
2638     if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2639         return 0;
2640     if (unlikely(!iov_iter_count(iter)))
2641         return 0;
2642 
2643     iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2644     folio_batch_init(&fbatch);
2645 
2646     do {
2647         cond_resched();
2648 
2649         /*
2650          * If we've already successfully copied some data, then we
2651          * can no longer safely return -EIOCBQUEUED. Hence mark
2652          * an async read NOWAIT at that point.
2653          */
2654         if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2655             iocb->ki_flags |= IOCB_NOWAIT;
2656 
2657         if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2658             break;
2659 
2660         error = filemap_get_pages(iocb, iter, &fbatch);
2661         if (error < 0)
2662             break;
2663 
2664         /*
2665          * i_size must be checked after we know the pages are Uptodate.
2666          *
2667          * Checking i_size only after the uptodate check allows us to calculate
2668          * the correct value for "nr", which means the zero-filled
2669          * part of the page is not copied back to userspace (unless
2670          * another truncate extends the file - this is desired though).
2671          */
2672         isize = i_size_read(inode);
2673         if (unlikely(iocb->ki_pos >= isize))
2674             goto put_folios;
2675         end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2676 
2677         /*
2678          * Once we start copying data, we don't want to be touching any
2679          * cachelines that might be contended:
2680          */
2681         writably_mapped = mapping_writably_mapped(mapping);
2682 
2683         /*
2684          * When a read accesses the same folio several times, only
2685          * mark it as accessed the first time.
2686          */
2687         if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
2688                             fbatch.folios[0]))
2689             folio_mark_accessed(fbatch.folios[0]);
2690 
2691         for (i = 0; i < folio_batch_count(&fbatch); i++) {
2692             struct folio *folio = fbatch.folios[i];
2693             size_t fsize = folio_size(folio);
2694             size_t offset = iocb->ki_pos & (fsize - 1);
2695             size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2696                          fsize - offset);
2697             size_t copied;
2698 
2699             if (end_offset < folio_pos(folio))
2700                 break;
2701             if (i > 0)
2702                 folio_mark_accessed(folio);
2703             /*
2704              * If users can be writing to this folio using arbitrary
2705              * virtual addresses, take care of potential aliasing
2706              * before reading the folio on the kernel side.
2707              */
2708             if (writably_mapped)
2709                 flush_dcache_folio(folio);
2710 
2711             copied = copy_folio_to_iter(folio, offset, bytes, iter);
2712 
2713             already_read += copied;
2714             iocb->ki_pos += copied;
2715             ra->prev_pos = iocb->ki_pos;
2716 
2717             if (copied < bytes) {
2718                 error = -EFAULT;
2719                 break;
2720             }
2721         }
2722 put_folios:
2723         for (i = 0; i < folio_batch_count(&fbatch); i++)
2724             folio_put(fbatch.folios[i]);
2725         folio_batch_init(&fbatch);
2726     } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2727 
2728     file_accessed(filp);
2729 
2730     return already_read ? already_read : error;
2731 }
2732 EXPORT_SYMBOL_GPL(filemap_read);
2733 
2734 /**
2735  * generic_file_read_iter - generic filesystem read routine
2736  * @iocb:   kernel I/O control block
2737  * @iter:   destination for the data read
2738  *
2739  * This is the "read_iter()" routine for all filesystems
2740  * that can use the page cache directly.
2741  *
2742  * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2743  * be returned when no data can be read without waiting for I/O requests
2744  * to complete; it doesn't prevent readahead.
2745  *
2746  * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2747  * requests shall be made for the read or for readahead.  When no data
2748  * can be read, -EAGAIN shall be returned.  When readahead would be
2749  * triggered, a partial, possibly empty read shall be returned.
2750  *
2751  * Return:
2752  * * number of bytes copied, even for partial reads
2753  * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2754  */
2755 ssize_t
2756 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2757 {
2758     size_t count = iov_iter_count(iter);
2759     ssize_t retval = 0;
2760 
2761     if (!count)
2762         return 0; /* skip atime */
2763 
2764     if (iocb->ki_flags & IOCB_DIRECT) {
2765         struct file *file = iocb->ki_filp;
2766         struct address_space *mapping = file->f_mapping;
2767         struct inode *inode = mapping->host;
2768 
2769         if (iocb->ki_flags & IOCB_NOWAIT) {
2770             if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
2771                         iocb->ki_pos + count - 1))
2772                 return -EAGAIN;
2773         } else {
2774             retval = filemap_write_and_wait_range(mapping,
2775                         iocb->ki_pos,
2776                             iocb->ki_pos + count - 1);
2777             if (retval < 0)
2778                 return retval;
2779         }
2780 
2781         file_accessed(file);
2782 
2783         retval = mapping->a_ops->direct_IO(iocb, iter);
2784         if (retval >= 0) {
2785             iocb->ki_pos += retval;
2786             count -= retval;
2787         }
2788         if (retval != -EIOCBQUEUED)
2789             iov_iter_revert(iter, count - iov_iter_count(iter));
2790 
2791         /*
2792          * Btrfs can have a short DIO read if we encounter
2793          * compressed extents, so if there was an error, or if
2794          * we've already read everything we wanted to, or if
2795          * there was a short read because we hit EOF, go ahead
2796          * and return.  Otherwise fallthrough to buffered io for
2797          * the rest of the read.  Buffered reads will not work for
2798          * DAX files, so don't bother trying.
2799          */
2800         if (retval < 0 || !count || IS_DAX(inode))
2801             return retval;
2802         if (iocb->ki_pos >= i_size_read(inode))
2803             return retval;
2804     }
2805 
2806     return filemap_read(iocb, iter, retval);
2807 }
2808 EXPORT_SYMBOL(generic_file_read_iter);
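/*
 * Illustrative sketch (not part of this file): filesystems that keep file
 * data in the page cache usually just point ->read_iter at this function.
 * The structure name is hypothetical; the helpers are the standard generic
 * ones:
 *
 *      const struct file_operations example_file_operations = {
 *              .llseek         = generic_file_llseek,
 *              .read_iter      = generic_file_read_iter,
 *              .write_iter     = generic_file_write_iter,
 *              .mmap           = generic_file_mmap,
 *      };
 */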
2809 
2810 static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2811         struct address_space *mapping, struct folio *folio,
2812         loff_t start, loff_t end, bool seek_data)
2813 {
2814     const struct address_space_operations *ops = mapping->a_ops;
2815     size_t offset, bsz = i_blocksize(mapping->host);
2816 
2817     if (xa_is_value(folio) || folio_test_uptodate(folio))
2818         return seek_data ? start : end;
2819     if (!ops->is_partially_uptodate)
2820         return seek_data ? end : start;
2821 
2822     xas_pause(xas);
2823     rcu_read_unlock();
2824     folio_lock(folio);
2825     if (unlikely(folio->mapping != mapping))
2826         goto unlock;
2827 
2828     offset = offset_in_folio(folio, start) & ~(bsz - 1);
2829 
2830     do {
2831         if (ops->is_partially_uptodate(folio, offset, bsz) ==
2832                             seek_data)
2833             break;
2834         start = (start + bsz) & ~(bsz - 1);
2835         offset += bsz;
2836     } while (offset < folio_size(folio));
2837 unlock:
2838     folio_unlock(folio);
2839     rcu_read_lock();
2840     return start;
2841 }
2842 
2843 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
2844 {
2845     if (xa_is_value(folio))
2846         return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
2847     return folio_size(folio);
2848 }
2849 
2850 /**
2851  * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
2852  * @mapping: Address space to search.
2853  * @start: First byte to consider.
2854  * @end: Limit of search (exclusive).
2855  * @whence: Either SEEK_HOLE or SEEK_DATA.
2856  *
2857  * If the page cache knows which blocks contain holes and which blocks
2858  * contain data, your filesystem can use this function to implement
2859  * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
2860  * entirely memory-based such as tmpfs, and filesystems which support
2861  * unwritten extents.
2862  *
2863  * Return: The requested offset on success, or -ENXIO if @whence specifies
2864  * SEEK_DATA and there is no data after @start.  There is an implicit hole
2865  * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
2866  * and @end contain data.
2867  */
2868 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
2869         loff_t end, int whence)
2870 {
2871     XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
2872     pgoff_t max = (end - 1) >> PAGE_SHIFT;
2873     bool seek_data = (whence == SEEK_DATA);
2874     struct folio *folio;
2875 
2876     if (end <= start)
2877         return -ENXIO;
2878 
2879     rcu_read_lock();
2880     while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
2881         loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
2882         size_t seek_size;
2883 
2884         if (start < pos) {
2885             if (!seek_data)
2886                 goto unlock;
2887             start = pos;
2888         }
2889 
2890         seek_size = seek_folio_size(&xas, folio);
2891         pos = round_up((u64)pos + 1, seek_size);
2892         start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
2893                 seek_data);
2894         if (start < pos)
2895             goto unlock;
2896         if (start >= end)
2897             break;
2898         if (seek_size > PAGE_SIZE)
2899             xas_set(&xas, pos >> PAGE_SHIFT);
2900         if (!xa_is_value(folio))
2901             folio_put(folio);
2902     }
2903     if (seek_data)
2904         start = -ENXIO;
2905 unlock:
2906     rcu_read_unlock();
2907     if (folio && !xa_is_value(folio))
2908         folio_put(folio);
2909     if (start > end)
2910         return end;
2911     return start;
2912 }
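/*
 * Illustrative sketch (not part of this file): a memory-backed filesystem
 * could implement SEEK_HOLE/SEEK_DATA in its ->llseek purely from the page
 * cache, roughly as tmpfs does.  The function name is hypothetical and
 * locking is omitted:
 *
 *      static loff_t example_llseek(struct file *file, loff_t offset, int whence)
 *      {
 *              struct inode *inode = file_inode(file);
 *
 *              if (whence != SEEK_DATA && whence != SEEK_HOLE)
 *                      return generic_file_llseek(file, offset, whence);
 *              if (offset < 0 || offset >= i_size_read(inode))
 *                      return -ENXIO;
 *              offset = mapping_seek_hole_data(file->f_mapping, offset,
 *                                              i_size_read(inode), whence);
 *              if (offset >= 0)
 *                      offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 *              return offset;
 *      }
 */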
2913 
2914 #ifdef CONFIG_MMU
2915 #define MMAP_LOTSAMISS  (100)
2916 /*
2917  * lock_folio_maybe_drop_mmap - lock the folio, possibly dropping the mmap_lock
2918  * @vmf - the vm_fault for this fault.
2919  * @folio - the folio to lock.
2920  * @fpin - the pointer to the file we may pin (or is already pinned).
2921  *
2922  * This works similarly to __folio_lock_or_retry() in that it can drop the
2923  * mmap_lock.  It differs in that it actually returns the folio locked
2924  * if it returns 1 and 0 if it couldn't lock the folio.  If we did have
2925  * to drop the mmap_lock then fpin will point to the pinned file and
2926  * needs to be fput()'ed at a later point.
2927  */
2928 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
2929                      struct file **fpin)
2930 {
2931     if (folio_trylock(folio))
2932         return 1;
2933 
2934     /*
2935      * NOTE! This will make us return with VM_FAULT_RETRY, but with
2936      * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
2937      * is supposed to work. We have way too many special cases..
2938      */
2939     if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2940         return 0;
2941 
2942     *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2943     if (vmf->flags & FAULT_FLAG_KILLABLE) {
2944         if (__folio_lock_killable(folio)) {
2945             /*
2946              * We didn't have the right flags to drop the mmap_lock,
2947              * but all fault_handlers only check for fatal signals
2948              * if we return VM_FAULT_RETRY, so we need to drop the
2949              * mmap_lock here and return 0 if we don't have a fpin.
2950              */
2951             if (*fpin == NULL)
2952                 mmap_read_unlock(vmf->vma->vm_mm);
2953             return 0;
2954         }
2955     } else
2956         __folio_lock(folio);
2957 
2958     return 1;
2959 }
2960 
2961 /*
2962  * Synchronous readahead happens when we don't even find a page in the page
2963  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
2964  * to drop the mmap sem we return the file that was pinned in order for us to do
2965  * that.  If we didn't pin a file then we return NULL.  The file that is
2966  * returned needs to be fput()'ed when we're done with it.
2967  */
2968 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
2969 {
2970     struct file *file = vmf->vma->vm_file;
2971     struct file_ra_state *ra = &file->f_ra;
2972     struct address_space *mapping = file->f_mapping;
2973     DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
2974     struct file *fpin = NULL;
2975     unsigned long vm_flags = vmf->vma->vm_flags;
2976     unsigned int mmap_miss;
2977 
2978 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2979     /* Use the readahead code, even if readahead is disabled */
2980     if (vm_flags & VM_HUGEPAGE) {
2981         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2982         ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
2983         ra->size = HPAGE_PMD_NR;
2984         /*
2985          * Fetch two PMD folios, so we get the chance to actually
2986          * readahead, unless we've been told not to.
2987          */
2988         if (!(vm_flags & VM_RAND_READ))
2989             ra->size *= 2;
2990         ra->async_size = HPAGE_PMD_NR;
2991         page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
2992         return fpin;
2993     }
2994 #endif
2995 
2996     /* If we don't want any read-ahead, don't bother */
2997     if (vm_flags & VM_RAND_READ)
2998         return fpin;
2999     if (!ra->ra_pages)
3000         return fpin;
3001 
3002     if (vm_flags & VM_SEQ_READ) {
3003         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3004         page_cache_sync_ra(&ractl, ra->ra_pages);
3005         return fpin;
3006     }
3007 
3008     /* Avoid banging the cache line if not needed */
3009     mmap_miss = READ_ONCE(ra->mmap_miss);
3010     if (mmap_miss < MMAP_LOTSAMISS * 10)
3011         WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3012 
3013     /*
3014      * Do we miss much more than hit in this file? If so,
3015      * stop bothering with read-ahead. It will only hurt.
3016      */
3017     if (mmap_miss > MMAP_LOTSAMISS)
3018         return fpin;
3019 
3020     /*
3021      * mmap read-around
3022      */
3023     fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3024     ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3025     ra->size = ra->ra_pages;
3026     ra->async_size = ra->ra_pages / 4;
3027     ractl._index = ra->start;
3028     page_cache_ra_order(&ractl, ra, 0);
3029     return fpin;
3030 }
3031 
3032 /*
3033  * Asynchronous readahead happens when we find the page and PG_readahead,
3034  * so we want to possibly extend the readahead further.  We return the file that
3035  * was pinned if we have to drop the mmap_lock in order to do IO.
3036  */
3037 static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3038                         struct folio *folio)
3039 {
3040     struct file *file = vmf->vma->vm_file;
3041     struct file_ra_state *ra = &file->f_ra;
3042     DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3043     struct file *fpin = NULL;
3044     unsigned int mmap_miss;
3045 
3046     /* If we don't want any read-ahead, don't bother */
3047     if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3048         return fpin;
3049 
3050     mmap_miss = READ_ONCE(ra->mmap_miss);
3051     if (mmap_miss)
3052         WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3053 
3054     if (folio_test_readahead(folio)) {
3055         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3056         page_cache_async_ra(&ractl, folio, ra->ra_pages);
3057     }
3058     return fpin;
3059 }
3060 
3061 /**
3062  * filemap_fault - read in file data for page fault handling
3063  * @vmf:    struct vm_fault containing details of the fault
3064  *
3065  * filemap_fault() is invoked via the vma operations vector for a
3066  * mapped memory region to read in file data during a page fault.
3067  *
3068  * The goto's are kind of ugly, but this streamlines the normal case of having
3069  * it in the page cache, and handles the special cases reasonably without
3070  * having a lot of duplicated code.
3071  *
3072  * vma->vm_mm->mmap_lock must be held on entry.
3073  *
3074  * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3075  * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3076  *
3077  * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3078  * has not been released.
3079  *
3080  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3081  *
3082  * Return: bitwise-OR of %VM_FAULT_ codes.
3083  */
3084 vm_fault_t filemap_fault(struct vm_fault *vmf)
3085 {
3086     int error;
3087     struct file *file = vmf->vma->vm_file;
3088     struct file *fpin = NULL;
3089     struct address_space *mapping = file->f_mapping;
3090     struct inode *inode = mapping->host;
3091     pgoff_t max_idx, index = vmf->pgoff;
3092     struct folio *folio;
3093     vm_fault_t ret = 0;
3094     bool mapping_locked = false;
3095 
3096     max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3097     if (unlikely(index >= max_idx))
3098         return VM_FAULT_SIGBUS;
3099 
3100     /*
3101      * Do we have something in the page cache already?
3102      */
3103     folio = filemap_get_folio(mapping, index);
3104     if (likely(folio)) {
3105         /*
3106          * We found the page, so try async readahead before waiting for
3107          * the lock.
3108          */
3109         if (!(vmf->flags & FAULT_FLAG_TRIED))
3110             fpin = do_async_mmap_readahead(vmf, folio);
3111         if (unlikely(!folio_test_uptodate(folio))) {
3112             filemap_invalidate_lock_shared(mapping);
3113             mapping_locked = true;
3114         }
3115     } else {
3116         /* No page in the page cache at all */
3117         count_vm_event(PGMAJFAULT);
3118         count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3119         ret = VM_FAULT_MAJOR;
3120         fpin = do_sync_mmap_readahead(vmf);
3121 retry_find:
3122         /*
3123          * See the comment in filemap_create_folio() for why we need the
3124          * invalidate_lock.
3125          */
3126         if (!mapping_locked) {
3127             filemap_invalidate_lock_shared(mapping);
3128             mapping_locked = true;
3129         }
3130         folio = __filemap_get_folio(mapping, index,
3131                       FGP_CREAT|FGP_FOR_MMAP,
3132                       vmf->gfp_mask);
3133         if (!folio) {
3134             if (fpin)
3135                 goto out_retry;
3136             filemap_invalidate_unlock_shared(mapping);
3137             return VM_FAULT_OOM;
3138         }
3139     }
3140 
3141     if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3142         goto out_retry;
3143 
3144     /* Did it get truncated? */
3145     if (unlikely(folio->mapping != mapping)) {
3146         folio_unlock(folio);
3147         folio_put(folio);
3148         goto retry_find;
3149     }
3150     VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3151 
3152     /*
3153      * We have a locked page in the page cache; now we need to check
3154      * that it's up-to-date. If not, it is going to be due to an error.
3155      */
3156     if (unlikely(!folio_test_uptodate(folio))) {
3157         /*
3158          * The page was in cache and uptodate and now it is not.
3159          * Strange but possible since we didn't hold the page lock all
3160          * the time. Let's drop everything, get the invalidate lock and
3161          * try again.
3162          */
3163         if (!mapping_locked) {
3164             folio_unlock(folio);
3165             folio_put(folio);
3166             goto retry_find;
3167         }
3168         goto page_not_uptodate;
3169     }
3170 
3171     /*
3172      * We've made it this far and we had to drop our mmap_lock; now is the
3173      * time to return to the upper layer and have it re-find the vma and
3174      * redo the fault.
3175      */
3176     if (fpin) {
3177         folio_unlock(folio);
3178         goto out_retry;
3179     }
3180     if (mapping_locked)
3181         filemap_invalidate_unlock_shared(mapping);
3182 
3183     /*
3184      * Found the page and have a reference on it.
3185      * We must recheck i_size under page lock.
3186      */
3187     max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3188     if (unlikely(index >= max_idx)) {
3189         folio_unlock(folio);
3190         folio_put(folio);
3191         return VM_FAULT_SIGBUS;
3192     }
3193 
3194     vmf->page = folio_file_page(folio, index);
3195     return ret | VM_FAULT_LOCKED;
3196 
3197 page_not_uptodate:
3198     /*
3199      * Umm, take care of errors if the page isn't up-to-date.
3200      * Try to re-read it _once_. We do this synchronously,
3201      * because there really aren't any performance issues here
3202      * and we need to check for errors.
3203      */
3204     fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3205     error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3206     if (fpin)
3207         goto out_retry;
3208     folio_put(folio);
3209 
3210     if (!error || error == AOP_TRUNCATED_PAGE)
3211         goto retry_find;
3212     filemap_invalidate_unlock_shared(mapping);
3213 
3214     return VM_FAULT_SIGBUS;
3215 
3216 out_retry:
3217     /*
3218      * We dropped the mmap_lock, so we need to return to the fault handler
3219      * to re-find the vma and come back to find our hopefully still populated
3220      * page.
3221      */
3222     if (folio)
3223         folio_put(folio);
3224     if (mapping_locked)
3225         filemap_invalidate_unlock_shared(mapping);
3226     if (fpin)
3227         fput(fpin);
3228     return ret | VM_FAULT_RETRY;
3229 }
3230 EXPORT_SYMBOL(filemap_fault);
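
filemap_fault() is normally reached through a filesystem's vm_operations_struct, usually together with filemap_map_pages() and filemap_page_mkwrite() defined later in this file. A hedged sketch of how a filesystem that wants its own page_mkwrite step (for example, to reserve space) might wire this up; the myfs_* names are hypothetical, only the filemap_* and file_accessed() helpers are real:

/* Hypothetical glue code; not part of filemap.c. */
static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	/* A real filesystem would reserve blocks/space here before
	 * letting the generic helper dirty the folio. */
	return filemap_page_mkwrite(vmf);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= myfs_page_mkwrite,
};

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_file_vm_ops;
	return 0;
}

Filesystems that need nothing special simply use generic_file_mmap(), which installs generic_file_vm_ops (see below).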
3231 
3232 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
3233 {
3234     struct mm_struct *mm = vmf->vma->vm_mm;
3235 
3236     /* Huge page is mapped? No need to proceed. */
3237     if (pmd_trans_huge(*vmf->pmd)) {
3238         unlock_page(page);
3239         put_page(page);
3240         return true;
3241     }
3242 
3243     if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
3244         vm_fault_t ret = do_set_pmd(vmf, page);
3245         if (!ret) {
3246             /* The page is mapped successfully, reference consumed. */
3247             unlock_page(page);
3248             return true;
3249         }
3250     }
3251 
3252     if (pmd_none(*vmf->pmd))
3253         pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3254 
3255     /* See comment in handle_pte_fault() */
3256     if (pmd_devmap_trans_unstable(vmf->pmd)) {
3257         unlock_page(page);
3258         put_page(page);
3259         return true;
3260     }
3261 
3262     return false;
3263 }
3264 
3265 static struct folio *next_uptodate_page(struct folio *folio,
3266                        struct address_space *mapping,
3267                        struct xa_state *xas, pgoff_t end_pgoff)
3268 {
3269     unsigned long max_idx;
3270 
3271     do {
3272         if (!folio)
3273             return NULL;
3274         if (xas_retry(xas, folio))
3275             continue;
3276         if (xa_is_value(folio))
3277             continue;
3278         if (folio_test_locked(folio))
3279             continue;
3280         if (!folio_try_get_rcu(folio))
3281             continue;
3282         /* Has the page moved or been split? */
3283         if (unlikely(folio != xas_reload(xas)))
3284             goto skip;
3285         if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3286             goto skip;
3287         if (!folio_trylock(folio))
3288             goto skip;
3289         if (folio->mapping != mapping)
3290             goto unlock;
3291         if (!folio_test_uptodate(folio))
3292             goto unlock;
3293         max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3294         if (xas->xa_index >= max_idx)
3295             goto unlock;
3296         return folio;
3297 unlock:
3298         folio_unlock(folio);
3299 skip:
3300         folio_put(folio);
3301     } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3302 
3303     return NULL;
3304 }
3305 
3306 static inline struct folio *first_map_page(struct address_space *mapping,
3307                       struct xa_state *xas,
3308                       pgoff_t end_pgoff)
3309 {
3310     return next_uptodate_page(xas_find(xas, end_pgoff),
3311                   mapping, xas, end_pgoff);
3312 }
3313 
3314 static inline struct folio *next_map_page(struct address_space *mapping,
3315                      struct xa_state *xas,
3316                      pgoff_t end_pgoff)
3317 {
3318     return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3319                   mapping, xas, end_pgoff);
3320 }
3321 
3322 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3323                  pgoff_t start_pgoff, pgoff_t end_pgoff)
3324 {
3325     struct vm_area_struct *vma = vmf->vma;
3326     struct file *file = vma->vm_file;
3327     struct address_space *mapping = file->f_mapping;
3328     pgoff_t last_pgoff = start_pgoff;
3329     unsigned long addr;
3330     XA_STATE(xas, &mapping->i_pages, start_pgoff);
3331     struct folio *folio;
3332     struct page *page;
3333     unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3334     vm_fault_t ret = 0;
3335 
3336     rcu_read_lock();
3337     folio = first_map_page(mapping, &xas, end_pgoff);
3338     if (!folio)
3339         goto out;
3340 
3341     if (filemap_map_pmd(vmf, &folio->page)) {
3342         ret = VM_FAULT_NOPAGE;
3343         goto out;
3344     }
3345 
3346     addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3347     vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3348     do {
3349 again:
3350         page = folio_file_page(folio, xas.xa_index);
3351         if (PageHWPoison(page))
3352             goto unlock;
3353 
3354         if (mmap_miss > 0)
3355             mmap_miss--;
3356 
3357         addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3358         vmf->pte += xas.xa_index - last_pgoff;
3359         last_pgoff = xas.xa_index;
3360 
3361         /*
3362          * NOTE: If there are PTE markers, we'll leave them to be
3363          * handled in the specific fault path; that prohibits the
3364          * fault-around logic.
3365          */
3366         if (!pte_none(*vmf->pte))
3367             goto unlock;
3368 
3369         /* We're about to handle the fault */
3370         if (vmf->address == addr)
3371             ret = VM_FAULT_NOPAGE;
3372 
3373         do_set_pte(vmf, page, addr);
3374         /* no need to invalidate: a not-present page won't be cached */
3375         update_mmu_cache(vma, addr, vmf->pte);
3376         if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3377             xas.xa_index++;
3378             folio_ref_inc(folio);
3379             goto again;
3380         }
3381         folio_unlock(folio);
3382         continue;
3383 unlock:
3384         if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3385             xas.xa_index++;
3386             goto again;
3387         }
3388         folio_unlock(folio);
3389         folio_put(folio);
3390     } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3391     pte_unmap_unlock(vmf->pte, vmf->ptl);
3392 out:
3393     rcu_read_unlock();
3394     WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3395     return ret;
3396 }
3397 EXPORT_SYMBOL(filemap_map_pages);
3398 
3399 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3400 {
3401     struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3402     struct folio *folio = page_folio(vmf->page);
3403     vm_fault_t ret = VM_FAULT_LOCKED;
3404 
3405     sb_start_pagefault(mapping->host->i_sb);
3406     file_update_time(vmf->vma->vm_file);
3407     folio_lock(folio);
3408     if (folio->mapping != mapping) {
3409         folio_unlock(folio);
3410         ret = VM_FAULT_NOPAGE;
3411         goto out;
3412     }
3413     /*
3414      * We mark the folio dirty here already so that, when a freeze is in
3415      * progress, we are guaranteed that writeback during freezing will
3416      * see the dirty folio and writeprotect it again.
3417      */
3418     folio_mark_dirty(folio);
3419     folio_wait_stable(folio);
3420 out:
3421     sb_end_pagefault(mapping->host->i_sb);
3422     return ret;
3423 }
3424 
3425 const struct vm_operations_struct generic_file_vm_ops = {
3426     .fault      = filemap_fault,
3427     .map_pages  = filemap_map_pages,
3428     .page_mkwrite   = filemap_page_mkwrite,
3429 };
3430 
3431 /* This is used for a general mmap of a disk file */
3432 
3433 int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3434 {
3435     struct address_space *mapping = file->f_mapping;
3436 
3437     if (!mapping->a_ops->read_folio)
3438         return -ENOEXEC;
3439     file_accessed(file);
3440     vma->vm_ops = &generic_file_vm_ops;
3441     return 0;
3442 }
3443 
3444 /*
3445  * This is for filesystems which do not implement ->writepage.
3446  */
3447 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3448 {
3449     if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3450         return -EINVAL;
3451     return generic_file_mmap(file, vma);
3452 }
3453 #else
3454 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3455 {
3456     return VM_FAULT_SIGBUS;
3457 }
3458 int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3459 {
3460     return -ENOSYS;
3461 }
3462 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3463 {
3464     return -ENOSYS;
3465 }
3466 #endif /* CONFIG_MMU */
3467 
3468 EXPORT_SYMBOL(filemap_page_mkwrite);
3469 EXPORT_SYMBOL(generic_file_mmap);
3470 EXPORT_SYMBOL(generic_file_readonly_mmap);
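
A short illustration of how the two mmap helpers above are consumed: a filesystem without ->writepage (a read-only or ROM-backed one) points .mmap at generic_file_readonly_mmap so shared writable mappings are refused, while reads still go through the generic fault paths. The romlike_file_operations name is hypothetical; the referenced helpers are existing generic ones:

/* Hypothetical file_operations for a read-only filesystem. */
static const struct file_operations romlike_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};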
3471 
3472 static struct folio *do_read_cache_folio(struct address_space *mapping,
3473         pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3474 {
3475     struct folio *folio;
3476     int err;
3477 
3478     if (!filler)
3479         filler = mapping->a_ops->read_folio;
3480 repeat:
3481     folio = filemap_get_folio(mapping, index);
3482     if (!folio) {
3483         folio = filemap_alloc_folio(gfp, 0);
3484         if (!folio)
3485             return ERR_PTR(-ENOMEM);
3486         err = filemap_add_folio(mapping, folio, index, gfp);
3487         if (unlikely(err)) {
3488             folio_put(folio);
3489             if (err == -EEXIST)
3490                 goto repeat;
3491             /* Presumably ENOMEM for xarray node */
3492             return ERR_PTR(err);
3493         }
3494 
3495         goto filler;
3496     }
3497     if (folio_test_uptodate(folio))
3498         goto out;
3499 
3500     if (!folio_trylock(folio)) {
3501         folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3502         goto repeat;
3503     }
3504 
3505     /* Folio was truncated from mapping */
3506     if (!folio->mapping) {
3507         folio_unlock(folio);
3508         folio_put(folio);
3509         goto repeat;
3510     }
3511 
3512     /* Someone else locked and filled the page in a very small window */
3513     if (folio_test_uptodate(folio)) {
3514         folio_unlock(folio);
3515         goto out;
3516     }
3517 
3518 filler:
3519     err = filemap_read_folio(file, filler, folio);
3520     if (err) {
3521         folio_put(folio);
3522         if (err == AOP_TRUNCATED_PAGE)
3523             goto repeat;
3524         return ERR_PTR(err);
3525     }
3526 
3527 out:
3528     folio_mark_accessed(folio);
3529     return folio;
3530 }
3531 
3532 /**
3533  * read_cache_folio - Read into page cache, fill it if needed.
3534  * @mapping: The address_space to read from.
3535  * @index: The index to read.
3536  * @filler: Function to perform the read, or NULL to use aops->read_folio().
3537  * @file: Passed to filler function, may be NULL if not required.
3538  *
3539  * Read one page into the page cache.  If it succeeds, the folio returned
3540  * will contain @index, but the page at @index may not be the folio's first page.
3541  *
3542  * If the filler function returns an error, it will be returned to the
3543  * caller.
3544  *
3545  * Context: May sleep.  Expects mapping->invalidate_lock to be held.
3546  * Return: An uptodate folio on success, ERR_PTR() on failure.
3547  */
3548 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3549         filler_t filler, struct file *file)
3550 {
3551     return do_read_cache_folio(mapping, index, filler, file,
3552             mapping_gfp_mask(mapping));
3553 }
3554 EXPORT_SYMBOL(read_cache_folio);
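
A hedged sketch of a typical read_cache_folio() caller: read the folio containing a given index through the page cache and peek at its contents. Passing a NULL filler uses the mapping's ->read_folio(), as documented above. The myfs_peek_block() name and the choice of mapping/index are illustrative, and a real caller must respect the locking context noted in the kernel-doc:

/* Illustrative helper, not part of filemap.c. */
static int myfs_peek_block(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;
	void *kaddr;

	folio = read_cache_folio(mapping, index, NULL, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* Map the first page of the folio and look at the data. */
	kaddr = kmap_local_folio(folio, 0);
	/* ... inspect kaddr ... */
	kunmap_local(kaddr);

	folio_put(folio);
	return 0;
}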
3555 
3556 static struct page *do_read_cache_page(struct address_space *mapping,
3557         pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3558 {
3559     struct folio *folio;
3560 
3561     folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3562     if (IS_ERR(folio))
3563         return &folio->page;
3564     return folio_file_page(folio, index);
3565 }
3566 
3567 struct page *read_cache_page(struct address_space *mapping,
3568             pgoff_t index, filler_t *filler, struct file *file)
3569 {
3570     return do_read_cache_page(mapping, index, filler, file,
3571             mapping_gfp_mask(mapping));
3572 }
3573 EXPORT_SYMBOL(read_cache_page);
3574 
3575 /**
3576  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3577  * @mapping:    the page's address_space
3578  * @index:  the page index
3579  * @gfp:    the page allocator flags to use if allocating
3580  *
3581  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3582  * any new page allocations done using the specified allocation flags.
3583  *
3584  * If the page does not get brought uptodate, return -EIO.
3585  *
3586  * The function expects mapping->invalidate_lock to be already held.
3587  *
3588  * Return: uptodate page on success, ERR_PTR() on failure.
3589  */
3590 struct page *read_cache_page_gfp(struct address_space *mapping,
3591                 pgoff_t index,
3592                 gfp_t gfp)
3593 {
3594     return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3595 }
3596 EXPORT_SYMBOL(read_cache_page_gfp);
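
The gfp-constrained variant is typically used when the caller must not recurse into filesystem reclaim while the page is faulted in. A minimal sketch, assuming a hypothetical myfs_read_meta_page() helper; mapping_gfp_constraint() is the existing helper that masks the mapping's default allocation flags:

/* Illustrative helper, not part of filemap.c. */
static int myfs_read_meta_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	/* Limit any new page-cache allocation to GFP_NOFS. */
	page = read_cache_page_gfp(mapping, index,
				   mapping_gfp_constraint(mapping, GFP_NOFS));
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* ... use the uptodate page ... */
	put_page(page);
	return 0;
}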
3597 
3598 /*
3599  * Warn about a page cache invalidation failure during a direct I/O write.
3600  */
3601 void dio_warn_stale_pagecache(struct file *filp)
3602 {
3603     static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3604     char pathname[128];
3605     char *path;
3606 
3607     errseq_set(&filp->f_mapping->wb_err, -EIO);
3608     if (__ratelimit(&_rs)) {
3609         path = file_path(filp, pathname, sizeof(pathname));
3610         if (IS_ERR(path))
3611             path = "(unknown)";
3612         pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3613         pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3614             current->comm);
3615     }
3616 }
3617 
3618 ssize_t
3619 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3620 {
3621     struct file *file = iocb->ki_filp;
3622     struct address_space *mapping = file->f_mapping;
3623     struct inode    *inode = mapping->host;
3624     loff_t      pos = iocb->ki_pos;
3625     ssize_t     written;
3626     size_t      write_len;
3627     pgoff_t     end;
3628 
3629     write_len = iov_iter_count(from);
3630     end = (pos + write_len - 1) >> PAGE_SHIFT;
3631 
3632     if (iocb->ki_flags & IOCB_NOWAIT) {
3633         /* If there are pages to write back, return -EAGAIN */
3634         if (filemap_range_has_page(file->f_mapping, pos,
3635                        pos + write_len - 1))
3636             return -EAGAIN;
3637     } else {
3638         written = filemap_write_and_wait_range(mapping, pos,
3639                             pos + write_len - 1);
3640         if (written)
3641             goto out;
3642     }
3643 
3644     /*
3645      * After a write we want buffered reads to be sure to go to disk to get
3646      * the new data.  We invalidate clean cached pages from the region we're
3647      * about to write.  We do this *before* the write so that we can return
3648      * without clobbering -EIOCBQUEUED from ->direct_IO().
3649      */
3650     written = invalidate_inode_pages2_range(mapping,
3651                     pos >> PAGE_SHIFT, end);
3652     /*
3653      * If a page cannot be invalidated, return 0 to fall back
3654      * to a buffered write.
3655      */
3656     if (written) {
3657         if (written == -EBUSY)
3658             return 0;
3659         goto out;
3660     }
3661 
3662     written = mapping->a_ops->direct_IO(iocb, from);
3663 
3664     /*
3665      * Finally, try again to invalidate clean pages which might have been
3666      * cached by non-direct readahead, or faulted in by get_user_pages()
3667      * if the source of the write was an mmap'ed region of the file
3668      * we're writing.  Either one is a pretty crazy thing to do,
3669      * so we don't support it 100%.  If this invalidation
3670      * fails, tough, the write still worked...
3671      *
3672      * Most of the time we do not need this since dio_complete() will do
3673      * the invalidation for us. However, there are some filesystems that
3674      * do not end up with dio_complete() being called, so let's not break
3675      * them by removing it completely.
3676      *
3677      * A notable example is blkdev_direct_IO().
3678      *
3679      * Skip invalidation for async writes or if mapping has no pages.
3680      */
3681     if (written > 0 && mapping->nrpages &&
3682         invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3683         dio_warn_stale_pagecache(file);
3684 
3685     if (written > 0) {
3686         pos += written;
3687         write_len -= written;
3688         if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3689             i_size_write(inode, pos);
3690             mark_inode_dirty(inode);
3691         }
3692         iocb->ki_pos = pos;
3693     }
3694     if (written != -EIOCBQUEUED)
3695         iov_iter_revert(from, write_len - iov_iter_count(from));
3696 out:
3697     return written;
3698 }
3699 EXPORT_SYMBOL(generic_file_direct_write);
3700 
3701 ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3702 {
3703     struct file *file = iocb->ki_filp;
3704     loff_t pos = iocb->ki_pos;
3705     struct address_space *mapping = file->f_mapping;
3706     const struct address_space_operations *a_ops = mapping->a_ops;
3707     long status = 0;
3708     ssize_t written = 0;
3709 
3710     do {
3711         struct page *page;
3712         unsigned long offset;   /* Offset into pagecache page */
3713         unsigned long bytes;    /* Bytes to write to page */
3714         size_t copied;      /* Bytes copied from user */
3715         void *fsdata;
3716 
3717         offset = (pos & (PAGE_SIZE - 1));
3718         bytes = min_t(unsigned long, PAGE_SIZE - offset,
3719                         iov_iter_count(i));
3720 
3721 again:
3722         /*
3723          * Bring in the user page that we will copy from _first_.
3724          * Otherwise there's a nasty deadlock on copying from the
3725          * same page as we're writing to, without it being marked
3726          * up-to-date.
3727          */
3728         if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3729             status = -EFAULT;
3730             break;
3731         }
3732 
3733         if (fatal_signal_pending(current)) {
3734             status = -EINTR;
3735             break;
3736         }
3737 
3738         status = a_ops->write_begin(file, mapping, pos, bytes,
3739                         &page, &fsdata);
3740         if (unlikely(status < 0))
3741             break;
3742 
3743         if (mapping_writably_mapped(mapping))
3744             flush_dcache_page(page);
3745 
3746         copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3747         flush_dcache_page(page);
3748 
3749         status = a_ops->write_end(file, mapping, pos, bytes, copied,
3750                         page, fsdata);
3751         if (unlikely(status != copied)) {
3752             iov_iter_revert(i, copied - max(status, 0L));
3753             if (unlikely(status < 0))
3754                 break;
3755         }
3756         cond_resched();
3757 
3758         if (unlikely(status == 0)) {
3759             /*
3760              * A short copy made ->write_end() reject the
3761              * thing entirely.  Might be memory poisoning
3762              * halfway through, might be a race with munmap,
3763              * might be severe memory pressure.
3764              */
3765             if (copied)
3766                 bytes = copied;
3767             goto again;
3768         }
3769         pos += status;
3770         written += status;
3771 
3772         balance_dirty_pages_ratelimited(mapping);
3773     } while (iov_iter_count(i));
3774 
3775     return written ? written : status;
3776 }
3777 EXPORT_SYMBOL(generic_perform_write);
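
generic_perform_write() drives the ->write_begin()/->write_end() contract: write_begin hands back a locked page-cache page to copy into, and write_end is told how many bytes were actually copied (a short copy makes the loop retry with a smaller request). For a simple in-memory filesystem the libfs helpers are enough to satisfy that contract; a hedged sketch of such an address_space_operations, assuming the ramfs-style helpers of this kernel generation (the myfs_aops name is hypothetical):

/* Hypothetical aops relying entirely on the libfs helpers. */
static const struct address_space_operations myfs_aops = {
	.read_folio	= simple_read_folio,
	.write_begin	= simple_write_begin,
	.write_end	= simple_write_end,
	.dirty_folio	= noop_dirty_folio,
};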
3778 
3779 /**
3780  * __generic_file_write_iter - write data to a file
3781  * @iocb:   IO state structure (file, offset, etc.)
3782  * @from:   iov_iter with data to write
3783  *
3784  * This function does all the work needed for actually writing data to a
3785  * file. It does all basic checks, removes SUID from the file, updates
3786  * modification times and calls proper subroutines depending on whether we
3787  * do direct IO or a standard buffered write.
3788  *
3789  * It expects i_rwsem to be grabbed unless we work on a block device or similar
3790  * object which does not need locking at all.
3791  *
3792  * This function does *not* take care of syncing data in case of O_SYNC write.
3793  * A caller has to handle it. This is mainly due to the fact that we want to
3794  * avoid syncing under i_rwsem.
3795  *
3796  * Return:
3797  * * number of bytes written, even for truncated writes
3798  * * negative error code if no data has been written at all
3799  */
3800 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3801 {
3802     struct file *file = iocb->ki_filp;
3803     struct address_space *mapping = file->f_mapping;
3804     struct inode    *inode = mapping->host;
3805     ssize_t     written = 0;
3806     ssize_t     err;
3807     ssize_t     status;
3808 
3809     /* We can write back this queue in page reclaim */
3810     current->backing_dev_info = inode_to_bdi(inode);
3811     err = file_remove_privs(file);
3812     if (err)
3813         goto out;
3814 
3815     err = file_update_time(file);
3816     if (err)
3817         goto out;
3818 
3819     if (iocb->ki_flags & IOCB_DIRECT) {
3820         loff_t pos, endbyte;
3821 
3822         written = generic_file_direct_write(iocb, from);
3823         /*
3824          * If the write stopped short of completing, fall back to
3825          * buffered writes.  Some filesystems do this for writes to
3826          * holes, for example.  For DAX files, a buffered write will
3827          * not succeed (even if it did, DAX does not handle dirty
3828          * page-cache pages correctly).
3829          */
3830         if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
3831             goto out;
3832 
3833         pos = iocb->ki_pos;
3834         status = generic_perform_write(iocb, from);
3835         /*
3836          * If generic_perform_write() returned a synchronous error
3837          * then we want to return the number of bytes which were
3838          * direct-written, or the error code if that was zero.  Note
3839          * that this differs from normal direct-io semantics, which
3840          * will return -EFOO even if some bytes were written.
3841          */
3842         if (unlikely(status < 0)) {
3843             err = status;
3844             goto out;
3845         }
3846         /*
3847          * We need to ensure that the page cache pages are written to
3848          * disk and invalidated to preserve the expected O_DIRECT
3849          * semantics.
3850          */
3851         endbyte = pos + status - 1;
3852         err = filemap_write_and_wait_range(mapping, pos, endbyte);
3853         if (err == 0) {
3854             iocb->ki_pos = endbyte + 1;
3855             written += status;
3856             invalidate_mapping_pages(mapping,
3857                          pos >> PAGE_SHIFT,
3858                          endbyte >> PAGE_SHIFT);
3859         } else {
3860             /*
3861              * We don't know how much we wrote, so just return
3862              * the number of bytes which were direct-written
3863              */
3864         }
3865     } else {
3866         written = generic_perform_write(iocb, from);
3867         if (likely(written > 0))
3868             iocb->ki_pos += written;
3869     }
3870 out:
3871     current->backing_dev_info = NULL;
3872     return written ? written : err;
3873 }
3874 EXPORT_SYMBOL(__generic_file_write_iter);
3875 
3876 /**
3877  * generic_file_write_iter - write data to a file
3878  * @iocb:   IO state structure
3879  * @from:   iov_iter with data to write
3880  *
3881  * This is a wrapper around __generic_file_write_iter() to be used by most
3882  * filesystems. It takes care of syncing the file if it was opened with
3883  * O_SYNC and acquires i_rwsem as needed.
3884  * Return:
3885  * * negative error code if no data has been written at all or
3886  *   vfs_fsync_range() failed for a synchronous write
3887  * * number of bytes written, even for truncated writes
3888  */
3889 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3890 {
3891     struct file *file = iocb->ki_filp;
3892     struct inode *inode = file->f_mapping->host;
3893     ssize_t ret;
3894 
3895     inode_lock(inode);
3896     ret = generic_write_checks(iocb, from);
3897     if (ret > 0)
3898         ret = __generic_file_write_iter(iocb, from);
3899     inode_unlock(inode);
3900 
3901     if (ret > 0)
3902         ret = generic_write_sync(iocb, ret);
3903     return ret;
3904 }
3905 EXPORT_SYMBOL(generic_file_write_iter);
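
Most filesystems consume this helper by pointing .write_iter at it (together with generic_file_read_iter() for the read side); the i_rwsem locking and O_SYNC handling described above then come for free. A hedged sketch of such a file_operations table; myfs_file_operations is hypothetical, the helpers are existing generic ones:

/* Hypothetical file_operations using the generic write path. */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= generic_file_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};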
3906 
3907 /**
3908  * filemap_release_folio() - Release fs-specific metadata on a folio.
3909  * @folio: The folio which the kernel is trying to free.
3910  * @gfp: Memory allocation flags (and I/O mode).
3911  *
3912  * The address_space is trying to release any data attached to a folio
3913  * (presumably at folio->private).
3914  *
3915  * This will also be called if the private_2 flag is set on the folio,
3916  * indicating that the folio has other metadata associated with it.
3917  *
3918  * The @gfp argument specifies whether I/O may be performed to release
3919  * this page (__GFP_IO), and whether the call may block
3920  * (__GFP_RECLAIM & __GFP_FS).
3921  *
3922  * Return: %true if the release was successful, otherwise %false.
3923  */
3924 bool filemap_release_folio(struct folio *folio, gfp_t gfp)
3925 {
3926     struct address_space * const mapping = folio->mapping;
3927 
3928     BUG_ON(!folio_test_locked(folio));
3929     if (folio_test_writeback(folio))
3930         return false;
3931 
3932     if (mapping && mapping->a_ops->release_folio)
3933         return mapping->a_ops->release_folio(folio, gfp);
3934     return try_to_free_buffers(folio);
3935 }
3936 EXPORT_SYMBOL(filemap_release_folio);
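
For a filesystem that attaches private data to folios, filemap_release_folio() ends up in its ->release_folio() when the VM wants to free the folio. A hedged sketch of a trivial implementation for a filesystem that hangs a kmalloc'ed cookie off folio->private (the myfs_release_folio() name is hypothetical; folio_detach_private() is the existing helper that clears PG_private and drops the reference taken by folio_attach_private()):

/* Hypothetical ->release_folio() implementation, not part of filemap.c. */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_private(folio))
		kfree(folio_detach_private(folio));
	return true;
}

It would be wired up via the .release_folio member of the filesystem's address_space_operations; returning false instead tells the caller the folio cannot be freed yet.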