0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * fs/mpage.c
0004  *
0005  * Copyright (C) 2002, Linus Torvalds.
0006  *
0007  * Contains functions related to preparing and submitting BIOs which contain
0008  * multiple pagecache pages.
0009  *
0010  * 15May2002    Andrew Morton
0011  *      Initial version
0012  * 27Jun2002    axboe@suse.de
0013  *      use bio_add_page() to build bio's just the right size
0014  */
0015 
0016 #include <linux/kernel.h>
0017 #include <linux/export.h>
0018 #include <linux/mm.h>
0019 #include <linux/kdev_t.h>
0020 #include <linux/gfp.h>
0021 #include <linux/bio.h>
0022 #include <linux/fs.h>
0023 #include <linux/buffer_head.h>
0024 #include <linux/blkdev.h>
0025 #include <linux/highmem.h>
0026 #include <linux/prefetch.h>
0027 #include <linux/mpage.h>
0028 #include <linux/mm_inline.h>
0029 #include <linux/writeback.h>
0030 #include <linux/backing-dev.h>
0031 #include <linux/pagevec.h>
0032 #include "internal.h"
0033 
0034 /*
0035  * I/O completion handler for multipage BIOs.
0036  *
0037  * The mpage code never puts partial pages into a BIO (except for end-of-file).
0038  * If a page does not map to a contiguous run of blocks then it simply falls
0039  * back to block_read_full_folio().
0040  *
0041  * Why is this?  If a page's completion depends on a number of different BIOs
0042  * which can complete in any order (or at the same time) then determining the
0043  * status of that page is hard.  See end_buffer_async_read() for the details.
0044  * There is no point in duplicating all that complexity.
0045  */
0046 static void mpage_end_io(struct bio *bio)
0047 {
0048     struct bio_vec *bv;
0049     struct bvec_iter_all iter_all;
0050 
0051     bio_for_each_segment_all(bv, bio, iter_all) {
0052         struct page *page = bv->bv_page;
0053         page_endio(page, bio_op(bio),
0054                blk_status_to_errno(bio->bi_status));
0055     }
0056 
0057     bio_put(bio);
0058 }
0059 
0060 static struct bio *mpage_bio_submit(struct bio *bio)
0061 {
0062     bio->bi_end_io = mpage_end_io;
0063     guard_bio_eod(bio);
0064     submit_bio(bio);
0065     return NULL;
0066 }
0067 
0068 /*
0069  * Support function for mpage_readahead.  The fs-supplied get_block might
0070  * return an up-to-date buffer.  This is used to map that buffer into
0071  * the page, which allows read_folio to avoid triggering a duplicate call
0072  * to get_block.
0073  *
0074  * The idea is to avoid adding buffers to pages that don't already have
0075  * them.  So when the buffer is up to date and the page size == block size,
0076  * this marks the page up to date instead of adding new buffers.
0077  */
0078 static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
0079         int page_block)
0080 {
0081     struct inode *inode = folio->mapping->host;
0082     struct buffer_head *page_bh, *head;
0083     int block = 0;
0084 
0085     head = folio_buffers(folio);
0086     if (!head) {
0087         /*
0088          * don't make any buffers if there is only one buffer on
0089          * the folio and the folio just needs to be set up to date
0090          */
0091         if (inode->i_blkbits == PAGE_SHIFT &&
0092             buffer_uptodate(bh)) {
0093             folio_mark_uptodate(folio);
0094             return;
0095         }
0096         create_empty_buffers(&folio->page, i_blocksize(inode), 0);
0097         head = folio_buffers(folio);
0098     }
0099 
0100     page_bh = head;
0101     do {
0102         if (block == page_block) {
0103             page_bh->b_state = bh->b_state;
0104             page_bh->b_bdev = bh->b_bdev;
0105             page_bh->b_blocknr = bh->b_blocknr;
0106             break;
0107         }
0108         page_bh = page_bh->b_this_page;
0109         block++;
0110     } while (page_bh != head);
0111 }
0112 
0113 struct mpage_readpage_args {
0114     struct bio *bio;
0115     struct folio *folio;
0116     unsigned int nr_pages;
0117     bool is_readahead;
0118     sector_t last_block_in_bio;
0119     struct buffer_head map_bh;
0120     unsigned long first_logical_block;
0121     get_block_t *get_block;
0122 };
0123 
0124 /*
0125  * This is the worker routine which does all the work of mapping the disk
0126  * blocks and constructing the largest possible BIOs, submitting them for I/O
0127  * whenever the blocks are not contiguous on disk.
0128  *
0129  * We pass a buffer_head back and forth and use its buffer_mapped() flag to
0130  * represent the validity of its disk mapping and to decide when to do the next
0131  * get_block() call.
0132  */
0133 static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
0134 {
0135     struct folio *folio = args->folio;
0136     struct inode *inode = folio->mapping->host;
0137     const unsigned blkbits = inode->i_blkbits;
0138     const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
0139     const unsigned blocksize = 1 << blkbits;
0140     struct buffer_head *map_bh = &args->map_bh;
0141     sector_t block_in_file;
0142     sector_t last_block;
0143     sector_t last_block_in_file;
0144     sector_t blocks[MAX_BUF_PER_PAGE];
0145     unsigned page_block;
0146     unsigned first_hole = blocks_per_page;
0147     struct block_device *bdev = NULL;
0148     int length;
0149     int fully_mapped = 1;
0150     blk_opf_t opf = REQ_OP_READ;
0151     unsigned nblocks;
0152     unsigned relative_block;
0153     gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
0154 
0155     /* Per-page arrays such as blocks[MAX_BUF_PER_PAGE] preclude large folios */
0156     VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
0157 
0158     if (args->is_readahead) {
0159         opf |= REQ_RAHEAD;
0160         gfp |= __GFP_NORETRY | __GFP_NOWARN;
0161     }
0162 
0163     if (folio_buffers(folio))
0164         goto confused;
0165 
0166     block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
0167     last_block = block_in_file + args->nr_pages * blocks_per_page;
0168     last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
0169     if (last_block > last_block_in_file)
0170         last_block = last_block_in_file;
0171     page_block = 0;
0172 
0173     /*
0174      * Map blocks using the result from the previous get_blocks call first.
0175      */
0176     nblocks = map_bh->b_size >> blkbits;
0177     if (buffer_mapped(map_bh) &&
0178             block_in_file > args->first_logical_block &&
0179             block_in_file < (args->first_logical_block + nblocks)) {
0180         unsigned map_offset = block_in_file - args->first_logical_block;
0181         unsigned last = nblocks - map_offset;
0182 
0183         for (relative_block = 0; ; relative_block++) {
0184             if (relative_block == last) {
0185                 clear_buffer_mapped(map_bh);
0186                 break;
0187             }
0188             if (page_block == blocks_per_page)
0189                 break;
0190             blocks[page_block] = map_bh->b_blocknr + map_offset +
0191                         relative_block;
0192             page_block++;
0193             block_in_file++;
0194         }
0195         bdev = map_bh->b_bdev;
0196     }
0197 
0198     /*
0199      * Then do more get_blocks calls until we are done with this folio.
0200      */
0201     map_bh->b_page = &folio->page;
0202     while (page_block < blocks_per_page) {
0203         map_bh->b_state = 0;
0204         map_bh->b_size = 0;
0205 
0206         if (block_in_file < last_block) {
0207             map_bh->b_size = (last_block-block_in_file) << blkbits;
0208             if (args->get_block(inode, block_in_file, map_bh, 0))
0209                 goto confused;
0210             args->first_logical_block = block_in_file;
0211         }
0212 
0213         if (!buffer_mapped(map_bh)) {
0214             fully_mapped = 0;
0215             if (first_hole == blocks_per_page)
0216                 first_hole = page_block;
0217             page_block++;
0218             block_in_file++;
0219             continue;
0220         }
0221 
0222         /* some filesystems will copy data into the page during
0223          * the get_block call, in which case we don't want to
0224          * read it again.  map_buffer_to_folio copies the data
0225          * we just collected from get_block into the folio's buffers
0226          * so read_folio doesn't have to repeat the get_block call
0227          */
0228         if (buffer_uptodate(map_bh)) {
0229             map_buffer_to_folio(folio, map_bh, page_block);
0230             goto confused;
0231         }
0232     
0233         if (first_hole != blocks_per_page)
0234             goto confused;      /* hole -> non-hole */
0235 
0236         /* Contiguous blocks? */
0237         if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
0238             goto confused;
0239         nblocks = map_bh->b_size >> blkbits;
0240         for (relative_block = 0; ; relative_block++) {
0241             if (relative_block == nblocks) {
0242                 clear_buffer_mapped(map_bh);
0243                 break;
0244             } else if (page_block == blocks_per_page)
0245                 break;
0246             blocks[page_block] = map_bh->b_blocknr+relative_block;
0247             page_block++;
0248             block_in_file++;
0249         }
0250         bdev = map_bh->b_bdev;
0251     }
0252 
0253     if (first_hole != blocks_per_page) {
0254         folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
0255         if (first_hole == 0) {
0256             folio_mark_uptodate(folio);
0257             folio_unlock(folio);
0258             goto out;
0259         }
0260     } else if (fully_mapped) {
0261         folio_set_mappedtodisk(folio);
0262     }
0263 
0264     /*
0265      * This folio will go to BIO.  Do we need to send this BIO off first?
0266      */
0267     if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
0268         args->bio = mpage_bio_submit(args->bio);
0269 
0270 alloc_new:
0271     if (args->bio == NULL) {
0272         if (first_hole == blocks_per_page) {
0273             if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
0274                                 &folio->page))
0275                 goto out;
0276         }
0277         args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
0278                       gfp);
0279         if (args->bio == NULL)
0280             goto confused;
0281         args->bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
0282     }
0283 
0284     length = first_hole << blkbits;
0285     if (!bio_add_folio(args->bio, folio, length, 0)) {
0286         args->bio = mpage_bio_submit(args->bio);
0287         goto alloc_new;
0288     }
0289 
0290     relative_block = block_in_file - args->first_logical_block;
0291     nblocks = map_bh->b_size >> blkbits;
0292     if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
0293         (first_hole != blocks_per_page))
0294         args->bio = mpage_bio_submit(args->bio);
0295     else
0296         args->last_block_in_bio = blocks[blocks_per_page - 1];
0297 out:
0298     return args->bio;
0299 
0300 confused:
0301     if (args->bio)
0302         args->bio = mpage_bio_submit(args->bio);
0303     if (!folio_test_uptodate(folio))
0304         block_read_full_folio(folio, args->get_block);
0305     else
0306         folio_unlock(folio);
0307     goto out;
0308 }
0309 
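/*
 * Illustrative sketch (not taken from a real filesystem) of the get_block
 * contract the worker above relies on.  On entry, do_mpage_readpage() sets
 * map_bh->b_size to the number of bytes it would still like mapped; the
 * filesystem may map several contiguous blocks in one call by shrinking
 * b_size to what it actually mapped.  "flatfs" is assumed to store file data
 * contiguously, and flatfs_first_block() is an assumed helper returning the
 * inode's first on-disk block.
 */
static int flatfs_get_block(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create)
{
    const unsigned blkbits = inode->i_blkbits;
    sector_t nr_blocks = (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits;

    if (iblock >= nr_blocks)
        return 0;        /* beyond EOF: leave the buffer unmapped */

    /* Contiguous layout: logical block N lives at first_block + N. */
    map_bh(bh_result, inode->i_sb, flatfs_first_block(inode) + iblock);

    /* Map as much of the requested range as remains before EOF. */
    bh_result->b_size = min_t(u64, bh_result->b_size,
                  (u64)(nr_blocks - iblock) << blkbits);
    return 0;
}
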
0310 /**
0311  * mpage_readahead - start reads against pages
0312  * @rac: Describes which pages to read.
0313  * @get_block: The filesystem's block mapper function.
0314  *
0315  * This function walks the pages and the blocks within each page, building and
0316  * emitting large BIOs.
0317  *
0318  * If anything unusual happens, such as:
0319  *
0320  * - encountering a page which has buffers
0321  * - encountering a page which has a non-hole after a hole
0322  * - encountering a page with non-contiguous blocks
0323  *
0324  * then this code just gives up and calls the buffer_head-based read function.
0325  * It does handle a page which has holes at the end - that is a common case:
0326  * the end-of-file on blocksize < PAGE_SIZE setups.
0327  *
0328  * BH_Boundary explanation:
0329  *
0330  * There is a problem.  The mpage read code assembles several pages, gets all
0331  * their disk mappings, and then submits them all.  That's fine, but obtaining
0332  * the disk mappings may require I/O.  Reads of indirect blocks, for example.
0333  *
0334  * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
0335  * submitted in the following order:
0336  *
0337  *  12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
0338  *
0339  * because the indirect block has to be read to get the mappings of blocks
0340  * 13,14,15,16.  Obviously, this impacts performance.
0341  *
0342  * So what we do is allow the filesystem's get_block() function to set
0343  * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
0344  * after this one will require I/O against a block which is probably close to
0345  * this one.  So you should push what I/O you have currently accumulated.
0346  *
0347  * This all causes the disk requests to be issued in the correct order.
0348  */
0349 void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
0350 {
0351     struct folio *folio;
0352     struct mpage_readpage_args args = {
0353         .get_block = get_block,
0354         .is_readahead = true,
0355     };
0356 
0357     while ((folio = readahead_folio(rac))) {
0358         prefetchw(&folio->flags);
0359         args.folio = folio;
0360         args.nr_pages = readahead_count(rac);
0361         args.bio = do_mpage_readpage(&args);
0362     }
0363     if (args.bio)
0364         mpage_bio_submit(args.bio);
0365 }
0366 EXPORT_SYMBOL(mpage_readahead);
0367 
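/*
 * Illustrative sketch of the BH_Boundary convention described above, for a
 * hypothetical ext2-like filesystem.  example_lookup_block() and
 * EXAMPLE_NR_DIRECT are assumed names: when the block just mapped is the
 * last one reachable without reading an indirect block, get_block sets
 * BH_Boundary so that mpage submits the BIO it has accumulated before the
 * nearby indirect-block read is issued.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create)
{
    sector_t phys = example_lookup_block(inode, iblock);    /* assumed helper */

    if (!phys)
        return 0;        /* hole: leave the buffer unmapped */

    map_bh(bh_result, inode->i_sb, phys);
    bh_result->b_size = i_blocksize(inode);

    /*
     * Mapping the next logical block requires reading an indirect block
     * that sits close to this one on disk, so tell mpage to push the I/O
     * it has built up so far.
     */
    if (iblock == EXAMPLE_NR_DIRECT - 1)
        set_buffer_boundary(bh_result);
    return 0;
}
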
0368 /*
0369  * This isn't called much at all
0370  */
0371 int mpage_read_folio(struct folio *folio, get_block_t get_block)
0372 {
0373     struct mpage_readpage_args args = {
0374         .folio = folio,
0375         .nr_pages = 1,
0376         .get_block = get_block,
0377     };
0378 
0379     args.bio = do_mpage_readpage(&args);
0380     if (args.bio)
0381         mpage_bio_submit(args.bio);
0382     return 0;
0383 }
0384 EXPORT_SYMBOL(mpage_read_folio);
0385 
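/*
 * Illustrative sketch of how a block-based filesystem typically hooks the
 * two read-side helpers above into its address_space_operations (ext2 does
 * essentially this); "examplefs" and examplefs_get_block are assumed names.
 */
static int examplefs_read_folio(struct file *file, struct folio *folio)
{
    return mpage_read_folio(folio, examplefs_get_block);
}

static void examplefs_readahead(struct readahead_control *rac)
{
    mpage_readahead(rac, examplefs_get_block);
}

static const struct address_space_operations examplefs_aops = {
    .read_folio    = examplefs_read_folio,
    .readahead     = examplefs_readahead,
    /* write-side and dirty_folio hooks omitted here */
};
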
0386 /*
0387  * Writing is not so simple.
0388  *
0389  * If the page has buffers then they will be used for obtaining the disk
0390  * mapping.  We only support pages which are fully mapped-and-dirty, with a
0391  * special case for pages which are unmapped at the end: end-of-file.
0392  *
0393  * If the page has no buffers (preferred) then the page is mapped here.
0394  *
0395  * If all blocks are found to be contiguous then the page can go into the
0396  * BIO.  Otherwise fall back to the mapping's writepage().
0397  * 
0398  * FIXME: This code wants an estimate of how many pages are still to be
0399  * written, so it can intelligently allocate a suitably-sized BIO.  For now,
0400  * just allocate full-size (BIO_MAX_VECS-page) BIOs.
0401  */
0402 
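/*
 * Illustrative sketch of the create==1 get_block call used by the write
 * path below: when asked to map a hole with create set, the filesystem is
 * expected to allocate a block, map it, and set BH_New so the caller can
 * invalidate stale block-device aliases (see clean_bdev_bh_alias() below).
 * flatfs_lookup_block() and flatfs_alloc_block() are assumed helpers.
 */
static int flatfs_get_block_write(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create)
{
    sector_t phys = flatfs_lookup_block(inode, iblock);    /* assumed helper */

    if (!phys) {
        if (!create)
            return 0;        /* read side: treat as a hole */
        phys = flatfs_alloc_block(inode, iblock);    /* assumed helper */
        if (!phys)
            return -ENOSPC;
        set_buffer_new(bh_result);    /* freshly allocated block */
    }

    map_bh(bh_result, inode->i_sb, phys);
    return 0;
}
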
0403 struct mpage_data {
0404     struct bio *bio;
0405     sector_t last_block_in_bio;
0406     get_block_t *get_block;
0407 };
0408 
0409 /*
0410  * We have our BIO, so we can now mark the buffers clean.  Make
0411  * sure to only clean buffers which we know we'll be writing.
0412  */
0413 static void clean_buffers(struct page *page, unsigned first_unmapped)
0414 {
0415     unsigned buffer_counter = 0;
0416     struct buffer_head *bh, *head;
0417     if (!page_has_buffers(page))
0418         return;
0419     head = page_buffers(page);
0420     bh = head;
0421 
0422     do {
0423         if (buffer_counter++ == first_unmapped)
0424             break;
0425         clear_buffer_dirty(bh);
0426         bh = bh->b_this_page;
0427     } while (bh != head);
0428 
0429     /*
0430      * We cannot drop the bh if the page is not uptodate, or else a
0431      * concurrent read_folio would fail to serialize with the bh and would
0432      * read from disk before our write reaches the platter.
0433      */
0434     if (buffer_heads_over_limit && PageUptodate(page))
0435         try_to_free_buffers(page_folio(page));
0436 }
0437 
0438 /*
0439  * For situations where we want to clean all buffers attached to a page.
0440  * We don't need to calculate how many buffers are attached to the page,
0441  * we just need to specify a number larger than the maximum number of buffers.
0442  */
0443 void clean_page_buffers(struct page *page)
0444 {
0445     clean_buffers(page, ~0U);
0446 }
0447 
0448 static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
0449               void *data)
0450 {
0451     struct mpage_data *mpd = data;
0452     struct bio *bio = mpd->bio;
0453     struct address_space *mapping = page->mapping;
0454     struct inode *inode = page->mapping->host;
0455     const unsigned blkbits = inode->i_blkbits;
0456     unsigned long end_index;
0457     const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
0458     sector_t last_block;
0459     sector_t block_in_file;
0460     sector_t blocks[MAX_BUF_PER_PAGE];
0461     unsigned page_block;
0462     unsigned first_unmapped = blocks_per_page;
0463     struct block_device *bdev = NULL;
0464     int boundary = 0;
0465     sector_t boundary_block = 0;
0466     struct block_device *boundary_bdev = NULL;
0467     int length;
0468     struct buffer_head map_bh;
0469     loff_t i_size = i_size_read(inode);
0470     int ret = 0;
0471 
0472     if (page_has_buffers(page)) {
0473         struct buffer_head *head = page_buffers(page);
0474         struct buffer_head *bh = head;
0475 
0476         /* If they're all mapped and dirty, do it */
0477         page_block = 0;
0478         do {
0479             BUG_ON(buffer_locked(bh));
0480             if (!buffer_mapped(bh)) {
0481                 /*
0482                  * unmapped dirty buffers are created by
0483                  * block_dirty_folio -> mmapped data
0484                  */
0485                 if (buffer_dirty(bh))
0486                     goto confused;
0487                 if (first_unmapped == blocks_per_page)
0488                     first_unmapped = page_block;
0489                 continue;
0490             }
0491 
0492             if (first_unmapped != blocks_per_page)
0493                 goto confused;  /* hole -> non-hole */
0494 
0495             if (!buffer_dirty(bh) || !buffer_uptodate(bh))
0496                 goto confused;
0497             if (page_block) {
0498                 if (bh->b_blocknr != blocks[page_block-1] + 1)
0499                     goto confused;
0500             }
0501             blocks[page_block++] = bh->b_blocknr;
0502             boundary = buffer_boundary(bh);
0503             if (boundary) {
0504                 boundary_block = bh->b_blocknr;
0505                 boundary_bdev = bh->b_bdev;
0506             }
0507             bdev = bh->b_bdev;
0508         } while ((bh = bh->b_this_page) != head);
0509 
0510         if (first_unmapped)
0511             goto page_is_mapped;
0512 
0513         /*
0514          * Page has buffers, but they are all unmapped. The page was
0515          * created by pagein or read over a hole which was handled by
0516          * block_read_full_folio().  If this address_space is also
0517          * using mpage_readahead then this can only rarely happen.
0518          */
0519         goto confused;
0520     }
0521 
0522     /*
0523      * The page has no buffers: map it to disk
0524      */
0525     BUG_ON(!PageUptodate(page));
0526     block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
0527     last_block = (i_size - 1) >> blkbits;
0528     map_bh.b_page = page;
0529     for (page_block = 0; page_block < blocks_per_page; ) {
0530 
0531         map_bh.b_state = 0;
0532         map_bh.b_size = 1 << blkbits;
0533         if (mpd->get_block(inode, block_in_file, &map_bh, 1))
0534             goto confused;
0535         if (buffer_new(&map_bh))
0536             clean_bdev_bh_alias(&map_bh);
0537         if (buffer_boundary(&map_bh)) {
0538             boundary_block = map_bh.b_blocknr;
0539             boundary_bdev = map_bh.b_bdev;
0540         }
0541         if (page_block) {
0542             if (map_bh.b_blocknr != blocks[page_block-1] + 1)
0543                 goto confused;
0544         }
0545         blocks[page_block++] = map_bh.b_blocknr;
0546         boundary = buffer_boundary(&map_bh);
0547         bdev = map_bh.b_bdev;
0548         if (block_in_file == last_block)
0549             break;
0550         block_in_file++;
0551     }
0552     BUG_ON(page_block == 0);
0553 
0554     first_unmapped = page_block;
0555 
0556 page_is_mapped:
0557     end_index = i_size >> PAGE_SHIFT;
0558     if (page->index >= end_index) {
0559         /*
0560          * The page straddles i_size.  It must be zeroed out on each
0561          * and every writepage invocation because it may be mmapped.
0562          * "A file is mapped in multiples of the page size.  For a file
0563          * that is not a multiple of the page size, the remaining memory
0564          * is zeroed when mapped, and writes to that region are not
0565          * written out to the file."
0566          */
0567         unsigned offset = i_size & (PAGE_SIZE - 1);
0568 
0569         if (page->index > end_index || !offset)
0570             goto confused;
0571         zero_user_segment(page, offset, PAGE_SIZE);
0572     }
0573 
0574     /*
0575      * This page will go to BIO.  Do we need to send this BIO off first?
0576      */
0577     if (bio && mpd->last_block_in_bio != blocks[0] - 1)
0578         bio = mpage_bio_submit(bio);
0579 
0580 alloc_new:
0581     if (bio == NULL) {
0582         if (first_unmapped == blocks_per_page) {
0583             if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
0584                                 page, wbc))
0585                 goto out;
0586         }
0587         bio = bio_alloc(bdev, BIO_MAX_VECS,
0588                 REQ_OP_WRITE | wbc_to_write_flags(wbc),
0589                 GFP_NOFS);
0590         bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
0591         wbc_init_bio(wbc, bio);
0592     }
0593 
0594     /*
0595      * Must try to add the page before marking the buffer clean or
0596      * the confused fail path above (OOM) will be very confused when
0597      * it finds all bh marked clean (i.e. it will not write anything)
0598      */
0599     wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
0600     length = first_unmapped << blkbits;
0601     if (bio_add_page(bio, page, length, 0) < length) {
0602         bio = mpage_bio_submit(bio);
0603         goto alloc_new;
0604     }
0605 
0606     clean_buffers(page, first_unmapped);
0607 
0608     BUG_ON(PageWriteback(page));
0609     set_page_writeback(page);
0610     unlock_page(page);
0611     if (boundary || (first_unmapped != blocks_per_page)) {
0612         bio = mpage_bio_submit(bio);
0613         if (boundary_block) {
0614             write_boundary_block(boundary_bdev,
0615                     boundary_block, 1 << blkbits);
0616         }
0617     } else {
0618         mpd->last_block_in_bio = blocks[blocks_per_page - 1];
0619     }
0620     goto out;
0621 
0622 confused:
0623     if (bio)
0624         bio = mpage_bio_submit(bio);
0625 
0626     /*
0627      * The caller has a ref on the inode, so *mapping is stable
0628      */
0629     ret = block_write_full_page(page, mpd->get_block, wbc);
0630     mapping_set_error(mapping, ret);
0631 out:
0632     mpd->bio = bio;
0633     return ret;
0634 }
0635 
0636 /**
0637  * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
0638  * @mapping: address space structure to write
0639  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
0640  * @get_block: the filesystem's block mapper function.
0641  *
0642  * This is a library function, which implements the writepages()
0643  * address_space_operation.
0644  *
0645  * If a page is already under I/O, generic_writepages() skips it, even
0646  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
0647  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
0648  * and msync() need to guarantee that all the data which was dirty at the time
0649  * the call was made get new I/O started against them.  If wbc->sync_mode is
0650  * WB_SYNC_ALL then we were called for data integrity and we must wait for
0651  * existing IO to complete.
0652  */
0653 int
0654 mpage_writepages(struct address_space *mapping,
0655         struct writeback_control *wbc, get_block_t get_block)
0656 {
0657     struct mpage_data mpd = {
0658         .get_block  = get_block,
0659     };
0660     struct blk_plug plug;
0661     int ret;
0662 
0663     blk_start_plug(&plug);
0664     ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
0665     if (mpd.bio)
0666         mpage_bio_submit(mpd.bio);
0667     blk_finish_plug(&plug);
0668     return ret;
0669 }
0670 EXPORT_SYMBOL(mpage_writepages);
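
/*
 * Illustrative sketch of the matching writepages() hook, continuing the
 * hypothetical "examplefs" used in the read-side example above: the
 * filesystem supplies only its get_block and lets mpage_writepages() batch
 * contiguous dirty pages into large BIOs.
 */
static int examplefs_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
    return mpage_writepages(mapping, wbc, examplefs_get_block);
}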