/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page, and, most importantly, it avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
        struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
    struct iomap iomap = { 0 };
    loff_t written = 0, ret;

    /*
     * Need to map a range from start position for length bytes. This can
     * span multiple pages - it is only guaranteed to return a range of a
     * single type of pages (e.g. all into a hole, all mapped or all
     * unwritten). Failure at this point has nothing to undo.
     *
     * If allocation is required for this range, reserve the space now so
     * that the allocation is guaranteed to succeed later on. Once we copy
     * the data into the page cache pages, then we cannot fail otherwise we
     * expose transient stale data. If the reserve fails, we can safely
     * back out at this point as there is nothing to undo.
     */
    ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
    if (ret)
        return ret;
    if (WARN_ON(iomap.offset > pos))
        return -EIO;

    /*
     * Cut down the length to the one actually provided by the filesystem,
     * as it might not be able to give us the whole size that we requested.
     */
    if (iomap.offset + iomap.length < pos + length)
        length = iomap.offset + iomap.length - pos;

    /*
     * Now that we have guaranteed that the space allocation will succeed,
     * we can do the copy-in page by page without having to worry about
     * failures exposing transient data.
     */
    written = actor(inode, pos, length, data, &iomap);

    /*
     * Now the data has been copied, commit the range we've copied.  This
     * should not fail unless the filesystem has had a fatal error.
     */
    if (ops->iomap_end) {
        ret = ops->iomap_end(inode, pos, length,
                     written > 0 ? written : 0,
                     flags, &iomap);
    }

    return written ? written : ret;
}
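
/*
 * Illustrative sketch, not part of the original file: an actor is
 * invoked once per mapped extent and returns how many bytes of that
 * extent it consumed, or a negative errno.  A minimal actor that only
 * reports what it was handed might look like the following ("myfs"
 * names are hypothetical).
 */
#if 0   /* example only */
static loff_t
myfs_report_actor(struct inode *inode, loff_t pos, loff_t length,
        void *data, struct iomap *iomap)
{
    /* Consume the whole extent without touching it. */
    pr_info("myfs: extent type %d at %lld, %lld bytes\n",
            iomap->type, (long long)pos, (long long)length);
    return length;
}
#endif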

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
    loff_t i_size = i_size_read(inode);

    /*
     * Only truncate newly allocated pages beyond EOF, even if the
     * write started inside the existing inode size.
     */
    if (pos + len > i_size)
        truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
        struct page **pagep, struct iomap *iomap)
{
    pgoff_t index = pos >> PAGE_SHIFT;
    struct page *page;
    int status = 0;

    BUG_ON(pos + len > iomap->offset + iomap->length);

    if (fatal_signal_pending(current))
        return -EINTR;

    page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
    if (!page)
        return -ENOMEM;

    status = __block_write_begin_int(page, pos, len, NULL, iomap);
    if (unlikely(status)) {
        unlock_page(page);
        put_page(page);
        page = NULL;

        iomap_write_failed(inode, pos, len);
    }

    *pagep = page;
    return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
        unsigned copied, struct page *page)
{
    int ret;

    ret = generic_write_end(NULL, inode->i_mapping, pos, len,
            copied, page, NULL);
    if (ret < len)
        iomap_write_failed(inode, pos, len);
    return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        struct iomap *iomap)
{
    struct iov_iter *i = data;
    long status = 0;
    ssize_t written = 0;
    unsigned int flags = AOP_FLAG_NOFS;

    /*
     * Copies from kernel address space cannot fail (NFSD is a big user).
     */
    if (!iter_is_iovec(i))
        flags |= AOP_FLAG_UNINTERRUPTIBLE;

    do {
        struct page *page;
        unsigned long offset;   /* Offset into pagecache page */
        unsigned long bytes;    /* Bytes to write to page */
        size_t copied;      /* Bytes copied from user */

        offset = (pos & (PAGE_SIZE - 1));
        bytes = min_t(unsigned long, PAGE_SIZE - offset,
                        iov_iter_count(i));
again:
        if (bytes > length)
            bytes = length;

        /*
         * Bring in the user page that we will copy from _first_.
         * Otherwise there's a nasty deadlock on copying from the
         * same page as we're writing to, without it being marked
         * up-to-date.
         *
         * Not only is this an optimisation, but it is also required
         * to check that the address is actually valid when atomic
         * usercopies are used below.
         */
        if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
            status = -EFAULT;
            break;
        }

        status = iomap_write_begin(inode, pos, bytes, flags, &page,
                iomap);
        if (unlikely(status))
            break;

        if (mapping_writably_mapped(inode->i_mapping))
            flush_dcache_page(page);

        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

        flush_dcache_page(page);

        status = iomap_write_end(inode, pos, bytes, copied, page);
        if (unlikely(status < 0))
            break;
        copied = status;

        cond_resched();

        iov_iter_advance(i, copied);
        if (unlikely(copied == 0)) {
            /*
             * If we were unable to copy any data at all, we must
             * fall back to a single segment length write.
             *
             * If we didn't fall back here, we could livelock
             * because not all segments in the iov can be copied at
             * once without a pagefault.
             */
            bytes = min_t(unsigned long, PAGE_SIZE - offset,
                        iov_iter_single_seg_count(i));
            goto again;
        }
        pos += copied;
        written += copied;
        length -= copied;

        balance_dirty_pages_ratelimited(inode->i_mapping);
    } while (iov_iter_count(i) && length);

    return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
        struct iomap_ops *ops)
{
    struct inode *inode = iocb->ki_filp->f_mapping->host;
    loff_t pos = iocb->ki_pos, ret = 0, written = 0;

    while (iov_iter_count(iter)) {
        ret = iomap_apply(inode, pos, iov_iter_count(iter),
                IOMAP_WRITE, ops, iter, iomap_write_actor);
        if (ret <= 0)
            break;
        pos += ret;
        written += ret;
    }

    return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
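
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * would normally call this from its ->write_iter method after taking
 * the inode lock and running the generic write checks.  "myfs_iomap_ops"
 * is a hypothetical ops table.
 */
#if 0   /* example only */
static ssize_t
myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    struct inode *inode = file_inode(iocb->ki_filp);
    ssize_t ret;

    inode_lock(inode);
    ret = generic_write_checks(iocb, from);
    if (ret > 0)
        ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
    inode_unlock(inode);

    if (ret > 0)
        ret = generic_write_sync(iocb, ret);
    return ret;
}
#endif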

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;

    page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
    if (IS_ERR(page))
        return page;
    if (!PageUptodate(page)) {
        put_page(page);
        return ERR_PTR(-EIO);
    }
    return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        struct iomap *iomap)
{
    long status = 0;
    ssize_t written = 0;

    do {
        struct page *page, *rpage;
        unsigned long offset;   /* Offset into pagecache page */
        unsigned long bytes;    /* Bytes to write to page */

        offset = (pos & (PAGE_SIZE - 1));
        bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

        rpage = __iomap_read_page(inode, pos);
        if (IS_ERR(rpage))
            return PTR_ERR(rpage);

        status = iomap_write_begin(inode, pos, bytes,
                AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
                &page, iomap);
        put_page(rpage);
        if (unlikely(status))
            return status;

        WARN_ON_ONCE(!PageUptodate(page));

        status = iomap_write_end(inode, pos, bytes, bytes, page);
        if (unlikely(status <= 0)) {
            if (WARN_ON_ONCE(status == 0))
                return -EIO;
            return status;
        }

        cond_resched();

        pos += status;
        written += status;
        length -= status;

        balance_dirty_pages_ratelimited(inode->i_mapping);
    } while (length);

    return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
        struct iomap_ops *ops)
{
    loff_t ret;

    while (len) {
        ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
                iomap_dirty_actor);
        if (ret <= 0)
            return ret;
        pos += ret;
        len -= ret;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
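
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * that shares blocks between files could use iomap_file_dirty() to
 * push an already-uptodate range back through writeback so it gets
 * written to a new location, e.g. when unsharing a reflinked range
 * ("myfs_iomap_ops" is hypothetical):
 */
#if 0   /* example only */
    ret = iomap_file_dirty(inode, pos, len, &myfs_iomap_ops);
    if (ret)
        return ret;
#endif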

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
        unsigned bytes, struct iomap *iomap)
{
    struct page *page;
    int status;

    status = iomap_write_begin(inode, pos, bytes,
            AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
    if (status)
        return status;

    zero_user(page, offset, bytes);
    mark_page_accessed(page);

    return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
        struct iomap *iomap)
{
    sector_t sector = iomap->blkno +
        (((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

    return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}
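
/*
 * Note on the arithmetic above: iomap->blkno is expressed in 512-byte
 * units, so the byte distance from iomap->offset is converted to
 * sectors with a shift by 9.  The direct I/O path below uses the same
 * convention when filling in bio->bi_iter.bi_sector.
 */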

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
        void *data, struct iomap *iomap)
{
    bool *did_zero = data;
    loff_t written = 0;
    int status;

    /* already zeroed?  we're done. */
    if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
        return count;

    do {
        unsigned offset, bytes;

        offset = pos & (PAGE_SIZE - 1); /* Within page */
        bytes = min_t(unsigned, PAGE_SIZE - offset, count);

        if (IS_DAX(inode))
            status = iomap_dax_zero(pos, offset, bytes, iomap);
        else
            status = iomap_zero(inode, pos, offset, bytes, iomap);
        if (status < 0)
            return status;

        pos += bytes;
        count -= bytes;
        written += bytes;
        if (did_zero)
            *did_zero = true;
    } while (count > 0);

    return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
        struct iomap_ops *ops)
{
    loff_t ret;

    while (len > 0) {
        ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
                ops, did_zero, iomap_zero_range_actor);
        if (ret <= 0)
            return ret;

        pos += ret;
        len -= ret;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
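
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * zeroes the range between the old and the new EOF when extending a
 * file, so that stale data in a partially written tail block never
 * becomes visible ("myfs_iomap_ops" is hypothetical):
 */
#if 0   /* example only */
    if (newsize > oldsize) {
        error = iomap_zero_range(inode, oldsize, newsize - oldsize,
                &did_zero, &myfs_iomap_ops);
        if (error)
            return error;
    }
#endif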

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
        struct iomap_ops *ops)
{
    unsigned blocksize = (1 << inode->i_blkbits);
    unsigned off = pos & (blocksize - 1);

    /* Block boundary? Nothing to do */
    if (!off)
        return 0;
    return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
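
/*
 * Illustrative sketch, not part of the original file: on a shrinking
 * truncate, the partial block at the new EOF is zeroed first so that
 * a later extension cannot expose stale data ("myfs_iomap_ops" is
 * hypothetical):
 */
#if 0   /* example only */
    error = iomap_truncate_page(inode, newsize, &did_zero,
            &myfs_iomap_ops);
    if (error)
        return error;
    truncate_setsize(inode, newsize);
#endif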

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
        void *data, struct iomap *iomap)
{
    struct page *page = data;
    int ret;

    ret = __block_write_begin_int(page, pos, length, NULL, iomap);
    if (ret)
        return ret;

    block_commit_write(page, 0, length);
    return length;
}

int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        struct iomap_ops *ops)
{
    struct page *page = vmf->page;
    struct inode *inode = file_inode(vma->vm_file);
    unsigned long length;
    loff_t offset, size;
    ssize_t ret;

    lock_page(page);
    size = i_size_read(inode);
    if ((page->mapping != inode->i_mapping) ||
        (page_offset(page) > size)) {
        /* We overload EFAULT to mean page got truncated */
        ret = -EFAULT;
        goto out_unlock;
    }

    /* page is wholly or partially inside EOF */
    if (((page->index + 1) << PAGE_SHIFT) > size)
        length = size & ~PAGE_MASK;
    else
        length = PAGE_SIZE;

    offset = page_offset(page);
    while (length > 0) {
        ret = iomap_apply(inode, offset, length,
                IOMAP_WRITE | IOMAP_FAULT, ops, page,
                iomap_page_mkwrite_actor);
        if (unlikely(ret <= 0))
            goto out_unlock;
        offset += ret;
        length -= ret;
    }

    set_page_dirty(page);
    wait_for_stable_page(page);
    return 0;
out_unlock:
    unlock_page(page);
    return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
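
/*
 * Illustrative sketch, not part of the original file: a filesystem
 * wires this up through its vm_operations_struct, typically bracketed
 * by sb_start_pagefault()/sb_end_pagefault() and a timestamp update
 * ("myfs" names are hypothetical):
 */
#if 0   /* example only */
static int
myfs_filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct inode *inode = file_inode(vma->vm_file);
    int ret;

    sb_start_pagefault(inode->i_sb);
    file_update_time(vma->vm_file);
    ret = iomap_page_mkwrite(vma, vmf, &myfs_iomap_ops);
    sb_end_pagefault(inode->i_sb);
    return block_page_mkwrite_return(ret);
}
#endif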

struct fiemap_ctx {
    struct fiemap_extent_info *fi;
    struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
        struct iomap *iomap, u32 flags)
{
    switch (iomap->type) {
    case IOMAP_HOLE:
        /* skip holes */
        return 0;
    case IOMAP_DELALLOC:
        flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
        break;
    case IOMAP_UNWRITTEN:
        flags |= FIEMAP_EXTENT_UNWRITTEN;
        break;
    case IOMAP_MAPPED:
        break;
    }

    if (iomap->flags & IOMAP_F_MERGED)
        flags |= FIEMAP_EXTENT_MERGED;
    if (iomap->flags & IOMAP_F_SHARED)
        flags |= FIEMAP_EXTENT_SHARED;

    return fiemap_fill_next_extent(fi, iomap->offset,
            iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
            iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        struct iomap *iomap)
{
    struct fiemap_ctx *ctx = data;
    loff_t ret = length;

    if (iomap->type == IOMAP_HOLE)
        return length;

    ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
    ctx->prev = *iomap;
    switch (ret) {
    case 0:     /* success */
        return length;
    case 1:     /* extent array full */
        return 0;
    default:
        return ret;
    }
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
        loff_t start, loff_t len, struct iomap_ops *ops)
{
    struct fiemap_ctx ctx;
    loff_t ret;

    memset(&ctx, 0, sizeof(ctx));
    ctx.fi = fi;
    ctx.prev.type = IOMAP_HOLE;

    ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
    if (ret)
        return ret;

    if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
        ret = filemap_write_and_wait(inode->i_mapping);
        if (ret)
            return ret;
    }

    while (len > 0) {
        ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
                iomap_fiemap_actor);
        /* inode with no (attribute) mapping will give ENOENT */
        if (ret == -ENOENT)
            break;
        if (ret < 0)
            return ret;
        if (ret == 0)
            break;

        start += ret;
        len -= ret;
    }

    if (ctx.prev.type != IOMAP_HOLE) {
        ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
        if (ret < 0)
            return ret;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
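
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->fiemap inode operation reduces to a single call once it has an
 * iomap_ops table ("myfs_iomap_ops" is hypothetical):
 */
#if 0   /* example only */
static int
myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        u64 start, u64 len)
{
    return iomap_fiemap(inode, fieinfo, start, len, &myfs_iomap_ops);
}
#endif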

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE     (1 << 30)
#define IOMAP_DIO_DIRTY     (1 << 31)

struct iomap_dio {
    struct kiocb        *iocb;
    iomap_dio_end_io_t  *end_io;
    loff_t          i_size;
    loff_t          size;
    atomic_t        ref;
    unsigned        flags;
    int         error;

    union {
        /* used during submission and for synchronous completion: */
        struct {
            struct iov_iter     *iter;
            struct task_struct  *waiter;
            struct request_queue    *last_queue;
            blk_qc_t        cookie;
        } submit;

        /* used for aio completion: */
        struct {
            struct work_struct  work;
        } aio;
    };
};
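
/*
 * A note on lifetime, summarising the logic below: dio->ref starts at
 * one for the submitter and is incremented once per bio in flight.
 * Whoever drops the last reference completes the request - inline for
 * synchronous I/O, or via the aio.work item for asynchronous writes,
 * which need process context for the end_io handler.
 */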

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
    struct kiocb *iocb = dio->iocb;
    ssize_t ret;

    if (dio->end_io) {
        ret = dio->end_io(iocb,
                dio->error ? dio->error : dio->size,
                dio->flags);
    } else {
        ret = dio->error;
    }

    if (likely(!ret)) {
        ret = dio->size;
        /* check for short read */
        if (iocb->ki_pos + ret > dio->i_size &&
            !(dio->flags & IOMAP_DIO_WRITE))
            ret = dio->i_size - iocb->ki_pos;
        iocb->ki_pos += ret;
    }

    inode_dio_end(file_inode(iocb->ki_filp));
    kfree(dio);

    return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
    struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
    struct kiocb *iocb = dio->iocb;
    bool is_write = (dio->flags & IOMAP_DIO_WRITE);
    ssize_t ret;

    ret = iomap_dio_complete(dio);
    if (is_write && ret > 0)
        ret = generic_write_sync(iocb, ret);
    iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
    cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
    struct iomap_dio *dio = bio->bi_private;
    bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

    if (bio->bi_error)
        iomap_dio_set_error(dio, bio->bi_error);

    if (atomic_dec_and_test(&dio->ref)) {
        if (is_sync_kiocb(dio->iocb)) {
            struct task_struct *waiter = dio->submit.waiter;

            WRITE_ONCE(dio->submit.waiter, NULL);
            wake_up_process(waiter);
        } else if (dio->flags & IOMAP_DIO_WRITE) {
            struct inode *inode = file_inode(dio->iocb->ki_filp);

            INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
            queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
        } else {
            iomap_dio_complete_work(&dio->aio.work);
        }
    }

    if (should_dirty) {
        bio_check_pages_dirty(bio);
    } else {
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i)
            put_page(bvec->bv_page);
        bio_put(bio);
    }
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
        unsigned len)
{
    struct page *page = ZERO_PAGE(0);
    struct bio *bio;

    bio = bio_alloc(GFP_KERNEL, 1);
    bio->bi_bdev = iomap->bdev;
    bio->bi_iter.bi_sector =
        iomap->blkno + ((pos - iomap->offset) >> 9);
    bio->bi_private = dio;
    bio->bi_end_io = iomap_dio_bio_end_io;

    get_page(page);
    if (bio_add_page(bio, page, len, 0) != len)
        BUG();
    bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

    atomic_inc(&dio->ref);
    return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
        void *data, struct iomap *iomap)
{
    struct iomap_dio *dio = data;
    unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
    unsigned fs_block_size = (1 << inode->i_blkbits), pad;
    unsigned align = iov_iter_alignment(dio->submit.iter);
    struct iov_iter iter;
    struct bio *bio;
    bool need_zeroout = false;
    int nr_pages, ret;

    if ((pos | length | align) & ((1 << blkbits) - 1))
        return -EINVAL;

    switch (iomap->type) {
    case IOMAP_HOLE:
        if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
            return -EIO;
        /*FALLTHRU*/
    case IOMAP_UNWRITTEN:
        if (!(dio->flags & IOMAP_DIO_WRITE)) {
            iov_iter_zero(length, dio->submit.iter);
            dio->size += length;
            return length;
        }
        dio->flags |= IOMAP_DIO_UNWRITTEN;
        need_zeroout = true;
        break;
    case IOMAP_MAPPED:
        if (iomap->flags & IOMAP_F_SHARED)
            dio->flags |= IOMAP_DIO_COW;
        if (iomap->flags & IOMAP_F_NEW)
            need_zeroout = true;
        break;
    default:
        WARN_ON_ONCE(1);
        return -EIO;
    }

    /*
     * Operate on a partial iter trimmed to the extent we were called for.
     * We'll update the iter in the dio once we're done with this extent.
     */
    iter = *dio->submit.iter;
    iov_iter_truncate(&iter, length);

    nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
    if (nr_pages <= 0)
        return nr_pages;

    if (need_zeroout) {
        /* zero out from the start of the block to the write offset */
        pad = pos & (fs_block_size - 1);
        if (pad)
            iomap_dio_zero(dio, iomap, pos - pad, pad);
    }

    do {
        if (dio->error)
            return 0;

        bio = bio_alloc(GFP_KERNEL, nr_pages);
        bio->bi_bdev = iomap->bdev;
        bio->bi_iter.bi_sector =
            iomap->blkno + ((pos - iomap->offset) >> 9);
        bio->bi_private = dio;
        bio->bi_end_io = iomap_dio_bio_end_io;

        ret = bio_iov_iter_get_pages(bio, &iter);
        if (unlikely(ret)) {
            bio_put(bio);
            return ret;
        }

        if (dio->flags & IOMAP_DIO_WRITE) {
            bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
            task_io_account_write(bio->bi_iter.bi_size);
        } else {
            bio_set_op_attrs(bio, REQ_OP_READ, 0);
            if (dio->flags & IOMAP_DIO_DIRTY)
                bio_set_pages_dirty(bio);
        }

        dio->size += bio->bi_iter.bi_size;
        pos += bio->bi_iter.bi_size;

        nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

        atomic_inc(&dio->ref);

        dio->submit.last_queue = bdev_get_queue(iomap->bdev);
        dio->submit.cookie = submit_bio(bio);
    } while (nr_pages);

    if (need_zeroout) {
        /* zero out from the end of the write to the end of the block */
        pad = pos & (fs_block_size - 1);
        if (pad)
            iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
    }

    iov_iter_advance(dio->submit.iter, length);
    return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops,
        iomap_dio_end_io_t end_io)
{
    struct address_space *mapping = iocb->ki_filp->f_mapping;
    struct inode *inode = file_inode(iocb->ki_filp);
    size_t count = iov_iter_count(iter);
    loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
    unsigned int flags = IOMAP_DIRECT;
    struct blk_plug plug;
    struct iomap_dio *dio;

    lockdep_assert_held(&inode->i_rwsem);

    if (!count)
        return 0;

    dio = kmalloc(sizeof(*dio), GFP_KERNEL);
    if (!dio)
        return -ENOMEM;

    dio->iocb = iocb;
    atomic_set(&dio->ref, 1);
    dio->size = 0;
    dio->i_size = i_size_read(inode);
    dio->end_io = end_io;
    dio->error = 0;
    dio->flags = 0;

    dio->submit.iter = iter;
    if (is_sync_kiocb(iocb)) {
        dio->submit.waiter = current;
        dio->submit.cookie = BLK_QC_T_NONE;
        dio->submit.last_queue = NULL;
    }

    if (iov_iter_rw(iter) == READ) {
        if (pos >= dio->i_size)
            goto out_free_dio;

        if (iter->type == ITER_IOVEC)
            dio->flags |= IOMAP_DIO_DIRTY;
    } else {
        dio->flags |= IOMAP_DIO_WRITE;
        flags |= IOMAP_WRITE;
    }

    if (mapping->nrpages) {
        ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
        if (ret)
            goto out_free_dio;

        ret = invalidate_inode_pages2_range(mapping,
                iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
        WARN_ON_ONCE(ret);
        ret = 0;
    }

    inode_dio_begin(inode);

    blk_start_plug(&plug);
    do {
        ret = iomap_apply(inode, pos, count, flags, ops, dio,
                iomap_dio_actor);
        if (ret <= 0) {
            /* magic error code to fall back to buffered I/O */
            if (ret == -ENOTBLK)
                ret = 0;
            break;
        }
        pos += ret;
    } while ((count = iov_iter_count(iter)) > 0);
    blk_finish_plug(&plug);

    if (ret < 0)
        iomap_dio_set_error(dio, ret);

    if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
            !inode->i_sb->s_dio_done_wq) {
        ret = sb_init_dio_done_wq(inode->i_sb);
        if (ret < 0)
            iomap_dio_set_error(dio, ret);
    }

    if (!atomic_dec_and_test(&dio->ref)) {
        if (!is_sync_kiocb(iocb))
            return -EIOCBQUEUED;

        for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (!READ_ONCE(dio->submit.waiter))
                break;

            if (!(iocb->ki_flags & IOCB_HIPRI) ||
                !dio->submit.last_queue ||
                !blk_mq_poll(dio->submit.last_queue,
                     dio->submit.cookie))
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);
    }

    /*
     * Try again to invalidate clean pages which might have been cached by
     * non-direct readahead, or faulted in by get_user_pages() if the source
     * of the write was an mmap'ed region of the file we're writing.  Either
     * one is a pretty crazy thing to do, so we don't support it 100%.  If
     * this invalidation fails, tough, the write still worked...
     */
    if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
        ret = invalidate_inode_pages2_range(mapping,
                iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
        WARN_ON_ONCE(ret);
    }

    return iomap_dio_complete(dio);

out_free_dio:
    kfree(dio);
    return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
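
/*
 * Illustrative sketch, not part of the original file: the caller must
 * hold inode->i_rwsem (see the lockdep assertion above), so a minimal
 * direct read path might look like this ("myfs" names are
 * hypothetical; passing a NULL end_io is allowed, as seen above):
 */
#if 0   /* example only */
static ssize_t
myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
    struct inode *inode = file_inode(iocb->ki_filp);
    ssize_t ret;

    inode_lock_shared(inode);
    ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
    inode_unlock_shared(inode);
    return ret;
}
#endif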