#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio: kept in the high bits of dio->flags so they
 * do not collide with the public IOMAP_DIO_* flags passed in by callers.
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)

struct iomap_dio {
	struct kiocb *iocb;
	const struct iomap_dio_ops *dops;
	loff_t i_size;
	loff_t size;
	atomic_t ref;
	unsigned flags;
	int error;
	size_t done_before;
	bool wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter *iter;
			struct task_struct *waiter;
			struct bio *poll_bio;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct work;
		} aio;
	};
};

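/*
 * Allocate a bio for this I/O.  If the filesystem supplied its own bio_set
 * through the dio ops, allocate from that so it can embed per-bio state;
 * otherwise fall back to the generic bio allocator.
 */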
static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)
{
	if (dio->dops && dio->dops->bio_set)
		return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,
					GFP_KERNEL, dio->dops->bio_set);
	return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);
}

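/*
 * Submit a bio: take a reference on the dio for it, set up polling for
 * HIPRI (non-sync) iocbs, and hand the bio either to the filesystem's
 * ->submit_io hook or straight to the block layer.
 */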
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
		struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
		bio_set_polled(bio, dio->iocb);
		dio->submit.poll_bio = bio;
	}

	if (dio->dops && dio->dops->submit_io)
		dio->dops->submit_io(iter, bio, pos);
	else
		submit_bio(bio);
}

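/*
 * iomap_dio_complete - finish a direct I/O request
 *
 * Runs the filesystem's ->end_io hook (if any), trims read results to EOF,
 * advances the iocb position, invalidates page cache pages that raced with a
 * write, drops the inode DIO count, and issues generic_write_sync() for
 * O_(D)SYNC writes that could not be handled with FUA alone.  Returns the
 * number of bytes transferred (including @done_before) or a negative errno,
 * and frees the dio.
 */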
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages that might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() when the
	 * source of the write was an mmap'ed region of the file being written
	 * to.  If this invalidation fails the write still worked; the page
	 * cache is merely left stale.
	 *
	 * This invalidation must happen after ->end_io(), as some filesystems
	 * convert unwritten extents to real allocations in ->end_io() and
	 * invalidate_inode_pages2_range() has to be able to deal with that.
	 */
	if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE) &&
	    inode->i_mapping->nrpages) {
		int err;

		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	if (ret > 0)
		ret += dio->done_before;

	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

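/*
 * Deferred completion work for async writes: runs iomap_dio_complete() from
 * process context and then completes the iocb.
 */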
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio));
}

/*
 * Set an error on the dio if none has been recorded yet.  Submission and
 * completion contexts can race here, so use cmpxchg to only record the
 * first error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			/* synchronous I/O: wake up the submitting task */
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			/*
			 * Async write completion may need to block, so defer
			 * it to the per-sb dio completion workqueue.
			 */
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			WRITE_ONCE(dio->iocb->private, NULL);
			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			/* async reads can be completed right here */
			WRITE_ONCE(dio->iocb->private, NULL);
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(iomap_dio_bio_end_io);

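/*
 * Zero a sub-block range by writing from the shared zero page.  Used to avoid
 * exposing stale data when a write does not cover a whole filesystem block of
 * a newly allocated or unwritten extent.
 */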
static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
		loff_t pos, unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	iomap_dio_submit_bio(iter, dio, bio, pos);
}

/*
 * Figure out the bio's operation flags from the dio request, the mapping, and
 * whether or not we want FUA.  Note that we can end up clearing the
 * IOMAP_DIO_WRITE_FUA flag in the dio request.
 */
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
		const struct iomap *iomap, bool use_fua)
{
	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;

	if (!(dio->flags & IOMAP_DIO_WRITE)) {
		WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);
		return REQ_OP_READ;
	}

	if (iomap->flags & IOMAP_F_ZONE_APPEND)
		opflags |= REQ_OP_ZONE_APPEND;
	else
		opflags |= REQ_OP_WRITE;

	if (use_fua)
		opflags |= REQ_FUA;
	else
		dio->flags &= ~IOMAP_DIO_WRITE_FUA;

	return opflags;
}

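/*
 * Issue the bios for one iomap extent: check alignment, zero out any
 * sub-block head and tail that would otherwise expose stale data, and map as
 * much of the iov_iter into bios as fits in this extent.  Returns the number
 * of bytes processed, or a negative errno if nothing was submitted.
 */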
static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iter->iomap;
	struct inode *inode = iter->inode;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	blk_opf_t bio_opf;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length) & ((1 << blkbits) - 1) ||
	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a
		 * pure data I/O that doesn't require any metadata updates
		 * (including after I/O completion such as unwritten extent
		 * conversion), and the underlying device supports FUA.  This
		 * allows us to avoid cache flushes on I/O completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just what this mapping
	 * covers.  The iter is re-expanded at the end of this function.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	if (!iov_iter_count(dio->submit.iter))
		goto out;

	/*
	 * Polled completion doesn't cover the extra zeroing bios or
	 * size-extending writes, so fall back to interrupt completion for
	 * those cases.
	 */
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
		dio->iocb->ki_flags &= ~IOCB_HIPRI;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos - pad, pad);
	}

	/*
	 * Set the operation flags early so that bio_iov_iter_get_pages can
	 * set up the page vector appropriately for a ZONE_APPEND operation.
	 */
	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = iomap_dio_alloc_bio(iter, dio, nr_pages, bio_opf);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an I/O.  We must
			 * fall through to the sub-block tail zeroing here,
			 * otherwise this short I/O may expose stale data in
			 * the tail of the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			task_io_account_write(n);
		} else {
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
						 BIO_MAX_VECS);
		/*
		 * We can only poll for single bio bios.
		 */
		if (nr_pages)
			dio->iocb->ki_flags &= ~IOCB_HIPRI;
		iomap_dio_submit_bio(iter, dio, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF.  If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(iter, dio, pos, fs_block_size - pad);
	}
out:
	/* undo the iter truncation to the current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

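/*
 * Read from an unmapped range: there is no data on disk, so simply fill the
 * caller's buffer with zeroes.
 */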
static loff_t iomap_dio_hole_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	loff_t length = iov_iter_zero(iomap_length(iter), dio->submit.iter);

	dio->size += length;
	if (!length)
		return -EFAULT;
	return length;
}

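/*
 * Handle extents stored inline in the inode: copy directly between the
 * inline data buffer and the user iterator instead of issuing bios.
 */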
static loff_t iomap_dio_inline_iter(const struct iomap_iter *iomi,
		struct iomap_dio *dio)
{
	const struct iomap *iomap = &iomi->iomap;
	struct iov_iter *iter = dio->submit.iter;
	void *inline_data = iomap_inline_data(iomap, iomi->pos);
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	size_t copied;

	if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
		return -EIO;

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = iomi->inode->i_size;

		/* zero any gap between the current EOF and the write offset */
		if (pos > size)
			memset(iomap_inline_data(iomap, size), 0, pos - size);
		copied = copy_from_iter(inline_data, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(iomi->inode, pos + copied);
			mark_inode_dirty(iomi->inode);
		}
	} else {
		copied = copy_to_iter(inline_data, length, iter);
	}
	dio->size += copied;
	if (!copied)
		return -EFAULT;
	return copied;
}

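/*
 * Dispatch one iomap extent to the appropriate handler based on its type.
 */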
static loff_t iomap_dio_iter(const struct iomap_iter *iter,
		struct iomap_dio *dio)
{
	switch (iter->iomap.type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_iter(iter, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_iter(iter, dio);
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_MAPPED:
		return iomap_dio_bio_iter(iter, dio);
	case IOMAP_INLINE:
		return iomap_dio_inline_iter(iter, dio);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, so if
		 * page_mkwrite occurs between the writeback and the
		 * iomap_iter() call in the DIO path, it will see the DELALLOC
		 * block that the page_mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * I/O is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * cache flush after the write.  This is slightly tricky because a single
 * request here can be mapped into multiple disjoint I/Os, and only a subset
 * of those may be pure data writes; in that case we still need a full data
 * sync on completion.
 *
 * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
 * __iomap_dio_rw can return a partial result if it encounters a non-resident
 * page in @iter after preparing a transfer.  In that case, the non-resident
 * pages can be faulted in and the request resumed with @done_before set to
 * the number of bytes previously transferred.  The request will then complete
 * with the correct total number of bytes transferred; this is essential for
 * completing partial requests asynchronously.
 *
 * Returns -ENOTBLK on a page invalidation failure for writes; the caller
 * needs to fall back to buffered I/O in that case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct iomap_iter iomi = {
		.inode = inode,
		.pos = iocb->ki_pos,
		.len = iov_iter_count(iter),
		.flags = IOMAP_DIRECT,
		.private = private,
	};
	loff_t end = iomi.pos + iomi.len - 1, ret = 0;
	bool wait_for_completion =
		is_sync_kiocb(iocb) || (dio_flags & IOMAP_DIO_FORCE_WAIT);
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!iomi.len)
		return NULL;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;
	dio->done_before = done_before;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.poll_bio = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (iomi.pos >= dio->i_size)
			goto out_free_dio;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_needs_writeback(mapping, iomi.pos,
					end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		if (user_backed_iter(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		iomi.flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		if (iocb->ki_flags & IOCB_NOWAIT) {
			if (filemap_range_has_page(mapping, iomi.pos, end)) {
				ret = -EAGAIN;
				goto out_free_dio;
			}
			iomi.flags |= IOMAP_NOWAIT;
		}

		/* for data sync or sync, we need sync completion processing */
		if (iocb_is_dsync(iocb) && !(dio_flags & IOMAP_DIO_NOSYNC)) {
			dio->flags |= IOMAP_DIO_NEED_SYNC;

			/*
			 * For datasync only writes, we optimistically try
			 * using FUA for this I/O.  Any non-FUA write that
			 * occurs will clear this flag, hence we know before
			 * completion whether a cache flush is necessary.
			 */
			if (!(iocb->ki_flags & IOCB_SYNC))
				dio->flags |= IOMAP_DIO_WRITE_FUA;
		}
	}

	if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
		ret = -EAGAIN;
		if (iomi.pos >= dio->i_size ||
		    iomi.pos + iomi.len > dio->i_size)
			goto out_free_dio;
		iomi.flags |= IOMAP_OVERWRITE_ONLY;
	}

	ret = filemap_write_and_wait_range(mapping, iomi.pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping,
				iomi.pos >> PAGE_SHIFT, end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, iomi.pos,
							iomi.len);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	while ((ret = iomap_iter(&iomi, ops)) > 0) {
		iomi.processed = iomap_dio_iter(&iomi, dio);

		/*
		 * We can only poll for single bio bios.
		 */
		iocb->ki_flags &= ~IOCB_HIPRI;
	}

	blk_finish_plug(&plug);

	/*
	 * We only report that we've read data up to i_size.  Revert the
	 * iterator to a state corresponding to that as some callers (such as
	 * the splice code) rely on it.
	 */
	if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
		iov_iter_revert(iter, iomi.pos - dio->i_size);

	if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
		if (!(iocb->ki_flags & IOCB_NOWAIT))
			wait_for_completion = true;
		ret = 0;
	}

	/* magic error code to fall back to buffered I/O */
	if (ret == -ENOTBLK) {
		wait_for_completion = true;
		ret = 0;
	}
	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on I/O completion.  Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->private, dio->submit.poll_bio);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *      the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *      iocb, we must never touch the dio after the decrement, the
	 *      I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference and we serve a synchronous
	 *      iocb, the I/O completion handler will wake us up on the drop
	 *      of the final reference, and we will complete and free it here
	 *      after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

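/*
 * Convenience wrapper around __iomap_dio_rw() that completes the dio for
 * callers that do not need to hold on to it.  Asynchronous requests return
 * -EIOCBQUEUED here and are completed from the I/O completion path instead.
 */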
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		unsigned int dio_flags, void *private, size_t done_before)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, private,
			     done_before);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);