/*
 * aops.c - NTFS kernel address space operations and page cache handling.
 *
 * Part of the Linux-NTFS project.
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/bio.h>

#include "aops.h"
#include "attrib.h"
#include "debug.h"
#include "inode.h"
#include "mft.h"
#include "runlist.h"
#include "types.h"
#include "ntfs.h"

/**
 * ntfs_end_buffer_async_read - async io completion for reading attributes
 * @bh:		buffer head on which io is completed
 * @uptodate:	whether @bh is now uptodate or not
 *
 * Asynchronous I/O completion handler for reading pages belonging to the
 * attribute address space of an inode.  The inodes can either be files or
 * directories or they can be fake inodes describing some attribute.
 *
 * If NInoMstProtected(), perform the post read mst fixups when all buffers
 * belonging to the page have been read in.
 *
 * If all buffers are now uptodate and no i/o errors occurred, the page is
 * marked uptodate before it is unlocked; otherwise the page error flag is
 * set.
 */
static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first, *tmp;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni;
	int page_uptodate = 1;

	page = bh->b_page;
	vi = page->mapping->host;
	ni = NTFS_I(vi);

	if (likely(uptodate)) {
		loff_t i_size;
		s64 file_ofs, init_size;

		set_buffer_uptodate(bh);

		file_ofs = ((s64)page->index << PAGE_SHIFT) +
				bh_offset(bh);
		read_lock_irqsave(&ni->size_lock, flags);
		init_size = ni->initialized_size;
		i_size = i_size_read(vi);
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (unlikely(init_size > i_size)) {
			/* Race with shrinking truncate. */
			init_size = i_size;
		}
		/* Check for the current buffer head overflowing. */
		if (unlikely(file_ofs + bh->b_size > init_size)) {
			int ofs;
			void *kaddr;

			ofs = 0;
			if (file_ofs < init_size)
				ofs = init_size - file_ofs;
			kaddr = kmap_atomic(page);
			memset(kaddr + bh_offset(bh) + ofs, 0,
					bh->b_size - ofs);
			flush_dcache_page(page);
			kunmap_atomic(kaddr);
		}
	} else {
		clear_buffer_uptodate(bh);
		SetPageError(page);
		ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
				"0x%llx.", (unsigned long long)bh->b_blocknr);
	}
	first = page_buffers(page);
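	/*
	 * As in the generic end_buffer_async_read(), walk the circular list
	 * of the page's buffers under the b_uptodate_lock of the first
	 * buffer: clear this buffer's async flag and, if any other async
	 * read is still in flight, leave completing the page to the buffer
	 * that finishes last.
	 */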
	spin_lock_irqsave(&first->b_uptodate_lock, flags);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			if (likely(buffer_locked(tmp)))
				goto still_busy;
			/* Async buffers must be locked. */
			BUG();
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	/*
	 * If none of the buffers had errors then we can set the page uptodate,
	 * but we first have to perform the post read mst fixups, if the
	 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
	 * Note we ignore fixup errors as those are detected when
	 * map_mft_record() is called which gives us per record granularity
	 * rather than per page granularity.
	 */
	if (!NInoMstProtected(ni)) {
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	} else {
		u8 *kaddr;
		unsigned int i, recs;
		u32 rec_size;

		rec_size = ni->itype.index.block_size;
		recs = PAGE_SIZE / rec_size;
		/* Should have been verified before we got here... */
		BUG_ON(!recs);
		kaddr = kmap_atomic(page);
		for (i = 0; i < recs; i++)
			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
					i * rec_size), rec_size);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		if (likely(page_uptodate && !PageError(page)))
			SetPageUptodate(page);
	}
	unlock_page(page);
	return;
still_busy:
	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
	return;
}

/**
 * ntfs_read_block - fill a @page of an address space with data
 * @page:	page cache page to fill with data
 *
 * Fill the page @page of the address space belonging to the @page->host
 * inode.  We read each buffer asynchronously and when all buffers are read
 * in, our io completion handler ntfs_end_buffer_async_read(), if required,
 * automatically applies the mst fixups to the page before finally marking it
 * uptodate and unlocking it.
 *
 * We only enforce the allocated_size limit because i_size is checked for in
 * the generic file read code.
 *
 * Return 0 on success and -errno on error.
 *
 * Contains an adapted version of fs/buffer.c::block_read_full_folio().
 */
static int ntfs_read_block(struct page *page)
{
	loff_t i_size;
	VCN vcn;
	LCN lcn;
	s64 init_size;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	sector_t iblock, lblock, zblock;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int i, nr;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	/* $MFT/$DATA must have its complete runlist in memory at all times. */
	BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));

	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize, 0);
		if (unlikely(!page_has_buffers(page))) {
			unlock_page(page);
			return -ENOMEM;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/*
	 * We may be racing with truncate, so take a consistent snapshot of
	 * the sizes under the size lock and only use the snapshot below:
	 *
	 * iblock - the first block of this page,
	 * lblock - the first block outside the allocated size,
	 * zblock - the first block outside the initialized size.
	 */
	iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
	read_lock_irqsave(&ni->size_lock, flags);
	lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
	init_size = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(init_size > i_size)) {
		/* Race with shrinking truncate. */
		init_size = i_size;
	}
	zblock = (init_size + blocksize - 1) >> blocksize_bits;

	/* Loop through all the buffers in the page. */
	rl = NULL;
	nr = i = 0;
	do {
		int err = 0;

		if (unlikely(buffer_uptodate(bh)))
			continue;
		if (unlikely(buffer_mapped(bh))) {
			arr[nr++] = bh;
			continue;
		}
		bh->b_bdev = vol->sb->s_bdev;
		/* Is the block within the allowed limits? */
		if (iblock < lblock) {
			bool is_retry = false;

			/* Convert iblock into corresponding vcn and offset. */
			vcn = (VCN)iblock << blocksize_bits >>
					vol->cluster_size_bits;
			vcn_ofs = ((VCN)iblock << blocksize_bits) &
					vol->cluster_size_mask;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (lcn >= 0) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
						+ vcn_ofs) >> blocksize_bits;
				set_buffer_mapped(bh);
				/* Only read initialized data blocks. */
				if (iblock < zblock) {
					arr[nr++] = bh;
					continue;
				}
				/* Fully non-initialized data block, zero it. */
				goto handle_zblock;
			}
			/* It is a hole, need to zero it. */
			if (lcn == LCN_HOLE)
				goto handle_hole;
			/* If first try and runlist unmapped, map and retry. */
			if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
				is_retry = true;
				/*
				 * Attempt to map runlist, dropping lock for
				 * the duration.
				 */
				up_read(&ni->runlist.lock);
				err = ntfs_map_runlist(ni, vcn);
				if (likely(!err))
					goto lock_retry_remap;
				rl = NULL;
			} else if (!rl)
				up_read(&ni->runlist.lock);
			/*
			 * If buffer is outside the runlist, treat it as a
			 * hole.  This can happen due to concurrent truncate
			 * for example.
			 */
			if (err == -ENOENT || lcn == LCN_ENOENT) {
				err = 0;
				goto handle_hole;
			}
			/* Hard error, zero out region. */
			if (!err)
				err = -EIO;
			bh->b_blocknr = -1;
			SetPageError(page);
			ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"offset 0x%x because its location on "
					"disk could not be determined%s "
					"(error code %i).", ni->mft_no,
					ni->type, (unsigned long long)vcn,
					vcn_ofs, is_retry ? " even after "
					"retrying" : "", err);
		}
		/*
		 * Either iblock was outside lblock limits or
		 * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
		 * of the page and set the buffer uptodate.
		 */
handle_hole:
		bh->b_blocknr = -1UL;
		clear_buffer_mapped(bh);
handle_zblock:
		zero_user(page, i * blocksize, blocksize);
		if (likely(!err))
			set_buffer_uptodate(bh);
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Check we have at least one buffer ready for i/o. */
	if (nr) {
		struct buffer_head *tbh;

		/* Lock the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			lock_buffer(tbh);
			tbh->b_end_io = ntfs_end_buffer_async_read;
			set_buffer_async_read(tbh);
		}
		/* Finally, start i/o on the buffers. */
		for (i = 0; i < nr; i++) {
			tbh = arr[i];
			if (likely(!buffer_uptodate(tbh)))
				submit_bh(REQ_OP_READ, tbh);
			else
				ntfs_end_buffer_async_read(tbh, 1);
		}
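		/*
		 * The page is unlocked by the i/o completion handler
		 * ntfs_end_buffer_async_read() once the last buffer is done,
		 * so it must not be touched again here.
		 */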
		return 0;
	}
	/* No i/o was scheduled on any of the buffers. */
	if (likely(!PageError(page)))
		SetPageUptodate(page);
	else /* Signal synchronous i/o error. */
		nr = -EIO;
	unlock_page(page);
	return nr;
}

/**
 * ntfs_read_folio - fill a @folio of a @file with data from the device
 * @file:	open file to which the folio @folio belongs or NULL
 * @folio:	page cache folio to fill with data
 *
 * For non-resident attributes, ntfs_read_folio() fills the @folio of the open
 * file @file by calling the ntfs version of the generic
 * block_read_full_folio() function, ntfs_read_block(), which in turn creates
 * and reads in the buffers associated with the folio asynchronously.
 *
 * For resident attributes, OTOH, ntfs_read_folio() fills @folio by copying
 * the data from the mft record (which at this stage is most likely in memory)
 * and fills the remainder with zeroes.  Thus, in this case, I/O is
 * synchronous, as even if the mft record is not cached at this point in time,
 * we need to wait for it to be read in before we can do the copy.
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	u8 *addr;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *mrec;
	unsigned long flags;
	u32 attr_len;
	int err = 0;

retry_readpage:
	BUG_ON(!PageLocked(page));
	vi = page->mapping->host;
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
			PAGE_SHIFT)) {
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Read outside i_size - truncated?");
		goto done;
	}
	/*
	 * This can potentially happen because we clear PageUptodate() during
	 * ntfs_writepage() of MstProtected() attributes.
	 */
	if (PageUptodate(page)) {
		unlock_page(page);
		return 0;
	}
	ni = NTFS_I(vi);
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If attribute is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			BUG_ON(ni->type != AT_DATA);
			err = -EACCES;
			goto err_out;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			return ntfs_read_compressed_block(page);
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* Normal, non-resident data stream. */
		return ntfs_read_block(page);
	}
	/*
	 * Attribute is resident, implying it is not compressed or encrypted.
	 * This also means the attribute is smaller than an mft record and
	 * hence smaller than a page, so can simply zero out any pages with
	 * index above 0.  Note the attribute can actually be marked compressed
	 * but if it is resident the actual data is not compressed so we are
	 * ok to ignore the compressed flag here.
	 */
	if (unlikely(page->index > 0)) {
		zero_user(page, 0, PAGE_SIZE);
		goto done;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	mrec = map_mft_record(base_ni);
	if (IS_ERR(mrec)) {
		err = PTR_ERR(mrec);
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the read_folio.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_readpage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto put_unm_err_out;
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	read_lock_irqsave(&ni->size_lock, flags);
	if (unlikely(attr_len > ni->initialized_size))
		attr_len = ni->initialized_size;
	i_size = i_size_read(vi);
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate. */
		attr_len = i_size;
	}
	addr = kmap_atomic(page);
	/* Copy the data to the page. */
	memcpy(addr, (u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			attr_len);
	/* Zero the remainder of the page. */
	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
	flush_dcache_page(page);
	kunmap_atomic(addr);
put_unm_err_out:
	ntfs_attr_put_search_ctx(ctx);
unm_err_out:
	unmap_mft_record(base_ni);
done:
	SetPageUptodate(page);
err_out:
	unlock_page(page);
	return err;
}

#ifdef NTFS_RW

/**
 * ntfs_write_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, non-mst
 * protected attributes to their backing store.
 *
 * For a page with buffers, map and write the dirty buffers asynchronously
 * under page writeback.  For a page without buffers, create buffers for the
 * page, then proceed as above.
 *
 * If a page doesn't have buffers the page dirty state is definitive.  If a
 * page does have buffers, the page dirty state is just a hint, and the buffer
 * dirty state is definitive.  (A hint which has rules: dirty buffers against
 * a clean page is illegal.  Other combinations are legal and need to be
 * handled.  In particular a dirty page containing clean buffers for example.)
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_read_block() and __block_write_full_page().
 */
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
	VCN vcn;
	LCN lcn;
	s64 initialized_size;
	loff_t i_size;
	sector_t block, dblock, iblock;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int err;
	bool need_end_writeback;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", ni->mft_no, ni->type, page->index);

	BUG_ON(!NInoNonResident(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	if (!page_has_buffers(page)) {
		BUG_ON(!PageUptodate(page));
		create_empty_buffers(page, blocksize,
				(1 << BH_Uptodate) | (1 << BH_Dirty));
		if (unlikely(!page_has_buffers(page))) {
			ntfs_warning(vol->sb, "Error allocating page "
					"buffers. Redirtying page so we try "
					"again later.");
			/*
			 * Put the page back on mapping->dirty_pages, but leave
			 * its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/* NOTE: Different naming scheme to ntfs_read_block()! */

	/* The first block in the page. */
	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);

	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(vi);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);

	/* The first out of bounds block for the data size. */
	dblock = (i_size + blocksize - 1) >> blocksize_bits;

	/* The last (fully or partially) initialized block. */
	iblock = initialized_size >> blocksize_bits;

	/*
	 * Be very careful.  We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	/*
	 * Loop through all the buffers in the page, mapping all the dirty
	 * buffers to disk addresses and handling any aliases from the
	 * underlying block device's mapping.
	 */
	rl = NULL;
	err = 0;
	do {
		bool is_retry = false;

		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.  The buffer was zeroed when it
			 * was created, so just clean it and mark it uptodate.
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}

		/* Clean buffers are not written out, so no need to map them. */
		if (!buffer_dirty(bh))
			continue;

		/* Make sure we have enough initialized size. */
		if (unlikely((block >= iblock) &&
				(initialized_size < i_size))) {
			/*
			 * If this page is fully outside initialized size, zero
			 * out all pages between the current initialized size
			 * and the current page.
			 */
			if (block > iblock) {
				/*
				 * TODO: Zero (via ntfs_read_folio()) all pages
				 * between the current initialized size and
				 * this page, then update the initialized size
				 * and iblock accordingly.
				 */
			}
			/*
			 * The current page straddles initialized size.  Zero
			 * all non-uptodate buffers and set them uptodate (and
			 * dirty?).  Note, there aren't any non-uptodate
			 * buffers if the page is uptodate.
			 */
			if (!PageUptodate(page)) {
				/*
				 * TODO: Zero any non-uptodate buffers up to
				 * i_size and set them uptodate and dirty.
				 */
			}
			/*
			 * TODO: Update the initialized size in the attribute
			 * and in the inode (up to i_size) and update iblock.
			 * Until all of this is implemented, bail out here.
			 */
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}

		/* No need to map buffers that are already mapped. */
		if (buffer_mapped(bh))
			continue;

		/* Unmapped, dirty buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;

		/* Convert block into corresponding vcn and offset. */
		vcn = (VCN)block << blocksize_bits;
		vcn_ofs = vcn & vol->cluster_size_mask;
		vcn >>= vol->cluster_size_bits;
		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/* Successful remap. */
		if (lcn >= 0) {
			/* Setup buffer head to point to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
					vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);
			continue;
		}
		/* It is a hole, need to instantiate it. */
		if (lcn == LCN_HOLE) {
			u8 *kaddr;
			unsigned long *bpos, *bend;

			/* Check if the buffer is zero. */
			kaddr = kmap_atomic(page);
			bpos = (unsigned long *)(kaddr + bh_offset(bh));
			bend = (unsigned long *)((u8*)bpos + blocksize);
			do {
				if (unlikely(*bpos))
					break;
			} while (likely(++bpos < bend));
			kunmap_atomic(kaddr);
			if (bpos == bend) {
				/*
				 * Buffer is zero and sparse, no need to write
				 * it.
				 */
				bh->b_blocknr = -1;
				clear_buffer_dirty(bh);
				continue;
			}
			/* TODO: Instantiate the hole. */
			ntfs_error(vol->sb, "Writing into sparse regions is "
					"not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}
		/* If first try and runlist unmapped, map and retry. */
		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (likely(!err))
				goto lock_retry_remap;
			rl = NULL;
		} else if (!rl)
			up_read(&ni->runlist.lock);
		/*
		 * If buffer is outside the runlist, truncate has occurred.
		 * Just clean and clear the buffer and set it uptodate so it
		 * can be discarded by the VM.
		 */
		if (err == -ENOENT || lcn == LCN_ENOENT) {
			bh->b_blocknr = -1;
			clear_buffer_dirty(bh);
			zero_user(page, bh_offset(bh), blocksize);
			set_buffer_uptodate(bh);
			err = 0;
			continue;
		}
		/* Failed to map the buffer, even after retrying. */
		if (!err)
			err = -EIO;
		bh->b_blocknr = -1;
		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
				"because its location on disk could not be "
				"determined%s (error code %i).", ni->mft_no,
				ni->type, (unsigned long long)vcn,
				vcn_ofs, is_retry ? " even after "
				"retrying" : "", err);
		break;
	} while (block++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* For the error case, need to reset bh to the beginning. */
	bh = head;

	/* Just an optimization, so ->writepage() is not called twice. */
	if (unlikely(!PageUptodate(page))) {
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				bh = head;
				break;
			}
		} while ((bh = bh->b_this_page) != head);
		if (uptodate)
			SetPageUptodate(page);
	}

	/* Setup all mapped, dirty buffers for async write i/o. */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				BUG_ON(!buffer_uptodate(bh));
				mark_buffer_async_write(bh);
			} else
				unlock_buffer(bh);
		} else if (unlikely(err)) {
			/*
			 * For the error case.  The buffer may have been set
			 * dirty during attachment to a dirty page.
			 */
			if (err != -ENOMEM)
				clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	if (unlikely(err)) {
		/* TODO: Remove the -EOPNOTSUPP check later on. */
		if (unlikely(err == -EOPNOTSUPP))
			err = 0;
		else if (err == -ENOMEM) {
			ntfs_warning(vol->sb, "Error allocating memory. "
					"Redirtying page so we try again "
					"later.");
			/*
			 * Put the page back on mapping->dirty_pages, but
			 * leave its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			err = 0;
		} else
			SetPageError(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */

	/* Submit the prepared buffers for i/o. */
	need_end_writeback = true;
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, bh);
			need_end_writeback = false;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	/* If no i/o was started, need to end_page_writeback(). */
	if (unlikely(need_end_writeback))
		end_page_writeback(page);

	ntfs_debug("Done.");
	return err;
}

/**
 * ntfs_write_mst_block - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This function is for writing pages belonging to non-resident, mst protected
 * attributes to their backing store.  The only supported attributes are index
 * allocation and $MFT/$DATA.  Both directory inodes and index inodes are
 * supported for the index allocation case.
 *
 * The page must remain locked for the duration of the write because we apply
 * the mst fixups, write, and then undo the fixups, so if we were to unlock
 * the page before undoing the fixups, any other user of the page will see
 * the page contents as corrupt.
 *
 * We clear the page uptodate flag for the duration of the function to ensure
 * exclusion for the $MFT/$DATA case against someone mapping an mft record we
 * are working on.
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_write_block(), ntfs_mft_writepage() and
 * write_mft_record_nolock().
 */
static int ntfs_write_mst_block(struct page *page,
		struct writeback_control *wbc)
{
	sector_t block, dblock, rec_block;
	struct inode *vi = page->mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	u8 *kaddr;
	unsigned int rec_size = ni->itype.index.block_size;
	ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
	struct buffer_head *bh, *head, *tbh, *rec_start_bh;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	runlist_element *rl;
	int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
	unsigned bh_size, rec_size_bits;
	bool sync, is_mft, page_is_dirty, rec_is_dirty;
	unsigned char bh_size_bits;

	if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
		return -EINVAL;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", vi->i_ino, ni->type, page->index);
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(!NInoMstProtected(ni));
	is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
	/*
	 * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
	 * in its page cache were to be marked dirty.  However this should
	 * never happen with the current driver and considering we do not
	 * handle this case here we do a BUG_ON() so that the error is more
	 * clearly spotted.
	 */
	BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
	bh_size = vol->sb->s_blocksize;
	bh_size_bits = vol->sb->s_blocksize_bits;
	max_bhs = PAGE_SIZE / bh_size;
	BUG_ON(!max_bhs);
	BUG_ON(max_bhs > MAX_BUF_PER_PAGE);

	/* Were we called for sync purposes? */
	sync = (wbc->sync_mode == WB_SYNC_ALL);

	/* Make sure we have mapped buffers. */
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	rec_size_bits = ni->itype.index.block_size_bits;
	BUG_ON(!(PAGE_SIZE >> rec_size_bits));
	bhs_per_rec = rec_size >> bh_size_bits;
	BUG_ON(!bhs_per_rec);

	/* The first block in the page. */
	rec_block = block = (sector_t)page->index <<
			(PAGE_SHIFT - bh_size_bits);

	/* The first out of bounds block for the data size. */
	dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;

	rl = NULL;
	err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
	page_is_dirty = rec_is_dirty = false;
	rec_start_bh = NULL;
	do {
		bool is_retry = false;

		if (likely(block < rec_block)) {
			if (unlikely(block >= dblock)) {
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * This block is not the first one in the record.  We
			 * ignore the buffer's dirty state because we could
			 * have raced with a parallel mark_ntfs_record_dirty().
			 */
			if (!rec_is_dirty)
				continue;
			if (unlikely(err2)) {
				if (err2 != -ENOMEM)
					clear_buffer_dirty(bh);
				continue;
			}
		} else /* if (block == rec_block) */ {
			BUG_ON(block > rec_block);
			/* This block is the first one in the record. */
			rec_block += bhs_per_rec;
			err2 = 0;
			if (unlikely(block >= dblock)) {
				clear_buffer_dirty(bh);
				continue;
			}
			if (!buffer_dirty(bh)) {
				/* Clean records are not written out. */
				rec_is_dirty = false;
				continue;
			}
			rec_is_dirty = true;
			rec_start_bh = bh;
		}
		/* Need to map the buffer if it is not mapped already. */
		if (unlikely(!buffer_mapped(bh))) {
			VCN vcn;
			LCN lcn;
			unsigned int vcn_ofs;

			bh->b_bdev = vol->sb->s_bdev;
			/* Obtain the vcn and offset of the current block. */
			vcn = (VCN)block << bh_size_bits;
			vcn_ofs = vcn & vol->cluster_size_mask;
			vcn >>= vol->cluster_size_bits;
			if (!rl) {
lock_retry_remap:
				down_read(&ni->runlist.lock);
				rl = ni->runlist.rl;
			}
			if (likely(rl != NULL)) {
				/* Seek to element containing target vcn. */
				while (rl->length && rl[1].vcn <= vcn)
					rl++;
				lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
			} else
				lcn = LCN_RL_NOT_MAPPED;
			/* Successful remap. */
			if (likely(lcn >= 0)) {
				/* Setup buffer head to correct block. */
				bh->b_blocknr = ((lcn <<
						vol->cluster_size_bits) +
						vcn_ofs) >> bh_size_bits;
				set_buffer_mapped(bh);
			} else {
				/*
				 * Remap failed.  Retry to map the runlist once
				 * unless we are working on $MFT which always
				 * has the whole of its runlist in memory.
				 */
				if (!is_mft && !is_retry &&
						lcn == LCN_RL_NOT_MAPPED) {
					is_retry = true;
					/*
					 * Attempt to map runlist, dropping
					 * lock for the duration.
					 */
					up_read(&ni->runlist.lock);
					err2 = ntfs_map_runlist(ni, vcn);
					if (likely(!err2))
						goto lock_retry_remap;
					if (err2 == -ENOMEM)
						page_is_dirty = true;
					lcn = err2;
				} else {
					err2 = -EIO;
					if (!rl)
						up_read(&ni->runlist.lock);
				}
				/* Remember the error and mark the buffer unmapped. */
				if (!err || err == -ENOMEM)
					err = err2;
				bh->b_blocknr = -1;
				ntfs_error(vol->sb, "Cannot write ntfs record "
						"0x%llx (inode 0x%lx, "
						"attribute type 0x%x) because "
						"its location on disk could "
						"not be determined (error "
						"code %lli).",
						(long long)block <<
						bh_size_bits >>
						vol->mft_record_size_bits,
						ni->mft_no, ni->type,
						(long long)lcn);
				/*
				 * If this is not the first buffer of the
				 * record, remove the buffers gathered for this
				 * record so far from the list of buffers to
				 * write and, unless the error was -ENOMEM (in
				 * which case the page gets redirtied), clear
				 * their dirty flags.
				 */
				if (rec_start_bh != bh) {
					while (bhs[--nr_bhs] != rec_start_bh)
						;
					if (err2 != -ENOMEM) {
						do {
							clear_buffer_dirty(
								rec_start_bh);
						} while ((rec_start_bh =
								rec_start_bh->
								b_this_page) !=
								bh);
					}
				}
				continue;
			}
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(nr_bhs >= max_bhs);
		bhs[nr_bhs++] = bh;
	} while (block++, (bh = bh->b_this_page) != head);
	if (unlikely(rl))
		up_read(&ni->runlist.lock);
	/* If there were no dirty buffers, we are done. */
	if (!nr_bhs)
		goto done;
	/* Map the page so we can access its contents. */
	kaddr = kmap(page);
	/* Clear the page uptodate flag whilst the mst fixups are applied. */
	BUG_ON(!PageUptodate(page));
	ClearPageUptodate(page);
	for (i = 0; i < nr_bhs; i++) {
		unsigned int ofs;

		/* Skip buffers which are not at the beginning of records. */
		if (i % bhs_per_rec)
			continue;
		tbh = bhs[i];
		ofs = bh_offset(tbh);
		if (is_mft) {
			ntfs_inode *tni;
			unsigned long mft_no;

			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
					>> rec_size_bits;
			/* Check whether to write this mft record. */
			tni = NULL;
			if (!ntfs_may_write_mft_record(vol, mft_no,
					(MFT_RECORD*)(kaddr + ofs), &tni)) {
				/*
				 * The record should not be written.  This
				 * means we need to redirty the page before
				 * returning it.
				 */
				page_is_dirty = true;
				/*
				 * Remove the buffers in this mft record from
				 * the list of buffers to write.
				 */
				do {
					bhs[i] = NULL;
				} while (++i % bhs_per_rec);
				continue;
			}
			/*
			 * The record should be written.  If a locked ntfs
			 * inode was returned, add it to the array of locked
			 * ntfs inodes.
			 */
			if (tni)
				locked_nis[nr_locked_nis++] = tni;
		}
		/* Apply the mst protection fixups. */
		err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
				rec_size);
		if (unlikely(err2)) {
			if (!err || err == -ENOMEM)
				err = -EIO;
			ntfs_error(vol->sb, "Failed to apply mst fixups "
					"(inode 0x%lx, attribute type 0x%x, "
					"page index 0x%lx, page offset 0x%x)!"
					" Unmount and run chkdsk.", vi->i_ino,
					ni->type, page->index, ofs);
			/*
			 * Mark all the buffers in this record clean as we do
			 * not want to write corrupt data to disk.
			 */
			do {
				clear_buffer_dirty(bhs[i]);
				bhs[i] = NULL;
			} while (++i % bhs_per_rec);
			continue;
		}
		nr_recs++;
	}
	/* If no records are to be written out, we are done. */
	if (!nr_recs)
		goto unm_done;
	flush_dcache_page(page);
	/* Lock buffers and start synchronous write i/o on them. */
	for (i = 0; i < nr_bhs; i++) {
		tbh = bhs[i];
		if (!tbh)
			continue;
		if (!trylock_buffer(tbh))
			BUG();
		/* The buffer dirty state is now irrelevant, just clean it. */
		clear_buffer_dirty(tbh);
		BUG_ON(!buffer_uptodate(tbh));
		BUG_ON(!buffer_mapped(tbh));
		get_bh(tbh);
		tbh->b_end_io = end_buffer_write_sync;
		submit_bh(REQ_OP_WRITE, tbh);
	}
	/* Synchronize the mft mirror now if not @sync. */
	if (is_mft && !sync)
		goto do_mirror;
do_wait:
	/* Wait on i/o completion of buffers. */
	for (i = 0; i < nr_bhs; i++) {
		tbh = bhs[i];
		if (!tbh)
			continue;
		wait_on_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_error(vol->sb, "I/O error while writing ntfs "
					"record buffer (inode 0x%lx, "
					"attribute type 0x%x, page index "
					"0x%lx, page offset 0x%lx)! Unmount "
					"and run chkdsk.", vi->i_ino, ni->type,
					page->index, bh_offset(tbh));
			if (!err || err == -ENOMEM)
				err = -EIO;
			/*
			 * Set the buffer uptodate so the page and buffer
			 * states do not become out of sync.
			 */
			set_buffer_uptodate(tbh);
		}
	}
	/* If @sync, now synchronize the mft mirror. */
	if (is_mft && sync) {
do_mirror:
		for (i = 0; i < nr_bhs; i++) {
			unsigned long mft_no;
			unsigned int ofs;

			/*
			 * Skip buffers which are not at the beginning of
			 * records.
			 */
			if (i % bhs_per_rec)
				continue;
			tbh = bhs[i];
			/* Skip removed buffers (and hence records). */
			if (!tbh)
				continue;
			ofs = bh_offset(tbh);
			/* Get the mft record number. */
			mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
					>> rec_size_bits;
			if (mft_no < vol->mftmirr_size)
				ntfs_sync_mft_mirror(vol, mft_no,
						(MFT_RECORD*)(kaddr + ofs),
						sync);
		}
		if (!sync)
			goto do_wait;
	}
	/* Remove the mst protection fixups again. */
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec)) {
			tbh = bhs[i];
			if (!tbh)
				continue;
			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(tbh)));
		}
	}
	flush_dcache_page(page);
unm_done:
	/* Unlock any locked inodes. */
	while (nr_locked_nis-- > 0) {
		ntfs_inode *tni, *base_tni;

		tni = locked_nis[nr_locked_nis];
		/* Get the base inode. */
		mutex_lock(&tni->extent_lock);
		if (tni->nr_extents >= 0)
			base_tni = tni;
		else {
			base_tni = tni->ext.base_ntfs_ino;
			BUG_ON(!base_tni);
		}
		mutex_unlock(&tni->extent_lock);
		ntfs_debug("Unlocking %s inode 0x%lx.",
				tni == base_tni ? "base" : "extent",
				tni->mft_no);
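		/*
		 * Unlock the mft record and drop the references that were
		 * taken when the record was deemed safe to write (see
		 * ntfs_may_write_mft_record()), then release the base vfs
		 * inode.
		 */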
		mutex_unlock(&tni->mrec_lock);
		atomic_dec(&tni->count);
		iput(VFS_I(base_tni));
	}
	SetPageUptodate(page);
	kunmap(page);
done:
	if (unlikely(err && err != -ENOMEM)) {
		/*
		 * Set page error if there is only one ntfs record in the page.
		 * Otherwise we would lose per-record granularity.
		 */
		if (ni->itype.index.block_size == PAGE_SIZE)
			SetPageError(page);
		NVolSetErrors(vol);
	}
	if (page_is_dirty) {
		ntfs_debug("Page still contains one or more dirty ntfs "
				"records. Redirtying the page starting at "
				"record 0x%lx.", page->index <<
				(PAGE_SHIFT - rec_size_bits));
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	} else {
		/*
		 * Keep the VM happy.  This must be done otherwise the
		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
		 * the page is clean.
		 */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
	}
	if (likely(!err))
		ntfs_debug("Done.");
	return err;
}

/**
 * ntfs_writepage - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty ntfs page cache
 * page cleaned.  The VM has already locked the page and marked it clean.
 *
 * For non-resident attributes, ntfs_writepage() writes the @page by calling
 * the ntfs version of the generic block_write_full_page() function,
 * ntfs_write_block(), which in turn if necessary creates and writes the
 * buffers associated with the page asynchronously.
 *
 * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
 * the data to the mft record (which at this stage is most likely in memory).
 * The mft record is then marked dirty and written out asynchronously via the
 * vfs inode dirty code path for the inode the mft record belongs to.
 *
 * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
 *
 * Return 0 on success and -errno on error.
 */
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
	loff_t i_size;
	struct inode *vi = page->mapping->host;
	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
	char *addr;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	u32 attr_len;
	int err;

retry_writepage:
	BUG_ON(!PageLocked(page));
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
			PAGE_SHIFT)) {
		struct folio *folio = page_folio(page);
		/*
		 * The page may have dirty, unmapped buffers.  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidate_folio(folio, 0, folio_size(folio));
		folio_unlock(folio);
		ntfs_debug("Write outside i_size - truncated?");
		return 0;
	}
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			unlock_page(page);
			BUG_ON(ni->type != AT_DATA);
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			// TODO: Implement and replace this with
			// return ntfs_write_compressed_block(page);
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
		// TODO: Implement and remove this check.
		if (NInoNonResident(ni) && NInoSparse(ni)) {
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to sparse files is not "
					"supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* We have to zero every time due to mmap-at-end-of-file. */
		if (page->index >= (i_size >> PAGE_SHIFT)) {
			/* The page straddles i_size. */
			unsigned int ofs = i_size & ~PAGE_MASK;
			zero_user_segment(page, ofs, PAGE_SIZE);
		}
		/* Handle mst protected attributes. */
		if (NInoMstProtected(ni))
			return ntfs_write_mst_block(page, wbc);
		/* Normal, non-resident data stream. */
		return ntfs_write_block(page, wbc);
	}
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * mst protected.  This also means the attribute is smaller than an mft
	 * record and hence smaller than a page, so can simply return error on
	 * any pages with index above 0.  Note the attribute can actually be
	 * marked compressed but if it is resident the actual data is not
	 * compressed so we are ok to ignore the compressed flag here.
	 */
	BUG_ON(page_has_buffers(page));
	BUG_ON(!PageUptodate(page));
	if (unlikely(page->index > 0)) {
		ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
				"Aborting write.", page->index);
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		return -EIO;
	}
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the writepage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_writepage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto err_out;
	/*
	 * Keep the VM happy.  This must be done otherwise the radix-tree tag
	 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	i_size = i_size_read(vi);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate or a failed truncate. */
		attr_len = i_size;
		/*
		 * If the truncate failed, fix it up now.  If a concurrent
		 * truncate, we do its job, so it does not have to do anything.
		 */
		err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
				attr_len);
		/* Shrinking cannot fail. */
		BUG_ON(err);
	}
	addr = kmap_atomic(page);
	/* Copy the data from the page to the mft record. */
	memcpy((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			addr, attr_len);
	/* Zero out of bounds area in the page cache page. */
	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
	kunmap_atomic(addr);
	flush_dcache_page(page);
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	/* We are done with the page. */
	end_page_writeback(page);
	/* Finally, mark the mft record dirty, so it gets written back. */
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
				"page so we try again later.");
		/*
		 * Put the page back on mapping->dirty_pages, but leave its
		 * buffers' dirty state as-is.
		 */
		redirty_page_for_writepage(wbc, page);
		err = 0;
	} else {
		ntfs_error(vi->i_sb, "Resident attribute write failed with "
				"error %i.", err);
		SetPageError(page);
		NVolSetErrors(ni->vol);
	}
	unlock_page(page);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}

#endif	/* NTFS_RW */

/**
 * ntfs_bmap - map logical file block to physical device block
 * @mapping:	address space mapping to which the block to be mapped belongs
 * @block:	logical block to map to its physical device block
 *
 * For regular, non-resident files (i.e. not compressed and not encrypted),
 * map the logical @block belonging to the file described by the address space
 * mapping @mapping to its physical device block.
 *
 * The size of the block is equal to the @s_blocksize field of the super block
 * of the mounted file system which is guaranteed to be smaller than or equal
 * to the cluster size, thus the block is guaranteed to fit entirely inside
 * the cluster, which means we do not need to care how many contiguous bytes
 * are available after the beginning of the block.
 *
 * Return the physical device block if the mapping succeeded or 0 if the block
 * is sparse or there was an error.
 */
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
{
	s64 ofs, size;
	loff_t i_size;
	LCN lcn;
	unsigned long blocksize, flags;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	unsigned delta;
	unsigned char blocksize_bits, cluster_size_shift;

	ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
			ni->mft_no, (unsigned long long)block);
	if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
		ntfs_error(vol->sb, "BMAP does not make sense for %s "
				"attributes, returning 0.",
				(ni->type != AT_DATA) ? "non-data" :
				(!NInoNonResident(ni) ? "resident" :
				"encrypted"));
		return 0;
	}
	/* None of these can happen. */
	BUG_ON(NInoCompressed(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	ofs = (s64)block << blocksize_bits;
	read_lock_irqsave(&ni->size_lock, flags);
	size = ni->initialized_size;
	i_size = i_size_read(VFS_I(ni));
	read_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * If the offset is outside the initialized size or the block straddles
	 * the initialized size then pretend it is a hole unless the
	 * initialized size equals the file size.
	 */
	if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size)))
		goto hole;
	cluster_size_shift = vol->cluster_size_bits;
	down_read(&ni->runlist.lock);
	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false);
	up_read(&ni->runlist.lock);
	if (unlikely(lcn < LCN_HOLE)) {
		/*
		 * Step down to an integer to avoid gcc doing a long long
		 * comparison in the switch when we know @lcn is between
		 * LCN_HOLE and LCN_EIO (i.e. -1 to -5).
		 *
		 * Otherwise older gcc (at least on some architectures) will
		 * try to use __cmpdi2() which is of course not available in
		 * the kernel.
		 */
		switch ((int)lcn) {
		case LCN_ENOENT:
			/*
			 * If the offset is out of bounds then pretend it is a
			 * hole.
			 */
			goto hole;
		case LCN_ENOMEM:
			ntfs_error(vol->sb, "Not enough memory to complete "
					"mapping for inode 0x%lx. "
					"Returning 0.", ni->mft_no);
			break;
		default:
			ntfs_error(vol->sb, "Failed to complete mapping for "
					"inode 0x%lx. Run chkdsk. "
					"Returning 0.", ni->mft_no);
			break;
		}
		return 0;
	}
	if (lcn < 0) {
		/* It is a hole. */
hole:
		ntfs_debug("Done (returning hole).");
		return 0;
	}
	/*
	 * The block is really allocated and fulfils all our criteria.
	 * Convert the cluster to units of block size and return the result.
	 */
	delta = ofs & vol->cluster_size_mask;
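	/*
	 * sector_t may be narrower than the 64-bit LCN type on 32-bit
	 * architectures, so check for truncation when converting the cluster
	 * number to a device block number.
	 */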
	if (unlikely(sizeof(block) < sizeof(lcn))) {
		block = lcn = ((lcn << cluster_size_shift) + delta) >>
				blocksize_bits;
		/* If the block number was truncated return 0. */
		if (unlikely(block != lcn)) {
			ntfs_error(vol->sb, "Physical block 0x%llx is too "
					"large to be returned, returning 0.",
					(long long)lcn);
			return 0;
		}
	} else
		block = ((lcn << cluster_size_shift) + delta) >>
				blocksize_bits;
	ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)lcn);
	return block;
}

/**
 * ntfs_normal_aops - address space operations for normal inodes and attributes
 *
 * Note these are not used for compressed or mst protected inodes and
 * attributes.
 */
const struct address_space_operations ntfs_normal_aops = {
	.read_folio	= ntfs_read_folio,
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,
	.dirty_folio	= block_dirty_folio,
#endif /* NTFS_RW */
	.bmap		= ntfs_bmap,
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

/**
 * ntfs_compressed_aops - address space operations for compressed inodes
 */
const struct address_space_operations ntfs_compressed_aops = {
	.read_folio	= ntfs_read_folio,
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,
	.dirty_folio	= block_dirty_folio,
#endif /* NTFS_RW */
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

/**
 * ntfs_mst_aops - general address space operations for mst protected inodes
 *		   and attributes
 */
const struct address_space_operations ntfs_mst_aops = {
	.read_folio	= ntfs_read_folio,	/* Fill page with data. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.dirty_folio	= filemap_dirty_folio,
#endif /* NTFS_RW */
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

#ifdef NTFS_RW

/**
 * mark_ntfs_record_dirty - mark an ntfs record dirty
 * @page:	page containing the ntfs record to mark dirty
 * @ofs:	byte offset within @page at which the ntfs record begins
 *
 * Set the buffers and the page in which the ntfs record is located dirty.
 *
 * The latter also marks the vfs inode the ntfs record belongs to dirty
 * (I_DIRTY_PAGES only).
 *
 * If the page does not have buffers, we create them and set them uptodate.
 * The page may not be locked which is why we need to handle the buffers under
 * the mapping->private_lock.  Once the buffers are marked dirty we no longer
 * need the lock as try_to_free_buffers() does not free dirty buffers.
 */
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
{
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	struct buffer_head *bh, *head, *buffers_to_free = NULL;
	unsigned int end, bh_size, bh_ofs;

	BUG_ON(!PageUptodate(page));
	end = ofs + ni->itype.index.block_size;
	bh_size = VFS_I(ni)->i_sb->s_blocksize;
	spin_lock(&mapping->private_lock);
	if (unlikely(!page_has_buffers(page))) {
		spin_unlock(&mapping->private_lock);
		bh = head = alloc_page_buffers(page, bh_size, true);
		spin_lock(&mapping->private_lock);
		if (likely(!page_has_buffers(page))) {
			struct buffer_head *tail;

			do {
				set_buffer_uptodate(bh);
				tail = bh;
				bh = bh->b_this_page;
			} while (bh);
			tail->b_this_page = head;
			attach_page_private(page, head);
		} else
			buffers_to_free = bh;
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);
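	/*
	 * Walk the page's buffers and dirty every buffer that overlaps the
	 * byte range [@ofs, @ofs + the ntfs record size).
	 */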
	do {
		bh_ofs = bh_offset(bh);
		if (bh_ofs + bh_size <= ofs)
			continue;
		if (unlikely(bh_ofs >= end))
			break;
		set_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	spin_unlock(&mapping->private_lock);
	filemap_dirty_folio(mapping, page_folio(page));
	if (unlikely(buffers_to_free)) {
		do {
			bh = buffers_to_free->b_this_page;
			free_buffer_head(buffers_to_free);
			buffers_to_free = bh;
		} while (buffers_to_free);
	}
}

#endif /* NTFS_RW */