0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/fs.h>
0011 #include <linux/buffer_head.h>
0012 #include <linux/blkdev.h>
0013 #include <linux/vmalloc.h>
0014 #include <linux/slab.h>
0015
0016 #include "attrib.h"
0017 #include "inode.h"
0018 #include "debug.h"
0019 #include "ntfs.h"
0020
0021
0022
0023
/*
 * ntfs_compression_constants - constants used by the NTFS compression code.
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	= 0,	/* Tag bit clear: literal byte. */
	NTFS_PHRASE_TOKEN	= 1,	/* Tag bit set: back-reference. */
	NTFS_TOKEN_MASK		= 1,	/* Mask to extract one tag bit. */

	/* Compression sub-block (sb) constants. */
	NTFS_SB_SIZE_MASK	= 0x0fff, /* Mask for sb length in header. */
	NTFS_SB_SIZE		= 0x1000, /* Uncompressed sb size (4kiB). */
	NTFS_SB_IS_COMPRESSED	= 0x8000, /* Header flag: sb is compressed. */

	/*
	 * Maximum supported size of a compression block (cb), i.e. the size
	 * of the global decompression buffer (64kiB).
	 */
	NTFS_MAX_CB_SIZE	= 64 * 1024,
} ntfs_compression_constants;
0043
0044
0045
0046
/*
 * Single global buffer (NTFS_MAX_CB_SIZE bytes, vmalloc()ed by
 * allocate_compression_buffers()) into which one compression block is
 * assembled before decompression.  Access is serialized by ntfs_cb_lock.
 */
static u8 *ntfs_compression_buffer;
0048
0049
0050
0051
/* Spinlock serializing access to ntfs_compression_buffer. */
static DEFINE_SPINLOCK(ntfs_cb_lock);
0053
0054
0055
0056
0057
0058
0059
0060
0061 int allocate_compression_buffers(void)
0062 {
0063 BUG_ON(ntfs_compression_buffer);
0064
0065 ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
0066 if (!ntfs_compression_buffer)
0067 return -ENOMEM;
0068 return 0;
0069 }
0070
0071
0072
0073
0074
0075
0076 void free_compression_buffers(void)
0077 {
0078 BUG_ON(!ntfs_compression_buffer);
0079 vfree(ntfs_compression_buffer);
0080 ntfs_compression_buffer = NULL;
0081 }
0082
0083
0084
0085
0086 static void zero_partial_compressed_page(struct page *page,
0087 const s64 initialized_size)
0088 {
0089 u8 *kp = page_address(page);
0090 unsigned int kp_ofs;
0091
0092 ntfs_debug("Zeroing page region outside initialized size.");
0093 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) {
0094 clear_page(kp);
0095 return;
0096 }
0097 kp_ofs = initialized_size & ~PAGE_MASK;
0098 memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs);
0099 return;
0100 }
0101
0102
0103
0104
0105 static inline void handle_bounds_compressed_page(struct page *page,
0106 const loff_t i_size, const s64 initialized_size)
0107 {
0108 if ((page->index >= (initialized_size >> PAGE_SHIFT)) &&
0109 (initialized_size < i_size))
0110 zero_partial_compressed_page(page, initialized_size);
0111 return;
0112 }
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
/**
 * ntfs_decompress - decompress a compression block into the destination pages
 * @dest_pages:		array of destination pages
 * @completed_pages:	scratch array collecting indices of completed pages
 * @dest_index:		current index into @dest_pages (in/out)
 * @dest_ofs:		current offset within @dest_pages[*@dest_index] (in/out)
 * @dest_max_index:	maximum valid index into @dest_pages
 * @dest_max_ofs:	maximum valid offset within @dest_pages[@dest_max_index]
 * @xpage:		index of the page the caller must complete
 * @xpage_done:		set to 1 when @dest_pages[@xpage] is completed
 * @cb_start:		start of the compressed compression block (cb)
 * @cb_size:		size of the cb at @cb_start in bytes
 * @i_size:		file size when the read started
 * @initialized_size:	initialized size when the read started
 *
 * Decompress the cb at @cb_start into @dest_pages, starting at page index
 * *@dest_index, offset *@dest_ofs.  The caller must hold ntfs_cb_lock; it is
 * dropped here (at the return_error label) before finalizing pages, as the
 * finalization may sleep.
 *
 * Completed pages are flushed, unmapped, marked uptodate and unlocked; all
 * except @xpage are also released via put_page().
 *
 * Return: 0 on success, -EOVERFLOW if the compressed stream is corrupt or
 * would overflow the destination.  NOTE(review): on error, already-completed
 * pages have still been finalized and may include @xpage.
 */
static int ntfs_decompress(struct page *dest_pages[], int completed_pages[],
		int *dest_index, int *dest_ofs, const int dest_max_index,
		const int dest_max_ofs, const int xpage, char *xpage_done,
		u8 *const cb_start, const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size;	/* End of cb. */
	u8 *cb = cb_start;	/* Current position in cb. */
	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
				   NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data?  The latter check covers the case where the sb
	 * header would lie beyond the destination limit; a zero sb header
	 * also terminates the cb.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					put_page(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages.  Finalization happens at the
			 * return_error label once the lock is dropped.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into the destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to the first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first token
		 * in the sb as a back-reference at offset zero has nothing to
		 * refer back to.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l).  The split between the back-pointer
		 * and length fields of the 16-bit phrase token depends on the
		 * current position in the sb: compute lg = floor(log2(current
		 * destination offset in sb - 1)) - 3, clamped at 0, via the
		 * shift loop below.
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in the
		 * destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy the
			 * non-overlapping part and then do a slow byte by byte
			 * copy for the overlapping part (memcpy must not be
			 * used on overlapping regions).  Also advance the
			 * destination pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
/**
 * ntfs_read_compressed_block - read a compressed attribute into the page cache
 * @page:	locked page in the compression block(s) we need to read
 *
 * Reads the compression block(s) overlapping @page, decompresses (or copies,
 * for uncompressed/sparse cbs) the data into all page cache pages covered by
 * those cbs, and marks the completed pages uptodate.
 *
 * Outline:
 *   1. Work out which compression block(s) @page lies in.
 *   2. Grab all page cache pages covered by this/these cb(s).
 *   3. Read the (next) cb via buffer heads into the global buffer.
 *   4. Decompress/copy it into the corresponding pages.
 *   5. Repeat from 3 while cbs remain, then clean up.
 *
 * Already uptodate or dirty pages are skipped (re-reading them would lose
 * data or waste time).
 *
 * Return: 0 if @page was completed successfully, otherwise a negative error
 * code (-EOVERFLOW from decompression, -EIO on IO/runlist failure).
 */
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (rounded down to a cb boundary). */
	VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (rounded up to the next cb
	 * boundary).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page.  Due to the alignment
	 * of start_vcn and end_vcn, no rounding up is needed here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	int *completed_pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	completed_pages = kmalloc_array(nr_pages + 1, sizeof(int), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs || !completed_pages)) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * @offset is the file page index of the first page in the cb range.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * The remaining pages need to be obtained from the page cache.  Take
	 * a consistent snapshot of the sizes under the size lock first.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) -
			offset;
	/* Is the requested page fully outside i_size? (concurrent truncate) */
	if (xpage >= max_page) {
		kfree(bhs);
		kfree(pages);
		kfree(completed_pages);
		zero_user(page, 0, PAGE_SIZE);
		ntfs_debug("Compressed read outside i_size - truncated?");
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			put_page(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have all the destination pages we need to fill.  Now read the
	 * first compression block.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map the runlist, dropping the runlist
			 * lock for the duration and retrying the lookup once.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (!trylock_buffer(tbh))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition. This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * overoptimises the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			io_schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more
	 * until we are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/* Just a precaution: zero-terminate a partially filled buffer. */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_MASK;
	cb_max_page >>= PAGE_SHIFT;

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/*
		 * The cluster loop broke out at its very first vcn: the
		 * whole cb is sparse.  Zero the page range overlapping it.
		 */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to update cb_pos at this stage:
			 *	cb_pos += cb_max_ofs - cur_ofs;
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/*
		 * All clusters of the cb are allocated: the cb is stored
		 * uncompressed.  We cannot sleep while holding ntfs_cb_lock,
		 * so this is done in two stages: copy under the lock, then
		 * finalize the pages after dropping it.
		 */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy data into the destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_SIZE - cur_ofs);
			cb_pos += PAGE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop the lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize the completed pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					put_page(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination page(s). */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, completed_pages, &cur_page,
				&cur_ofs, cb_max_page, cb_max_ofs, xpage,
				&xpage_done, cb_pos, cb_size - (cb_pos - cb),
				i_size, initialized_size);
		/*
		 * We can sleep from now on; ntfs_cb_lock was dropped inside
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages of this cb. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						put_page(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more compression blocks to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice. Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				put_page(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);
	kfree(completed_pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				put_page(page);
		}
	}
	kfree(pages);
	kfree(completed_pages);
	return -EIO;
}