// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#include "isofs.h"
#include "zisofs.h"

/* This should probably be global. */
static char zisofs_sink_page[PAGE_SIZE];

/*
 * This contains the zlib memory allocation and the mutex for the
 * allocation; this avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static DEFINE_MUTEX(zisofs_zlib_lock);

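/*
 * The pattern above - allocate one zlib workspace up front and serialize
 * its users with a mutex - keeps per-block decompression from ever having
 * to allocate memory. A minimal userspace analogue, assuming POSIX threads
 * and plain malloc(); the names below are illustrative only and are not
 * part of this driver.
 */
#include <pthread.h>
#include <stdlib.h>

static void *example_workspace;
static pthread_mutex_t example_workspace_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate the scratch buffer once, at startup, so later use cannot fail. */
static int example_workspace_init(size_t size)
{
    example_workspace = malloc(size);
    return example_workspace ? 0 : -1;
}

/* Run @fn with exclusive access to the single shared workspace. */
static void example_with_workspace(void (*fn)(void *workspace))
{
    pthread_mutex_lock(&example_workspace_lock);
    fn(example_workspace);
    pthread_mutex_unlock(&example_workspace_lock);
}
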
/*
 * Read data of @inode from @block_start to @block_end and uncompress
 * to one zisofs block. Store the data in the @pages array with @pcount
 * entries. Start storing at offset @poffset of the first page.
 */
static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
                      loff_t block_end, int pcount,
                      struct page **pages, unsigned poffset,
                      int *errp)
{
    unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
    unsigned int bufsize = ISOFS_BUFFER_SIZE(inode);
    unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
    unsigned int bufmask = bufsize - 1;
    int i, block_size = block_end - block_start;
    z_stream stream = { .total_out = 0,
                .avail_in = 0,
                .avail_out = 0, };
    int zerr;
    int needblocks = (block_size + (block_start & bufmask) + bufmask)
                >> bufshift;
    int haveblocks;
    blkcnt_t blocknum;
    struct buffer_head **bhs;
    int curbh, curpage;

    if (block_size > deflateBound(1UL << zisofs_block_shift)) {
        *errp = -EIO;
        return 0;
    }
    /* Empty block? */
    if (block_size == 0) {
        for ( i = 0 ; i < pcount ; i++ ) {
            if (!pages[i])
                continue;
            memset(page_address(pages[i]), 0, PAGE_SIZE);
            flush_dcache_page(pages[i]);
            SetPageUptodate(pages[i]);
        }
        return ((loff_t)pcount) << PAGE_SHIFT;
    }

    /* Because zlib is not thread-safe, do all the I/O at the top. */
    blocknum = block_start >> bufshift;
    bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
    if (!bhs) {
        *errp = -ENOMEM;
        return 0;
    }
    haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
    ll_rw_block(REQ_OP_READ, haveblocks, bhs);

    curbh = 0;
    curpage = 0;
    /*
     * First block is special since it may be fractional.  We also wait for
     * it before grabbing the zlib mutex; odds are that the subsequent
     * blocks are going to come in in short order so we don't hold the zlib
     * mutex longer than necessary.
     */

    if (!bhs[0])
        goto b_eio;

    wait_on_buffer(bhs[0]);
    if (!buffer_uptodate(bhs[0])) {
        *errp = -EIO;
        goto b_eio;
    }

    stream.workspace = zisofs_zlib_workspace;
    mutex_lock(&zisofs_zlib_lock);

    zerr = zlib_inflateInit(&stream);
    if (zerr != Z_OK) {
        if (zerr == Z_MEM_ERROR)
            *errp = -ENOMEM;
        else
            *errp = -EIO;
        printk(KERN_DEBUG "zisofs: zisofs_inflateInit returned %d\n",
                   zerr);
        goto z_eio;
    }

    while (curpage < pcount && curbh < haveblocks &&
           zerr != Z_STREAM_END) {
        if (!stream.avail_out) {
            if (pages[curpage]) {
                stream.next_out = page_address(pages[curpage])
                        + poffset;
                stream.avail_out = PAGE_SIZE - poffset;
                poffset = 0;
            } else {
                stream.next_out = (void *)&zisofs_sink_page;
                stream.avail_out = PAGE_SIZE;
            }
        }
        if (!stream.avail_in) {
            wait_on_buffer(bhs[curbh]);
            if (!buffer_uptodate(bhs[curbh])) {
                *errp = -EIO;
                break;
            }
            stream.next_in  = bhs[curbh]->b_data +
                        (block_start & bufmask);
            stream.avail_in = min_t(unsigned, bufsize -
                        (block_start & bufmask),
                        block_size);
            block_size -= stream.avail_in;
            block_start = 0;
        }

        while (stream.avail_out && stream.avail_in) {
            zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
            if (zerr == Z_BUF_ERROR && stream.avail_in == 0)
                break;
            if (zerr == Z_STREAM_END)
                break;
            if (zerr != Z_OK) {
                /* EOF, error, or trying to read beyond end of input */
                if (zerr == Z_MEM_ERROR)
                    *errp = -ENOMEM;
                else {
                    printk(KERN_DEBUG
                           "zisofs: zisofs_inflate returned"
                           " %d, inode = %lu,"
                           " page idx = %d, bh idx = %d,"
                           " avail_in = %ld,"
                           " avail_out = %ld\n",
                           zerr, inode->i_ino, curpage,
                           curbh, stream.avail_in,
                           stream.avail_out);
                    *errp = -EIO;
                }
                goto inflate_out;
            }
        }

        if (!stream.avail_out) {
            /* This page completed */
            if (pages[curpage]) {
                flush_dcache_page(pages[curpage]);
                SetPageUptodate(pages[curpage]);
            }
            curpage++;
        }
        if (!stream.avail_in)
            curbh++;
    }
inflate_out:
    zlib_inflateEnd(&stream);

z_eio:
    mutex_unlock(&zisofs_zlib_lock);

b_eio:
    for (i = 0; i < haveblocks; i++)
        brelse(bhs[i]);
    kfree(bhs);
    return stream.total_out;
}
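
/*
 * The decompression loop above follows the standard zlib streaming pattern:
 * feed input a buffer at a time and hand zlib one page of output space at a
 * time, advancing to the next page whenever avail_out reaches zero. A
 * minimal userspace sketch of the same pattern, assuming zlib's userspace
 * API (inflateInit/inflate/inflateEnd) and a hypothetical 4 KiB page size;
 * it illustrates only the loop structure, not the buffer_head I/O.
 */
#include <string.h>
#include <zlib.h>

#define EXAMPLE_PAGE_SIZE 4096

/* Inflate @in_len bytes from @in into up to @out_pages page-sized chunks. */
static int example_inflate_pages(const unsigned char *in, size_t in_len,
                                 unsigned char *out, size_t out_pages)
{
    z_stream strm;
    size_t page = 0;
    int zerr;

    memset(&strm, 0, sizeof(strm));
    if (inflateInit(&strm) != Z_OK)
        return -1;

    strm.next_in = (unsigned char *)in;
    strm.avail_in = in_len;
    strm.next_out = out;
    strm.avail_out = EXAMPLE_PAGE_SIZE;

    while (page < out_pages && strm.avail_in) {
        zerr = inflate(&strm, Z_SYNC_FLUSH);
        if (zerr == Z_STREAM_END)
            break;
        if (zerr != Z_OK) {
            /* Corrupt stream, or no forward progress possible. */
            inflateEnd(&strm);
            return -1;
        }
        if (!strm.avail_out) {
            /* This output page is full; give zlib the next one. */
            page++;
            if (page < out_pages) {
                strm.next_out = out + page * EXAMPLE_PAGE_SIZE;
                strm.avail_out = EXAMPLE_PAGE_SIZE;
            }
        }
    }
    inflateEnd(&strm);
    return 0;
}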

/*
 * Uncompress data so that pages[full_page] is fully uptodate and possibly
 * fills in other pages if we have data for them.
 */
static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
                 struct page **pages)
{
    loff_t start_off, end_off;
    loff_t block_start, block_end;
    unsigned int header_size = ISOFS_I(inode)->i_format_parm[0];
    unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
    unsigned int blockptr;
    loff_t poffset = 0;
    blkcnt_t cstart_block, cend_block;
    struct buffer_head *bh;
    unsigned int blkbits = ISOFS_BUFFER_BITS(inode);
    unsigned int blksize = 1 << blkbits;
    int err;
    loff_t ret;

    BUG_ON(!pages[full_page]);

    /*
     * We want to read at least 'full_page' page. Because we have to
     * uncompress the whole compression block anyway, fill the surrounding
     * pages with the data we have anyway...
     */
    start_off = page_offset(pages[full_page]);
    end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size);

    cstart_block = start_off >> zisofs_block_shift;
    cend_block = (end_off + (1 << zisofs_block_shift) - 1)
            >> zisofs_block_shift;

    WARN_ON(start_off - (full_page << PAGE_SHIFT) !=
        ((cstart_block << zisofs_block_shift) & PAGE_MASK));

    /* Find the pointer to this specific chunk */
    /* Note: we're not using isonum_731() here because the data is known aligned */
    /* Note: header_size is in 32-bit words (4 bytes) */
    blockptr = (header_size + cstart_block) << 2;
    bh = isofs_bread(inode, blockptr >> blkbits);
    if (!bh)
        return -EIO;
    block_start = le32_to_cpu(*(__le32 *)
                (bh->b_data + (blockptr & (blksize - 1))));

    while (cstart_block < cend_block && pcount > 0) {
        /* Load end of the compressed block in the file */
        blockptr += 4;
        /* Traversed to next block? */
        if (!(blockptr & (blksize - 1))) {
            brelse(bh);

            bh = isofs_bread(inode, blockptr >> blkbits);
            if (!bh)
                return -EIO;
        }
        block_end = le32_to_cpu(*(__le32 *)
                (bh->b_data + (blockptr & (blksize - 1))));
        if (block_start > block_end) {
            brelse(bh);
            return -EIO;
        }
        err = 0;
        ret = zisofs_uncompress_block(inode, block_start, block_end,
                          pcount, pages, poffset, &err);
        poffset += ret;
        pages += poffset >> PAGE_SHIFT;
        pcount -= poffset >> PAGE_SHIFT;
        full_page -= poffset >> PAGE_SHIFT;
        poffset &= ~PAGE_MASK;

        if (err) {
            brelse(bh);
            /*
             * Did we finish reading the page we really wanted
             * to read?
             */
            if (full_page < 0)
                return 0;
            return err;
        }

        block_start = block_end;
        cstart_block++;
    }

    if (poffset && *pages) {
        memset(page_address(*pages) + poffset, 0,
               PAGE_SIZE - poffset);
        flush_dcache_page(*pages);
        SetPageUptodate(*pages);
    }
    return 0;
}
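
/*
 * The block-pointer lookup above relies on the zisofs on-disk layout: the
 * file header occupies header_size 32-bit words and is followed by a table
 * of little-endian 32-bit byte offsets, where entries N and N+1 bound
 * compressed block N. A minimal userspace sketch of that arithmetic,
 * assuming the compressed file is already in memory; the names are
 * hypothetical and exist only to illustrate the indexing.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t example_get_le32(const unsigned char *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Return the [start, end) byte extent of compressed block @cblock. */
static void example_cblock_extent(const unsigned char *cfile,
                                  unsigned int header_size_words,
                                  uint64_t cblock,
                                  uint32_t *start, uint32_t *end)
{
    /* Byte offset of the table entry for @cblock, as in the code above. */
    size_t blockptr = (size_t)(header_size_words + cblock) << 2;

    *start = example_get_le32(cfile + blockptr);
    *end = example_get_le32(cfile + blockptr + 4);
}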

/*
 * When decompressing, we typically obtain more than one page
 * per reference.  We inject the additional pages into the page
 * cache as a form of readahead.
 */
static int zisofs_read_folio(struct file *file, struct folio *folio)
{
    struct page *page = &folio->page;
    struct inode *inode = file_inode(file);
    struct address_space *mapping = inode->i_mapping;
    int err;
    int i, pcount, full_page;
    unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1];
    unsigned int zisofs_pages_per_cblock =
        PAGE_SHIFT <= zisofs_block_shift ?
        (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
    struct page **pages;
    pgoff_t index = page->index, end_index;

    end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    /*
     * If this page is wholly outside i_size we just return zero;
     * do_generic_file_read() will handle this for us
     */
    if (index >= end_index) {
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
    }

    if (PAGE_SHIFT <= zisofs_block_shift) {
        /* We have already been given one page, this is the one
           we must do. */
        full_page = index & (zisofs_pages_per_cblock - 1);
        pcount = min_t(int, zisofs_pages_per_cblock,
            end_index - (index & ~(zisofs_pages_per_cblock - 1)));
        index -= full_page;
    } else {
        full_page = 0;
        pcount = 1;
    }
    pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
                    sizeof(*pages), GFP_KERNEL);
    if (!pages) {
        unlock_page(page);
        return -ENOMEM;
    }
    pages[full_page] = page;

    for (i = 0; i < pcount; i++, index++) {
        if (i != full_page)
            pages[i] = grab_cache_page_nowait(mapping, index);
        if (pages[i]) {
            ClearPageError(pages[i]);
            kmap(pages[i]);
        }
    }

    err = zisofs_fill_pages(inode, full_page, pcount, pages);

    /* Release any residual pages, do not SetPageUptodate */
    for (i = 0; i < pcount; i++) {
        if (pages[i]) {
            flush_dcache_page(pages[i]);
            if (i == full_page && err)
                SetPageError(pages[i]);
            kunmap(pages[i]);
            unlock_page(pages[i]);
            if (i != full_page)
                put_page(pages[i]);
        }
    }

    /* At this point, err contains 0 or -EIO depending on the "critical" page */
    kfree(pages);
    return err;
}
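
/*
 * The readahead above depends on a little index arithmetic: when a
 * compressed block spans several pages (PAGE_SHIFT <= zisofs_block_shift),
 * the requested page's position inside its block and the index of the
 * block's first page follow directly from the two shifts. A minimal sketch
 * of that arithmetic, assuming 4 KiB pages; the names are illustrative only.
 */
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12    /* assumed 4 KiB page size */

static void example_cblock_geometry(unsigned int zisofs_block_shift,
                                    uint64_t page_index,
                                    unsigned int *pages_per_cblock,
                                    unsigned int *page_in_cblock,
                                    uint64_t *first_page_index)
{
    *pages_per_cblock = 1u << (zisofs_block_shift - EXAMPLE_PAGE_SHIFT);
    /* Position of the requested page within its compressed block. */
    *page_in_cblock = page_index & (*pages_per_cblock - 1);
    /* Page-cache index of the first page covered by that block. */
    *first_page_index = page_index - *page_in_cblock;
}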

const struct address_space_operations zisofs_aops = {
    .read_folio = zisofs_read_folio,
    /* No bmap operation supported */
};

int __init zisofs_init(void)
{
    zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
    if ( !zisofs_zlib_workspace )
        return -ENOMEM;

    return 0;
}

void zisofs_cleanup(void)
{
    vfree(zisofs_zlib_workspace);
}