Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Squashfs - a compressed read only filesystem for Linux
0004  *
0005  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
0006  * Phillip Lougher <phillip@squashfs.org.uk>
0007  *
0008  * file.c
0009  */
0010 
0011 /*
0012  * This file contains code for handling regular files.  A regular file
0013  * consists of a sequence of contiguous compressed blocks, and/or a
0014  * compressed fragment block (tail-end packed block).   The compressed size
0015  * of each datablock is stored in a block list contained within the
0016  * file inode (itself stored in one or more compressed metadata blocks).
0017  *
0018  * To speed up access to datablocks when reading 'large' files (256 Mbytes or
0019  * larger), the code implements an index cache that caches the mapping from
0020  * block index to datablock location on disk.
0021  *
0022  * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
0023  * retaining a simple and space-efficient block list on disk.  The cache
0024  * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
0025  * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
0026  * The index cache is designed to be memory efficient, and by default uses
0027  * 16 KiB.
0028  */
0029 
0030 #include <linux/fs.h>
0031 #include <linux/vfs.h>
0032 #include <linux/kernel.h>
0033 #include <linux/slab.h>
0034 #include <linux/string.h>
0035 #include <linux/pagemap.h>
0036 #include <linux/mutex.h>
0037 
0038 #include "squashfs_fs.h"
0039 #include "squashfs_fs_sb.h"
0040 #include "squashfs_fs_i.h"
0041 #include "squashfs.h"
0042 #include "page_actor.h"
0043 
0044 /*
0045  * Locate cache slot in range [offset, index] for specified inode.  If
0046  * there's more than one return the slot closest to index.
0047  */
0048 static struct meta_index *locate_meta_index(struct inode *inode, int offset,
0049                 int index)
0050 {
0051     struct meta_index *meta = NULL;
0052     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
0053     int i;
0054 
0055     mutex_lock(&msblk->meta_index_mutex);
0056 
0057     TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
0058 
0059     if (msblk->meta_index == NULL)
0060         goto not_allocated;
0061 
0062     for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
0063         if (msblk->meta_index[i].inode_number == inode->i_ino &&
0064                 msblk->meta_index[i].offset >= offset &&
0065                 msblk->meta_index[i].offset <= index &&
0066                 msblk->meta_index[i].locked == 0) {
0067             TRACE("locate_meta_index: entry %d, offset %d\n", i,
0068                     msblk->meta_index[i].offset);
0069             meta = &msblk->meta_index[i];
0070             offset = meta->offset;
0071         }
0072     }
0073 
0074     if (meta)
0075         meta->locked = 1;
0076 
0077 not_allocated:
0078     mutex_unlock(&msblk->meta_index_mutex);
0079 
0080     return meta;
0081 }
0082 
0083 
/*
 * Find and initialise an empty cache slot for index offset.
 *
 * On first use the whole slot array is allocated lazily; slots are then
 * handed out round-robin via msblk->next_meta_index, skipping any slot
 * currently locked by another reader.  The returned slot is initialised
 * for this inode (entries == 0) and returned locked; the caller must
 * release it with release_meta_index().  Returns NULL on allocation
 * failure or when every slot is locked.
 */
static struct meta_index *empty_meta_index(struct inode *inode, int offset,
                int skip)
{
    struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
    struct meta_index *meta = NULL;
    int i;

    mutex_lock(&msblk->meta_index_mutex);

    TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);

    if (msblk->meta_index == NULL) {
        /*
         * First time cache index has been used, allocate and
         * initialise.  The cache index could be allocated at
         * mount time but doing it here means it is allocated only
         * if a 'large' file is read.
         */
        msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
            sizeof(*(msblk->meta_index)), GFP_KERNEL);
        if (msblk->meta_index == NULL) {
            ERROR("Failed to allocate meta_index\n");
            goto failed;
        }
        for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
            msblk->meta_index[i].inode_number = 0;
            msblk->meta_index[i].locked = 0;
        }
        msblk->next_meta_index = 0;
    }

    /*
     * Advance round-robin past locked slots; i counts down so the scan
     * terminates after one full revolution even if everything is locked.
     */
    for (i = SQUASHFS_META_SLOTS; i &&
            msblk->meta_index[msblk->next_meta_index].locked; i--)
        msblk->next_meta_index = (msblk->next_meta_index + 1) %
            SQUASHFS_META_SLOTS;

    if (i == 0) {
        /* All slots locked - caller falls back to uncached lookup */
        TRACE("empty_meta_index: failed!\n");
        goto failed;
    }

    TRACE("empty_meta_index: returned meta entry %d, %p\n",
            msblk->next_meta_index,
            &msblk->meta_index[msblk->next_meta_index]);

    meta = &msblk->meta_index[msblk->next_meta_index];
    msblk->next_meta_index = (msblk->next_meta_index + 1) %
            SQUASHFS_META_SLOTS;

    /* Claim the slot for this inode; evicts whatever was cached before */
    meta->inode_number = inode->i_ino;
    meta->offset = offset;
    meta->skip = skip;
    meta->entries = 0;
    meta->locked = 1;

failed:
    mutex_unlock(&msblk->meta_index_mutex);
    return meta;
}
0146 
0147 
0148 static void release_meta_index(struct inode *inode, struct meta_index *meta)
0149 {
0150     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
0151     mutex_lock(&msblk->meta_index_mutex);
0152     meta->locked = 0;
0153     mutex_unlock(&msblk->meta_index_mutex);
0154 }
0155 
0156 
0157 /*
0158  * Read the next n blocks from the block list, starting from
0159  * metadata block <start_block, offset>.
0160  */
0161 static long long read_indexes(struct super_block *sb, int n,
0162                 u64 *start_block, int *offset)
0163 {
0164     int err, i;
0165     long long block = 0;
0166     __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
0167 
0168     if (blist == NULL) {
0169         ERROR("read_indexes: Failed to allocate block_list\n");
0170         return -ENOMEM;
0171     }
0172 
0173     while (n) {
0174         int blocks = min_t(int, n, PAGE_SIZE >> 2);
0175 
0176         err = squashfs_read_metadata(sb, blist, start_block,
0177                 offset, blocks << 2);
0178         if (err < 0) {
0179             ERROR("read_indexes: reading block [%llx:%x]\n",
0180                 *start_block, *offset);
0181             goto failure;
0182         }
0183 
0184         for (i = 0; i < blocks; i++) {
0185             int size = squashfs_block_size(blist[i]);
0186             if (size < 0) {
0187                 err = size;
0188                 goto failure;
0189             }
0190             block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
0191         }
0192         n -= blocks;
0193     }
0194 
0195     kfree(blist);
0196     return block;
0197 
0198 failure:
0199     kfree(blist);
0200     return err;
0201 }
0202 
0203 
0204 /*
0205  * Each cache index slot has SQUASHFS_META_ENTRIES, each of which
0206  * can cache one index -> datablock/blocklist-block mapping.  We wish
0207  * to distribute these over the length of the file, entry[0] maps index x,
0208  * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
0209  * The larger the file, the greater the skip factor.  The skip factor is
0210  * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
0211  * the number of metadata blocks that need to be read fits into the cache.
0212  * If the skip factor is limited in this way then the file will use multiple
0213  * slots.
0214  */
0215 static inline int calculate_skip(u64 blocks)
0216 {
0217     u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
0218          * SQUASHFS_META_INDEXES);
0219     return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
0220 }
0221 
0222 
/*
 * Search and grow the index cache for the specified inode, returning the
 * on-disk locations of the datablock and block list metadata block
 * <index_block, index_offset> for index (scaled to nearest cache index).
 *
 * Returns the (unscaled) block index corresponding to the mapping written
 * into *index_block/*index_offset/*data_block - this may be smaller than
 * the requested index, in which case the caller reads the remaining
 * indexes itself - or a negative errno on read failure.
 */
static int fill_meta_index(struct inode *inode, int index,
        u64 *index_block, int *index_offset, u64 *data_block)
{
    struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
    int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
    int offset = 0;
    struct meta_index *meta;
    struct meta_entry *meta_entry;
    /* Cursor starts at the inode's block list and first datablock */
    u64 cur_index_block = squashfs_i(inode)->block_list_start;
    int cur_offset = squashfs_i(inode)->offset;
    u64 cur_data_block = squashfs_i(inode)->start;
    int err, i;

    /*
     * Scale index to cache index (cache slot entry)
     */
    index /= SQUASHFS_META_INDEXES * skip;

    while (offset < index) {
        meta = locate_meta_index(inode, offset + 1, index);

        if (meta == NULL) {
            /* No cached slot covers this range; claim a fresh one */
            meta = empty_meta_index(inode, offset + 1, skip);
            if (meta == NULL)
                /* Cache exhausted - return best mapping so far */
                goto all_done;
        } else {
            /* Fast-forward the cursor to the closest cached entry */
            offset = index < meta->offset + meta->entries ? index :
                meta->offset + meta->entries - 1;
            meta_entry = &meta->meta_entry[offset - meta->offset];
            cur_index_block = meta_entry->index_block +
                msblk->inode_table;
            cur_offset = meta_entry->offset;
            cur_data_block = meta_entry->data_block;
            TRACE("get_meta_index: offset %d, meta->offset %d, "
                "meta->entries %d\n", offset, meta->offset,
                meta->entries);
            TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
                " data_block 0x%llx\n", cur_index_block,
                cur_offset, cur_data_block);
        }

        /*
         * If necessary grow cache slot by reading block list.  Cache
         * slot is extended up to index or to the end of the slot, in
         * which case further slots will be used.
         */
        for (i = meta->offset + meta->entries; i <= index &&
                i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
            int blocks = skip * SQUASHFS_META_INDEXES;
            long long res = read_indexes(inode->i_sb, blocks,
                    &cur_index_block, &cur_offset);

            if (res < 0) {
                if (meta->entries == 0)
                    /*
                     * Don't leave an empty slot on read
                     * error allocated to this inode...
                     */
                    meta->inode_number = 0;
                err = res;
                goto failed;
            }

            /* res is the summed compressed size of the skipped blocks */
            cur_data_block += res;
            meta_entry = &meta->meta_entry[i - meta->offset];
            meta_entry->index_block = cur_index_block -
                msblk->inode_table;
            meta_entry->offset = cur_offset;
            meta_entry->data_block = cur_data_block;
            meta->entries++;
            offset++;
        }

        TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
                meta->offset, meta->entries);

        release_meta_index(inode, meta);
    }

all_done:
    *index_block = cur_index_block;
    *index_offset = cur_offset;
    *data_block = cur_data_block;

    /*
     * Scale cache index (cache slot entry) to index
     */
    return offset * SQUASHFS_META_INDEXES * skip;

failed:
    release_meta_index(inode, meta);
    return err;
}
0321 
0322 
/*
 * Get the on-disk location and compressed size of the datablock
 * specified by index.  Fill_meta_index() does most of the work.
 *
 * On success *block holds the datablock's disk location and the return
 * value is its length (compressed-size encoding, 0 for a sparse block);
 * a negative errno is returned on failure.
 */
static int read_blocklist(struct inode *inode, int index, u64 *block)
{
    u64 start;
    long long blks;
    int offset;
    __le32 size;
    int res = fill_meta_index(inode, index, &start, &offset, block);

    TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
               " 0x%x, block 0x%llx\n", res, index, start, offset,
            *block);

    if (res < 0)
        return res;

    /*
     * res contains the index of the mapping returned by fill_meta_index(),
     * this will likely be less than the desired index (because the
     * meta_index cache works at a higher granularity).  Read any
     * extra block indexes needed.
     */
    if (res < index) {
        blks = read_indexes(inode->i_sb, index - res, &start, &offset);
        if (blks < 0)
            return (int) blks;
        *block += blks;
    }

    /*
     * Read length of block specified by index.
     */
    res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
            sizeof(size));
    if (res < 0)
        return res;
    /* Validates and decodes the on-disk length word */
    return squashfs_block_size(size);
}
0364 
0365 void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
0366 {
0367     int copied;
0368     void *pageaddr;
0369 
0370     pageaddr = kmap_atomic(page);
0371     copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
0372     memset(pageaddr + copied, 0, PAGE_SIZE - copied);
0373     kunmap_atomic(pageaddr);
0374 
0375     flush_dcache_page(page);
0376     if (copied == avail)
0377         SetPageUptodate(page);
0378     else
0379         SetPageError(page);
0380 }
0381 
0382 /* Copy data into page cache  */
0383 void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
0384     int bytes, int offset)
0385 {
0386     struct inode *inode = page->mapping->host;
0387     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
0388     int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
0389     int start_index = page->index & ~mask, end_index = start_index | mask;
0390 
0391     /*
0392      * Loop copying datablock into pages.  As the datablock likely covers
0393      * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
0394      * grab the pages from the page cache, except for the page that we've
0395      * been called to fill.
0396      */
0397     for (i = start_index; i <= end_index && bytes > 0; i++,
0398             bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
0399         struct page *push_page;
0400         int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;
0401 
0402         TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);
0403 
0404         push_page = (i == page->index) ? page :
0405             grab_cache_page_nowait(page->mapping, i);
0406 
0407         if (!push_page)
0408             continue;
0409 
0410         if (PageUptodate(push_page))
0411             goto skip_page;
0412 
0413         squashfs_fill_page(push_page, buffer, offset, avail);
0414 skip_page:
0415         unlock_page(push_page);
0416         if (i != page->index)
0417             put_page(push_page);
0418     }
0419 }
0420 
0421 /* Read datablock stored packed inside a fragment (tail-end packed block) */
0422 static int squashfs_readpage_fragment(struct page *page, int expected)
0423 {
0424     struct inode *inode = page->mapping->host;
0425     struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
0426         squashfs_i(inode)->fragment_block,
0427         squashfs_i(inode)->fragment_size);
0428     int res = buffer->error;
0429 
0430     if (res)
0431         ERROR("Unable to read page, block %llx, size %x\n",
0432             squashfs_i(inode)->fragment_block,
0433             squashfs_i(inode)->fragment_size);
0434     else
0435         squashfs_copy_cache(page, buffer, expected,
0436             squashfs_i(inode)->fragment_offset);
0437 
0438     squashfs_cache_put(buffer);
0439     return res;
0440 }
0441 
/* Zero-fill the page cache pages covering a sparse (holes-only) datablock */
static int squashfs_readpage_sparse(struct page *page, int expected)
{
    /* NULL buffer makes squashfs_copy_cache zero-fill each page */
    squashfs_copy_cache(page, NULL, expected, 0);
    return 0;
}
0447 
/*
 * ->read_folio address-space operation: read the folio's data, which lives
 * either in a regular datablock, a sparse (all-zero) block, or the inode's
 * tail-end fragment.  On any failure (or when the folio lies past EOF) the
 * page is zero-filled before being unlocked.
 */
static int squashfs_read_folio(struct file *file, struct folio *folio)
{
    struct page *page = &folio->page;
    struct inode *inode = page->mapping->host;
    struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
    /* Datablock index containing this page */
    int index = page->index >> (msblk->block_log - PAGE_SHIFT);
    int file_end = i_size_read(inode) >> msblk->block_log;
    /* Bytes of valid data in this datablock (short for the last block) */
    int expected = index == file_end ?
            (i_size_read(inode) & (msblk->block_size - 1)) :
             msblk->block_size;
    int res = 0;
    void *pageaddr;

    TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
                page->index, squashfs_i(inode)->start);

    /* Page entirely beyond EOF: fall through to zero-fill */
    if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
                    PAGE_SHIFT))
        goto out;

    /* Last block is a fragment only when fragment_block is valid */
    if (index < file_end || squashfs_i(inode)->fragment_block ==
                    SQUASHFS_INVALID_BLK) {
        u64 block = 0;

        res = read_blocklist(inode, index, &block);
        if (res < 0)
            goto error_out;

        if (res == 0)
            /* Zero-length block-list entry denotes a sparse block */
            res = squashfs_readpage_sparse(page, expected);
        else
            res = squashfs_readpage_block(page, block, res, expected);
    } else
        res = squashfs_readpage_fragment(page, expected);

    if (!res)
        return 0;

error_out:
    SetPageError(page);
out:
    /* Error or past-EOF: present a zeroed page to the reader */
    pageaddr = kmap_atomic(page);
    memset(pageaddr, 0, PAGE_SIZE);
    kunmap_atomic(pageaddr);
    flush_dcache_page(page);
    if (res == 0)
        SetPageUptodate(page);
    unlock_page(page);

    return res;
}
0499 
0500 static int squashfs_readahead_fragment(struct page **page,
0501     unsigned int pages, unsigned int expected)
0502 {
0503     struct inode *inode = page[0]->mapping->host;
0504     struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
0505         squashfs_i(inode)->fragment_block,
0506         squashfs_i(inode)->fragment_size);
0507     struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
0508     unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
0509 
0510     if (buffer->error)
0511         goto out;
0512 
0513     expected += squashfs_i(inode)->fragment_offset;
0514 
0515     for (n = 0; n < pages; n++) {
0516         unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
0517         unsigned int offset = base + squashfs_i(inode)->fragment_offset;
0518 
0519         if (expected > offset) {
0520             unsigned int avail = min_t(unsigned int, expected -
0521                 offset, PAGE_SIZE);
0522 
0523             squashfs_fill_page(page[n], buffer, offset, avail);
0524         }
0525 
0526         unlock_page(page[n]);
0527         put_page(page[n]);
0528     }
0529 
0530 out:
0531     squashfs_cache_put(buffer);
0532     return buffer->error;
0533 }
0534 
/*
 * ->readahead address-space operation.  The request is expanded to whole
 * datablock boundaries, then processed one datablock-sized batch of pages
 * at a time: fragment-backed tails go through squashfs_readahead_fragment(),
 * regular blocks are decompressed directly into the batch's pages via a
 * page actor.  Batches that cannot be handled (past EOF, straddling blocks,
 * errors) are simply unlocked un-uptodate, leaving ->read_folio to retry.
 */
static void squashfs_readahead(struct readahead_control *ractl)
{
    struct inode *inode = ractl->mapping->host;
    struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
    size_t mask = (1UL << msblk->block_log) - 1;
    unsigned short shift = msblk->block_log - PAGE_SHIFT;
    /* Round the request down/up to datablock boundaries */
    loff_t start = readahead_pos(ractl) & ~mask;
    size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
    struct squashfs_page_actor *actor;
    unsigned int nr_pages = 0;
    struct page **pages;
    int i, file_end = i_size_read(inode) >> msblk->block_log;
    unsigned int max_pages = 1UL << shift;

    readahead_expand(ractl, start, (len | mask) + 1);

    pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
    if (!pages)
        return;

    for (;;) {
        pgoff_t index;
        int res, bsize;
        u64 block = 0;
        unsigned int expected;

        /* Grab up to one datablock's worth of locked pages */
        nr_pages = __readahead_batch(ractl, pages, max_pages);
        if (!nr_pages)
            break;

        if (readahead_pos(ractl) >= i_size_read(inode))
            goto skip_pages;

        index = pages[0]->index >> shift;
        /* Only handle batches that lie within a single datablock */
        if ((pages[nr_pages - 1]->index >> shift) != index)
            goto skip_pages;

        expected = index == file_end ?
               (i_size_read(inode) & (msblk->block_size - 1)) :
                msblk->block_size;

        if (index == file_end && squashfs_i(inode)->fragment_block !=
                        SQUASHFS_INVALID_BLK) {
            res = squashfs_readahead_fragment(pages, nr_pages,
                              expected);
            if (res)
                goto skip_pages;
            continue;
        }

        bsize = read_blocklist(inode, index, &block);
        /*
         * bsize == 0 is a sparse block; negative bsize (read error)
         * falls through to squashfs_read_data(), which is presumably
         * expected to fail cleanly - NOTE(review): confirm, an
         * explicit bsize <= 0 check would be clearer.
         */
        if (bsize == 0)
            goto skip_pages;

        actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
                             expected);
        if (!actor)
            goto skip_pages;

        res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

        squashfs_page_actor_free(actor);

        if (res == expected) {
            int bytes;

            /* Last page (if present) may have trailing bytes not filled */
            bytes = res % PAGE_SIZE;
            if (pages[nr_pages - 1]->index == file_end && bytes)
                memzero_page(pages[nr_pages - 1], bytes,
                         PAGE_SIZE - bytes);

            for (i = 0; i < nr_pages; i++) {
                flush_dcache_page(pages[i]);
                SetPageUptodate(pages[i]);
            }
        }

        for (i = 0; i < nr_pages; i++) {
            unlock_page(pages[i]);
            put_page(pages[i]);
        }
    }

    kfree(pages);
    return;

skip_pages:
    /* Release the batch without marking uptodate; read_folio will retry */
    for (i = 0; i < nr_pages; i++) {
        unlock_page(pages[i]);
        put_page(pages[i]);
    }
    kfree(pages);
}
0629 
/* Page-cache operations for regular file data (read-only filesystem) */
const struct address_space_operations squashfs_aops = {
    .read_folio = squashfs_read_folio,
    .readahead = squashfs_readahead
};