// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN 4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by data
 *     payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For an inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, thus there can be
 *     at most 3 padding zeros at the end of a sector.
 *
 * 2.2 Data Payload
 *     Variable size. The size upper limit is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */

#define WORKSPACE_BUF_LENGTH    (lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH   (lzo1x_worst_compress(PAGE_SIZE))

struct workspace {
    void *mem;
    void *buf;  /* where decompressed data goes */
    void *cbuf; /* where compressed data goes */
    struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
    struct workspace *workspace = list_entry(ws, struct workspace, list);

    kvfree(workspace->buf);
    kvfree(workspace->cbuf);
    kvfree(workspace->mem);
    kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
    struct workspace *workspace;

    workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
    if (!workspace)
        return ERR_PTR(-ENOMEM);

    workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
    workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL);
    workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL);
    if (!workspace->mem || !workspace->buf || !workspace->cbuf)
        goto fail;

    INIT_LIST_HEAD(&workspace->list);

    return &workspace->list;
fail:
    lzo_free_workspace(&workspace->list);
    return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
    __le32 dlen;

    dlen = cpu_to_le32(len);
    memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
    __le32 dlen;

    memcpy(&dlen, buf, LZO_LEN);
    return le32_to_cpu(dlen);
}

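/*
 * Example (illustrative sketch only, never called by the code below): walk
 * the on-disk layout documented at the top of this file using
 * read_compress_length().  The function name and the @buf/@buf_len
 * parameters are made up for this example, and it assumes the whole
 * compressed extent is already linear in memory, which the real code in
 * this file never relies on.
 */
static void __maybe_unused lzo_walk_segments_example(const char *buf,
                                                     u32 buf_len,
                                                     u32 sectorsize)
{
    /* 1. Header: LE32 total size of the compressed data, header included */
    u32 total_len = read_compress_length(buf);
    u32 cur = LZO_LEN;

    while (cur < total_len && cur + LZO_LEN <= buf_len) {
        /* 2.1 Segment header: LE32 payload size, header excluded */
        u32 seg_len = read_compress_length(buf + cur);

        /* 2.2 Data payload of @seg_len bytes follows the segment header */
        cur += LZO_LEN + seg_len;

        /*
         * If fewer than LZO_LEN bytes are left in the sector, they are
         * padding zeros and the next segment header starts at the next
         * sector boundary.
         */
        if (sectorsize - (cur % sectorsize) < LZO_LEN)
            cur = round_up(cur, sectorsize);
    }
}
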
/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header.
 *   If not, pad the sector with at most LZO_LEN - 1 (i.e. 3) bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
                    size_t compressed_size,
                    struct page **out_pages,
                    unsigned long max_nr_page,
                    u32 *cur_out,
                    const u32 sectorsize)
{
    u32 sector_bytes_left;
    u32 orig_out;
    struct page *cur_page;
    char *kaddr;

    if ((*cur_out / PAGE_SIZE) >= max_nr_page)
        return -E2BIG;

    /*
     * We never allow a segment header to cross a sector boundary; the
     * previous run should have ensured there is enough space left inside
     * the sector.
     */
    ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

    cur_page = out_pages[*cur_out / PAGE_SIZE];
    /* Allocate a new page */
    if (!cur_page) {
        cur_page = alloc_page(GFP_NOFS);
        if (!cur_page)
            return -ENOMEM;
        out_pages[*cur_out / PAGE_SIZE] = cur_page;
    }

    kaddr = kmap_local_page(cur_page);
    write_compress_length(kaddr + offset_in_page(*cur_out),
                  compressed_size);
    *cur_out += LZO_LEN;

    orig_out = *cur_out;

    /* Copy compressed data */
    while (*cur_out - orig_out < compressed_size) {
        u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
                     orig_out + compressed_size - *cur_out);

        kunmap_local(kaddr);

        if ((*cur_out / PAGE_SIZE) >= max_nr_page)
            return -E2BIG;

        cur_page = out_pages[*cur_out / PAGE_SIZE];
        /* Allocate a new page */
        if (!cur_page) {
            cur_page = alloc_page(GFP_NOFS);
            if (!cur_page)
                return -ENOMEM;
            out_pages[*cur_out / PAGE_SIZE] = cur_page;
        }
        kaddr = kmap_local_page(cur_page);

        memcpy(kaddr + offset_in_page(*cur_out),
               compressed_data + *cur_out - orig_out, copy_len);

        *cur_out += copy_len;
    }

    /*
     * Check if we can fit the next segment header into the remaining space
     * of the sector.
     */
    sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
    if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
        goto out;

    /* The remaining space is not enough for a header, pad it with zeros */
    memset(kaddr + offset_in_page(*cur_out), 0,
           sector_bytes_left);
    *cur_out += sector_bytes_left;

out:
    kunmap_local(kaddr);
    return 0;
}

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
        u64 start, struct page **pages, unsigned long *out_pages,
        unsigned long *total_in, unsigned long *total_out)
{
    struct workspace *workspace = list_entry(ws, struct workspace, list);
    const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
    struct page *page_in = NULL;
    char *sizes_ptr;
    const unsigned long max_nr_page = *out_pages;
    int ret = 0;
    /* Points to the file offset of input data */
    u64 cur_in = start;
    /* Points to the current output byte */
    u32 cur_out = 0;
    u32 len = *total_out;

    ASSERT(max_nr_page > 0);
    *out_pages = 0;
    *total_out = 0;
    *total_in = 0;

    /*
     * Skip the header for now; we will come back later and write the total
     * compressed size.
     */
    cur_out += LZO_LEN;
    while (cur_in < start + len) {
        char *data_in;
        const u32 sectorsize_mask = sectorsize - 1;
        u32 sector_off = (cur_in - start) & sectorsize_mask;
        u32 in_len;
        size_t out_len;

        /* Get the input page first */
        if (!page_in) {
            page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
            ASSERT(page_in);
        }

        /* Compress at most one sector of data each time */
        in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
        ASSERT(in_len);
        data_in = kmap_local_page(page_in);
        ret = lzo1x_1_compress(data_in +
                       offset_in_page(cur_in), in_len,
                       workspace->cbuf, &out_len,
                       workspace->mem);
        kunmap_local(data_in);
        if (ret < 0) {
            pr_debug("BTRFS: lzo in loop returned %d\n", ret);
            ret = -EIO;
            goto out;
        }

        ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
                           pages, max_nr_page,
                           &cur_out, sectorsize);
        if (ret < 0)
            goto out;

        cur_in += in_len;

        /*
         * Check if we're making the data bigger after two sectors; if so,
         * give up.
         */
        if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
            ret = -E2BIG;
            goto out;
        }

        /* Check if we have reached a page boundary */
        if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
            put_page(page_in);
            page_in = NULL;
        }
    }

    /* Store the size of all chunks of compressed data */
    sizes_ptr = kmap_local_page(pages[0]);
    write_compress_length(sizes_ptr, cur_out);
    kunmap_local(sizes_ptr);

    ret = 0;
    *total_out = cur_out;
    *total_in = cur_in - start;
out:
    if (page_in)
        put_page(page_in);
    *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
    return ret;
}
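
/*
 * Hypothetical caller sketch (illustration only, not how btrfs actually
 * drives this code): shows the in/out contract of lzo_compress_pages().
 * On entry *out_pages holds the maximum number of pages the caller is
 * willing to use and *total_out the number of input bytes to compress; on
 * return they hold the pages actually filled and the compressed size.  The
 * function name and locals below are made up for this example.
 */
static int __maybe_unused lzo_compress_example(struct list_head *ws,
                                               struct address_space *mapping,
                                               u64 start, unsigned long len)
{
    unsigned long nr_pages = DIV_ROUND_UP(len, PAGE_SIZE); /* in: page budget */
    unsigned long total_in = 0;
    unsigned long total_out = len;  /* in: number of bytes to compress */
    struct page **pages;
    unsigned long i;
    int ret;

    pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
    if (!pages)
        return -ENOMEM;

    ret = lzo_compress_pages(ws, mapping, start, pages, &nr_pages,
                             &total_in, &total_out);
    /*
     * On success, nr_pages is the number of pages filled and total_out is
     * the compressed length, including the LZO_LEN header.
     */

    for (i = 0; i < nr_pages; i++)
        if (pages[i])
            put_page(pages[i]);
    kfree(pages);
    return ret;
}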

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload itself has no padding; we only need to handle page switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
                    char *dest, u32 len, u32 *cur_in)
{
    u32 orig_in = *cur_in;

    while (*cur_in < orig_in + len) {
        struct page *cur_page;
        u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
                      orig_in + len - *cur_in);

        ASSERT(copy_len);
        cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

        memcpy_from_page(dest + *cur_in - orig_in, cur_page,
                 offset_in_page(*cur_in), copy_len);

        *cur_in += copy_len;
    }
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
    struct workspace *workspace = list_entry(ws, struct workspace, list);
    const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
    const u32 sectorsize = fs_info->sectorsize;
    char *kaddr;
    int ret;
    /* Compressed data length, can be unaligned */
    u32 len_in;
    /* Offset inside the compressed data */
    u32 cur_in = 0;
    /* Bytes decompressed so far */
    u32 cur_out = 0;

    kaddr = kmap_local_page(cb->compressed_pages[0]);
    len_in = read_compress_length(kaddr);
    kunmap_local(kaddr);
    cur_in += LZO_LEN;

    /*
     * LZO header length check
     *
     * The total length should not exceed the maximum extent length, and
     * all sectors should be used.
     * If either check fails, the compressed extent is corrupted.
     */
    if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
        round_up(len_in, sectorsize) < cb->compressed_len) {
        btrfs_err(fs_info,
            "invalid lzo header, lzo len %u compressed len %u",
            len_in, cb->compressed_len);
        return -EUCLEAN;
    }

    /* Go through each lzo segment */
    while (cur_in < len_in) {
        struct page *cur_page;
        /* Length of the compressed segment */
        u32 seg_len;
        u32 sector_bytes_left;
        size_t out_len = lzo1x_worst_compress(sectorsize);

        /*
         * We should always have enough space for one segment header
         * inside the current sector.
         */
        ASSERT(cur_in / sectorsize ==
               (cur_in + LZO_LEN - 1) / sectorsize);
        cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
        ASSERT(cur_page);
        kaddr = kmap_local_page(cur_page);
        seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
        kunmap_local(kaddr);
        cur_in += LZO_LEN;

        if (seg_len > WORKSPACE_CBUF_LENGTH) {
            /*
             * seg_len should never be larger than the space we have
             * allocated for workspace->cbuf
             */
            btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
                    seg_len);
            ret = -EIO;
            goto out;
        }

        /* Copy the compressed segment payload into workspace */
        copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

        /* Decompress the data */
        ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
                        workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
            btrfs_err(fs_info, "failed to decompress");
            ret = -EIO;
            goto out;
        }

        /* Copy the data into inode pages */
        ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
        cur_out += out_len;

        /* All data read, exit */
        if (ret == 0)
            goto out;
        ret = 0;

        /* Check if the sector has enough space for a segment header */
        sector_bytes_left = sectorsize - (cur_in % sectorsize);
        if (sector_bytes_left >= LZO_LEN)
            continue;

        /* Skip the padding zeros */
        cur_in += sector_bytes_left;
    }
out:
    if (!ret)
        zero_fill_bio(cb->orig_bio);
    return ret;
}

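/*
 * Decompress a buffer holding exactly one segment, as used for inlined
 * extents (see the format description at the top of this file): a LZO_LEN
 * header whose value must equal @srclen, followed by one LZO_LEN segment
 * header, followed by the compressed payload.  The decompressed bytes
 * starting at @start_byte are copied to the beginning of @dest_page and the
 * rest of @destlen is zero filled.
 */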
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
        struct page *dest_page, unsigned long start_byte, size_t srclen,
        size_t destlen)
{
    struct workspace *workspace = list_entry(ws, struct workspace, list);
    size_t in_len;
    size_t out_len;
    size_t max_segment_len = WORKSPACE_BUF_LENGTH;
    int ret = 0;
    char *kaddr;
    unsigned long bytes;

    if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
        return -EUCLEAN;

    in_len = read_compress_length(data_in);
    if (in_len != srclen)
        return -EUCLEAN;
    data_in += LZO_LEN;

    in_len = read_compress_length(data_in);
    if (in_len != srclen - LZO_LEN * 2) {
        ret = -EUCLEAN;
        goto out;
    }
    data_in += LZO_LEN;

    out_len = PAGE_SIZE;
    ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
    if (ret != LZO_E_OK) {
        pr_warn("BTRFS: decompress failed!\n");
        ret = -EIO;
        goto out;
    }

    if (out_len < start_byte) {
        ret = -EIO;
        goto out;
    }

    /*
     * The caller is already checking against PAGE_SIZE, but let's move this
     * check closer to the memcpy/memset.
     */
    destlen = min_t(unsigned long, destlen, PAGE_SIZE);
    bytes = min_t(unsigned long, destlen, out_len - start_byte);

    kaddr = kmap_local_page(dest_page);
    memcpy(kaddr, workspace->buf + start_byte, bytes);

    /*
     * btrfs_getblock is doing a zero on the tail of the page too, but this
     * will cover anything missing from the decompressed data.
     */
    if (bytes < destlen)
        memset(kaddr + bytes, 0, destlen - bytes);
    kunmap_local(kaddr);
out:
    return ret;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
    .workspace_manager  = &wsm,
    .max_level      = 1,
    .default_level      = 1,
};