// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX    /* history window size */
#define LZ4_DISTANCE_MAX 65535  /* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES  (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

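/*
 * Worked example (illustrative, assuming 4 KiB pages; not upstream code):
 * LZ4_MAX_DISTANCE_PAGES = DIV_ROUND_UP(65535, 4096) + 1 = 16 + 1 = 17,
 * i.e. an LZ4 match can never reach back further than 17 decoded pages
 * (the +1 covers a window that straddles a page boundary). Likewise, for a
 * fully-filled 64 KiB pcluster the in-place safety margin is
 * LZ4_DECOMPRESS_INPLACE_MARGIN(65536) = (65536 >> 8) + 32 = 288 bytes.
 */
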
struct z_erofs_lz4_decompress_ctx {
    struct z_erofs_decompress_req *rq;
    /* # of encoded, decoded pages */
    unsigned int inpages, outpages;
    /* decoded block total length (used for in-place decompression) */
    unsigned int oend;
};

int z_erofs_load_lz4_config(struct super_block *sb,
                struct erofs_super_block *dsb,
                struct z_erofs_lz4_cfgs *lz4, int size)
{
    struct erofs_sb_info *sbi = EROFS_SB(sb);
    u16 distance;

    if (lz4) {
        if (size < sizeof(struct z_erofs_lz4_cfgs)) {
            erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
            return -EINVAL;
        }
        distance = le16_to_cpu(lz4->max_distance);

        sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
        if (!sbi->lz4.max_pclusterblks) {
            sbi->lz4.max_pclusterblks = 1;  /* reserved case */
        } else if (sbi->lz4.max_pclusterblks >
               Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
            erofs_err(sb, "too large lz4 pclusterblks %u",
                  sbi->lz4.max_pclusterblks);
            return -EINVAL;
        }
    } else {
        distance = le16_to_cpu(dsb->u1.lz4_max_distance);
        sbi->lz4.max_pclusterblks = 1;
    }

    sbi->lz4.max_distance_pages = distance ?
                    DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
                    LZ4_MAX_DISTANCE_PAGES;
    return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

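/*
 * Worked example (illustrative assumption: PAGE_SIZE == 4096): a filesystem
 * recording max_distance = 4097 needs DIV_ROUND_UP(4097, 4096) + 1 = 3
 * tracking pages, since an unaligned match window can straddle one extra
 * page boundary; distance == 0 means "unset" and falls back to
 * LZ4_MAX_DISTANCE_PAGES (17 with 4 KiB pages).
 */
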
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate
 * compression ratios (CR).
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
                    struct page **pagepool)
{
    struct z_erofs_decompress_req *rq = ctx->rq;
    struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
    unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
                       BITS_PER_LONG)] = { 0 };
    unsigned int lz4_max_distance_pages =
                EROFS_SB(rq->sb)->lz4.max_distance_pages;
    void *kaddr = NULL;
    unsigned int i, j, top;

    top = 0;
    for (i = j = 0; i < ctx->outpages; ++i, ++j) {
        struct page *const page = rq->out[i];
        struct page *victim;

        if (j >= lz4_max_distance_pages)
            j = 0;

        /* 'valid' bounced can only be tested after a complete round */
        if (!rq->fillgaps && test_bit(j, bounced)) {
            DBG_BUGON(i < lz4_max_distance_pages);
            DBG_BUGON(top >= lz4_max_distance_pages);
            availables[top++] = rq->out[i - lz4_max_distance_pages];
        }

        if (page) {
            __clear_bit(j, bounced);
            if (!PageHighMem(page)) {
                if (!i) {
                    kaddr = page_address(page);
                    continue;
                }
                if (kaddr &&
                    kaddr + PAGE_SIZE == page_address(page)) {
                    kaddr += PAGE_SIZE;
                    continue;
                }
            }
            kaddr = NULL;
            continue;
        }
        kaddr = NULL;
        __set_bit(j, bounced);

        if (top) {
            victim = availables[--top];
            get_page(victim);
        } else {
            victim = erofs_allocpage(pagepool,
                         GFP_KERNEL | __GFP_NOFAIL);
            set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
        }
        rq->out[i] = victim;
    }
    /* 1 if all output pages are consecutive in lowmem and directly usable */
    return kaddr ? 1 : 0;
}

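/*
 * Illustrative scenario (not upstream code): suppose
 * lz4_max_distance_pages = 3 and rq->out = {P0, NULL, P2, NULL, ...}.
 * The first NULL slot gets a freshly allocated short-lived bounce page and
 * its bit in bounced[] is set. Once a full round of 3 slots has passed, a
 * slot still marked bounced lies beyond the LZ4 match window, so its page
 * is pushed onto availables[] and reused for later gaps instead of
 * allocating again (unless rq->fillgaps disables that reuse).
 */
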
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
            void *inpage, unsigned int *inputmargin, int *maptype,
            bool may_inplace)
{
    struct z_erofs_decompress_req *rq = ctx->rq;
    unsigned int omargin, total, i, j;
    struct page **in;
    void *src, *tmp;

    if (rq->inplace_io) {
        omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
        if (rq->partial_decoding || !may_inplace ||
            omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
            goto docopy;

        for (i = 0; i < ctx->inpages; ++i) {
            DBG_BUGON(rq->in[i] == NULL);
            for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
                if (rq->out[j] == rq->in[i])
                    goto docopy;
        }
    }

    if (ctx->inpages <= 1) {
        *maptype = 0;
        return inpage;
    }
    kunmap_atomic(inpage);
    might_sleep();
    src = erofs_vm_map_ram(rq->in, ctx->inpages);
    if (!src)
        return ERR_PTR(-ENOMEM);
    *maptype = 1;
    return src;

docopy:
    /* otherwise, copy compressed data, which may overlap the output, into a per-CPU buffer */
    in = rq->in;
    src = erofs_get_pcpubuf(ctx->inpages);
    if (!src) {
        DBG_BUGON(1);
        kunmap_atomic(inpage);
        return ERR_PTR(-EFAULT);
    }

    tmp = src;
    total = rq->inputsize;
    while (total) {
        unsigned int page_copycnt =
            min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

        if (!inpage)
            inpage = kmap_atomic(*in);
        memcpy(tmp, inpage + *inputmargin, page_copycnt);
        kunmap_atomic(inpage);
        inpage = NULL;
        tmp += page_copycnt;
        total -= page_copycnt;
        ++in;
        *inputmargin = 0;
    }
    *maptype = 2;
    return src;
}

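/*
 * *maptype contract (matched by the cleanup in z_erofs_lz4_decompress_mem):
 *   0 - the (single) input page is still kmapped and src points into it;
 *   1 - rq->in was mapped virtually contiguous via erofs_vm_map_ram();
 *   2 - compressed data was copied to a per-CPU buffer (the docopy path).
 */
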
/*
 * Get the exact inputsize with the zero_padding feature.
 *  - For LZ4, it works if the zero_padding feature is on (5.3+);
 *  - For MicroLZMA, it's always enabled.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
             unsigned int padbufsize)
{
    const char *padend;

    padend = memchr_inv(padbuf, 0, padbufsize);
    if (!padend)
        return -EFSCORRUPTED;
    rq->inputsize -= padend - padbuf;
    rq->pageofs_in += padend - padbuf;
    return 0;
}

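/*
 * Worked example (illustrative; the numbers are assumptions): with
 * zero_padding, compressed data is aligned to the end of its block and the
 * head is zero-filled. If padbuf points at the block head and its first 100
 * bytes are zero, memchr_inv() returns padbuf + 100, so inputsize shrinks
 * by 100 and pageofs_in advances by 100 to the real start of compressed
 * data. An all-zero padbuf indicates on-disk corruption: -EFSCORRUPTED.
 */
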
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
                      u8 *out)
{
    struct z_erofs_decompress_req *rq = ctx->rq;
    bool support_0padding = false, may_inplace = false;
    unsigned int inputmargin;
    u8 *headpage, *src;
    int ret, maptype;

    DBG_BUGON(*rq->in == NULL);
    headpage = kmap_atomic(*rq->in);

    /* in-place LZ4 decompression is only safe if zero_padding is enabled */
    if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
        support_0padding = true;
        ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
                min_t(unsigned int, rq->inputsize,
                      EROFS_BLKSIZ - rq->pageofs_in));
        if (ret) {
            kunmap_atomic(headpage);
            return ret;
        }
        may_inplace = !((rq->pageofs_in + rq->inputsize) &
                (EROFS_BLKSIZ - 1));
    }

    inputmargin = rq->pageofs_in;
    src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
                     &maptype, may_inplace);
    if (IS_ERR(src))
        return PTR_ERR(src);

    /* legacy format could compress extra data in a pcluster. */
    if (rq->partial_decoding || !support_0padding)
        ret = LZ4_decompress_safe_partial(src + inputmargin, out,
                rq->inputsize, rq->outputsize, rq->outputsize);
    else
        ret = LZ4_decompress_safe(src + inputmargin, out,
                      rq->inputsize, rq->outputsize);

    if (ret != rq->outputsize) {
        erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
              ret, rq->inputsize, inputmargin, rq->outputsize);

        print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
                   16, 1, src + inputmargin, rq->inputsize, true);
        print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
                   16, 1, out, rq->outputsize, true);

        if (ret >= 0)
            memset(out + ret, 0, rq->outputsize - ret);
        ret = -EIO;
    } else {
        ret = 0;
    }

    if (maptype == 0) {
        kunmap_atomic(headpage);
    } else if (maptype == 1) {
        vm_unmap_ram(src, ctx->inpages);
    } else if (maptype == 2) {
        erofs_put_pcpubuf(src);
    } else {
        DBG_BUGON(1);
        return -EFAULT;
    }
    return ret;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
                  struct page **pagepool)
{
    struct z_erofs_lz4_decompress_ctx ctx;
    unsigned int dst_maptype;
    void *dst;
    int ret;

    ctx.rq = rq;
    ctx.oend = rq->pageofs_out + rq->outputsize;
    ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
    ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

    /* one optimized fast path only for non-bigpcluster cases yet */
    if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
        DBG_BUGON(!*rq->out);
        dst = kmap_atomic(*rq->out);
        dst_maptype = 0;
        goto dstmap_out;
    }

    /* general decoding path which can be used for all cases */
    ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        dst = page_address(*rq->out);
        dst_maptype = 1;
    } else {
        dst = erofs_vm_map_ram(rq->out, ctx.outpages);
        if (!dst)
            return -ENOMEM;
        dst_maptype = 2;
    }

dstmap_out:
    ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
    if (!dst_maptype)
        kunmap_atomic(dst);
    else if (dst_maptype == 2)
        vm_unmap_ram(dst, ctx.outpages);
    return ret;
}

static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
                     struct page **pagepool)
{
    const unsigned int nrpages_out =
        PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
    const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
                         PAGE_SIZE - rq->pageofs_out);
    const unsigned int lefthalf = rq->outputsize - righthalf;
    unsigned char *src, *dst;

    if (nrpages_out > 2) {
        DBG_BUGON(1);
        return -EIO;
    }

    if (rq->out[0] == *rq->in) {
        DBG_BUGON(nrpages_out != 1);
        return 0;
    }

    src = kmap_atomic(*rq->in) + rq->pageofs_in;
    if (rq->out[0]) {
        dst = kmap_atomic(rq->out[0]);
        memcpy(dst + rq->pageofs_out, src, righthalf);
        kunmap_atomic(dst);
    }

    if (nrpages_out == 2) {
        DBG_BUGON(!rq->out[1]);
        if (rq->out[1] == *rq->in) {
            memmove(src, src + righthalf, lefthalf);
        } else {
            dst = kmap_atomic(rq->out[1]);
            memcpy(dst, src + righthalf, lefthalf);
            kunmap_atomic(dst);
        }
    }
    kunmap_atomic(src);
    return 0;
}

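/*
 * Worked example (illustrative assumption: PAGE_SIZE == 4096): for an
 * uncompressed ("shifted") cluster with pageofs_out = 3000 and
 * outputsize = 2000, righthalf = min(2000, 4096 - 3000) = 1096 bytes land
 * at the tail of rq->out[0] and lefthalf = 2000 - 1096 = 904 bytes at the
 * head of rq->out[1]; memmove() is used when out[1] aliases the input page.
 */
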
static struct z_erofs_decompressor decompressors[] = {
    [Z_EROFS_COMPRESSION_SHIFTED] = {
        .decompress = z_erofs_shifted_transform,
        .name = "shifted"
    },
    [Z_EROFS_COMPRESSION_LZ4] = {
        .decompress = z_erofs_lz4_decompress,
        .name = "lz4"
    },
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
    [Z_EROFS_COMPRESSION_LZMA] = {
        .decompress = z_erofs_lzma_decompress,
        .name = "lzma"
    },
#endif
};

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
               struct page **pagepool)
{
    return decompressors[rq->alg].decompress(rq, pagepool);
}
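
/*
 * Minimal usage sketch (hypothetical caller, field values are assumptions;
 * real callers live in the EROFS zdata path): decompressing one compressed
 * block into a single page with the LZ4 backend.
 */
#if 0
static int example_decompress_one(struct super_block *sb,
                  struct page *inpage, struct page *outpage,
                  struct page **pagepool)
{
    struct z_erofs_decompress_req rq = {
        .sb = sb,
        .in = &inpage,
        .out = &outpage,
        .pageofs_in = 0,
        .pageofs_out = 0,
        .inputsize = EROFS_BLKSIZ,  /* compressed bytes incl. zero padding */
        .outputsize = PAGE_SIZE,    /* decoded bytes expected */
        .alg = Z_EROFS_COMPRESSION_LZ4,
        .inplace_io = false,
        .partial_decoding = false,
        .fillgaps = false,
    };

    return z_erofs_decompress(&rq, pagepool);
}
#endif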