/* EROFS MicroLZMA decompression support (fixed-sized output compression) */

0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 #include <linux/xz.h>
0003 #include <linux/module.h>
0004 #include "compress.h"
0005 
struct z_erofs_lzma {
	struct z_erofs_lzma *next;	/* link in the global free-context list */
	struct xz_dec_microlzma *state;	/* decoder state, preallocated per max dictsize */
	struct xz_buf buf;		/* in/out cursors fed to xz_dec_microlzma_run() */
	u8 bounce[PAGE_SIZE];		/* staging copy when an input page overlaps output */
};
0012 
/* considering the LZMA performance, no need to use a lockless list for now */
static DEFINE_SPINLOCK(z_erofs_lzma_lock);
/* largest dictionary size the preallocated decoders support (grow-only) */
static unsigned int z_erofs_lzma_max_dictsize;
/* configured # of contexts / # currently allocated (freed only at exit) */
static unsigned int z_erofs_lzma_nstrms, z_erofs_lzma_avail_strms;
/* head of the free-context list, protected by z_erofs_lzma_lock */
static struct z_erofs_lzma *z_erofs_lzma_head;
/* waiters blocked until a context is pushed back onto the list */
static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq);

/* # of decompression contexts; 0 (default) means # of possible CPUs */
module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444);
0021 
0022 void z_erofs_lzma_exit(void)
0023 {
0024     /* there should be no running fs instance */
0025     while (z_erofs_lzma_avail_strms) {
0026         struct z_erofs_lzma *strm;
0027 
0028         spin_lock(&z_erofs_lzma_lock);
0029         strm = z_erofs_lzma_head;
0030         if (!strm) {
0031             spin_unlock(&z_erofs_lzma_lock);
0032             DBG_BUGON(1);
0033             return;
0034         }
0035         z_erofs_lzma_head = NULL;
0036         spin_unlock(&z_erofs_lzma_lock);
0037 
0038         while (strm) {
0039             struct z_erofs_lzma *n = strm->next;
0040 
0041             if (strm->state)
0042                 xz_dec_microlzma_end(strm->state);
0043             kfree(strm);
0044             --z_erofs_lzma_avail_strms;
0045             strm = n;
0046         }
0047     }
0048 }
0049 
0050 int z_erofs_lzma_init(void)
0051 {
0052     unsigned int i;
0053 
0054     /* by default, use # of possible CPUs instead */
0055     if (!z_erofs_lzma_nstrms)
0056         z_erofs_lzma_nstrms = num_possible_cpus();
0057 
0058     for (i = 0; i < z_erofs_lzma_nstrms; ++i) {
0059         struct z_erofs_lzma *strm = kzalloc(sizeof(*strm), GFP_KERNEL);
0060 
0061         if (!strm) {
0062             z_erofs_lzma_exit();
0063             return -ENOMEM;
0064         }
0065         spin_lock(&z_erofs_lzma_lock);
0066         strm->next = z_erofs_lzma_head;
0067         z_erofs_lzma_head = strm;
0068         spin_unlock(&z_erofs_lzma_lock);
0069         ++z_erofs_lzma_avail_strms;
0070     }
0071     return 0;
0072 }
0073 
/*
 * Validate the on-disk LZMA configuration and, if its dictionary size
 * exceeds what the preallocated decoders currently support, reallocate
 * every decoder state with the larger dictionary.
 *
 * Returns 0 on success, -EINVAL on a bad on-disk config, or -ENOMEM if
 * any decoder reallocation fails (remaining streams are still pushed
 * back so the list stays consistent).
 */
int z_erofs_load_lzma_config(struct super_block *sb,
			     struct erofs_super_block *dsb,
			     struct z_erofs_lzma_cfgs *lzma, int size)
{
	static DEFINE_MUTEX(lzma_resize_mutex);
	unsigned int dict_size, i;
	struct z_erofs_lzma *strm, *head = NULL;
	int err;

	if (!lzma || size < sizeof(struct z_erofs_lzma_cfgs)) {
		erofs_err(sb, "invalid lzma cfgs, size=%u", size);
		return -EINVAL;
	}
	/* only format 0 (MicroLZMA) is understood by this kernel */
	if (lzma->format) {
		erofs_err(sb, "unidentified lzma format %x, please check kernel version",
			  le16_to_cpu(lzma->format));
		return -EINVAL;
	}
	dict_size = le32_to_cpu(lzma->dict_size);
	if (dict_size > Z_EROFS_LZMA_MAX_DICT_SIZE || dict_size < 4096) {
		erofs_err(sb, "unsupported lzma dictionary size %u",
			  dict_size);
		return -EINVAL;
	}

	erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!");

	/* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */
	mutex_lock(&lzma_resize_mutex);

	/* decoders are already big enough; nothing to do */
	if (z_erofs_lzma_max_dictsize >= dict_size) {
		mutex_unlock(&lzma_resize_mutex);
		return 0;
	}

	/* 1. collect/isolate all streams for the following check */
	for (i = 0; i < z_erofs_lzma_avail_strms; ++i) {
		struct z_erofs_lzma *last;

again:
		spin_lock(&z_erofs_lzma_lock);
		strm = z_erofs_lzma_head;
		if (!strm) {
			/* all contexts are in use; wait until one returns */
			spin_unlock(&z_erofs_lzma_lock);
			wait_event(z_erofs_lzma_wq,
				   READ_ONCE(z_erofs_lzma_head));
			goto again;
		}
		z_erofs_lzma_head = NULL;
		spin_unlock(&z_erofs_lzma_lock);

		/*
		 * Splice the grabbed chain onto the private list; bump i
		 * for each extra context so the loop counts every context
		 * taken, not just the number of grabs.
		 */
		for (last = strm; last->next; last = last->next)
			++i;
		last->next = head;
		head = strm;
	}

	err = 0;
	/* 2. walk each isolated stream and grow max dict_size if needed */
	for (strm = head; strm; strm = strm->next) {
		if (strm->state)
			xz_dec_microlzma_end(strm->state);
		strm->state = xz_dec_microlzma_alloc(XZ_PREALLOC, dict_size);
		if (!strm->state)
			err = -ENOMEM;
	}

	/* 3. push back all to the global list and update max dict_size */
	spin_lock(&z_erofs_lzma_lock);
	DBG_BUGON(z_erofs_lzma_head);
	z_erofs_lzma_head = head;
	spin_unlock(&z_erofs_lzma_lock);
	wake_up_all(&z_erofs_lzma_wq);

	z_erofs_lzma_max_dictsize = dict_size;
	mutex_unlock(&lzma_resize_mutex);
	return err;
}
0152 
0153 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
0154                 struct page **pagepool)
0155 {
0156     const unsigned int nrpages_out =
0157         PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
0158     const unsigned int nrpages_in =
0159         PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
0160     unsigned int inlen, outlen, pageofs;
0161     struct z_erofs_lzma *strm;
0162     u8 *kin;
0163     bool bounced = false;
0164     int no, ni, j, err = 0;
0165 
0166     /* 1. get the exact LZMA compressed size */
0167     kin = kmap(*rq->in);
0168     err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
0169                    min_t(unsigned int, rq->inputsize,
0170                      EROFS_BLKSIZ - rq->pageofs_in));
0171     if (err) {
0172         kunmap(*rq->in);
0173         return err;
0174     }
0175 
0176     /* 2. get an available lzma context */
0177 again:
0178     spin_lock(&z_erofs_lzma_lock);
0179     strm = z_erofs_lzma_head;
0180     if (!strm) {
0181         spin_unlock(&z_erofs_lzma_lock);
0182         wait_event(z_erofs_lzma_wq, READ_ONCE(z_erofs_lzma_head));
0183         goto again;
0184     }
0185     z_erofs_lzma_head = strm->next;
0186     spin_unlock(&z_erofs_lzma_lock);
0187 
0188     /* 3. multi-call decompress */
0189     inlen = rq->inputsize;
0190     outlen = rq->outputsize;
0191     xz_dec_microlzma_reset(strm->state, inlen, outlen,
0192                    !rq->partial_decoding);
0193     pageofs = rq->pageofs_out;
0194     strm->buf.in = kin + rq->pageofs_in;
0195     strm->buf.in_pos = 0;
0196     strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in);
0197     inlen -= strm->buf.in_size;
0198     strm->buf.out = NULL;
0199     strm->buf.out_pos = 0;
0200     strm->buf.out_size = 0;
0201 
0202     for (ni = 0, no = -1;;) {
0203         enum xz_ret xz_err;
0204 
0205         if (strm->buf.out_pos == strm->buf.out_size) {
0206             if (strm->buf.out) {
0207                 kunmap(rq->out[no]);
0208                 strm->buf.out = NULL;
0209             }
0210 
0211             if (++no >= nrpages_out || !outlen) {
0212                 erofs_err(rq->sb, "decompressed buf out of bound");
0213                 err = -EFSCORRUPTED;
0214                 break;
0215             }
0216             strm->buf.out_pos = 0;
0217             strm->buf.out_size = min_t(u32, outlen,
0218                            PAGE_SIZE - pageofs);
0219             outlen -= strm->buf.out_size;
0220             if (rq->out[no])
0221                 strm->buf.out = kmap(rq->out[no]) + pageofs;
0222             pageofs = 0;
0223         } else if (strm->buf.in_pos == strm->buf.in_size) {
0224             kunmap(rq->in[ni]);
0225 
0226             if (++ni >= nrpages_in || !inlen) {
0227                 erofs_err(rq->sb, "compressed buf out of bound");
0228                 err = -EFSCORRUPTED;
0229                 break;
0230             }
0231             strm->buf.in_pos = 0;
0232             strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE);
0233             inlen -= strm->buf.in_size;
0234             kin = kmap(rq->in[ni]);
0235             strm->buf.in = kin;
0236             bounced = false;
0237         }
0238 
0239         /*
0240          * Handle overlapping: Use bounced buffer if the compressed
0241          * data is under processing; Otherwise, Use short-lived pages
0242          * from the on-stack pagepool where pages share with the same
0243          * request.
0244          */
0245         if (!bounced && rq->out[no] == rq->in[ni]) {
0246             memcpy(strm->bounce, strm->buf.in, strm->buf.in_size);
0247             strm->buf.in = strm->bounce;
0248             bounced = true;
0249         }
0250         for (j = ni + 1; j < nrpages_in; ++j) {
0251             struct page *tmppage;
0252 
0253             if (rq->out[no] != rq->in[j])
0254                 continue;
0255 
0256             DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
0257                             rq->in[j]));
0258             tmppage = erofs_allocpage(pagepool,
0259                           GFP_KERNEL | __GFP_NOFAIL);
0260             set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
0261             copy_highpage(tmppage, rq->in[j]);
0262             rq->in[j] = tmppage;
0263         }
0264         xz_err = xz_dec_microlzma_run(strm->state, &strm->buf);
0265         DBG_BUGON(strm->buf.out_pos > strm->buf.out_size);
0266         DBG_BUGON(strm->buf.in_pos > strm->buf.in_size);
0267 
0268         if (xz_err != XZ_OK) {
0269             if (xz_err == XZ_STREAM_END && !outlen)
0270                 break;
0271             erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]",
0272                   xz_err, rq->inputsize, rq->outputsize);
0273             err = -EFSCORRUPTED;
0274             break;
0275         }
0276     }
0277     if (no < nrpages_out && strm->buf.out)
0278         kunmap(rq->in[no]);
0279     if (ni < nrpages_in)
0280         kunmap(rq->in[ni]);
0281     /* 4. push back LZMA stream context to the global list */
0282     spin_lock(&z_erofs_lzma_lock);
0283     strm->next = z_erofs_lzma_head;
0284     z_erofs_lzma_head = strm;
0285     spin_unlock(&z_erofs_lzma_lock);
0286     wake_up(&z_erofs_lzma_wq);
0287     return err;
0288 }