0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2008 Oracle.  All rights reserved.
0004  */
0005 
0006 #include <linux/kernel.h>
0007 #include <linux/bio.h>
0008 #include <linux/file.h>
0009 #include <linux/fs.h>
0010 #include <linux/pagemap.h>
0011 #include <linux/highmem.h>
0012 #include <linux/kthread.h>
0013 #include <linux/time.h>
0014 #include <linux/init.h>
0015 #include <linux/string.h>
0016 #include <linux/backing-dev.h>
0017 #include <linux/writeback.h>
0018 #include <linux/slab.h>
0019 #include <linux/sched/mm.h>
0020 #include <linux/log2.h>
0021 #include <crypto/hash.h>
0022 #include "misc.h"
0023 #include "ctree.h"
0024 #include "disk-io.h"
0025 #include "transaction.h"
0026 #include "btrfs_inode.h"
0027 #include "volumes.h"
0028 #include "ordered-data.h"
0029 #include "compression.h"
0030 #include "extent_io.h"
0031 #include "extent_map.h"
0032 #include "subpage.h"
0033 #include "zoned.h"
0034 
0035 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
0036 
0037 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
0038 {
0039     switch (type) {
0040     case BTRFS_COMPRESS_ZLIB:
0041     case BTRFS_COMPRESS_LZO:
0042     case BTRFS_COMPRESS_ZSTD:
0043     case BTRFS_COMPRESS_NONE:
0044         return btrfs_compress_types[type];
0045     default:
0046         break;
0047     }
0048 
0049     return NULL;
0050 }
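
/*
 * Example, using the table above (the enum values index btrfs_compress_types[]
 * directly): btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB) returns "zlib",
 * BTRFS_COMPRESS_NONE maps to the empty string, and any other value
 * returns NULL.
 */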
0051 
0052 bool btrfs_compress_is_valid_type(const char *str, size_t len)
0053 {
0054     int i;
0055 
0056     for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
0057         size_t comp_len = strlen(btrfs_compress_types[i]);
0058 
0059         if (len < comp_len)
0060             continue;
0061 
0062         if (!strncmp(btrfs_compress_types[i], str, comp_len))
0063             return true;
0064     }
0065     return false;
0066 }
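
/*
 * Example of the prefix match above, with values from btrfs_compress_types[]:
 *
 *     btrfs_compress_is_valid_type("zstd:3", 6) -> true  ("zstd" matches)
 *     btrfs_compress_is_valid_type("gzip", 4)   -> false (no name matches)
 *
 * so a caller may hand in a whole "type:level" string unchanged.
 */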
0067 
0068 static int compression_compress_pages(int type, struct list_head *ws,
0069                struct address_space *mapping, u64 start, struct page **pages,
0070                unsigned long *out_pages, unsigned long *total_in,
0071                unsigned long *total_out)
0072 {
0073     switch (type) {
0074     case BTRFS_COMPRESS_ZLIB:
0075         return zlib_compress_pages(ws, mapping, start, pages,
0076                 out_pages, total_in, total_out);
0077     case BTRFS_COMPRESS_LZO:
0078         return lzo_compress_pages(ws, mapping, start, pages,
0079                 out_pages, total_in, total_out);
0080     case BTRFS_COMPRESS_ZSTD:
0081         return zstd_compress_pages(ws, mapping, start, pages,
0082                 out_pages, total_in, total_out);
0083     case BTRFS_COMPRESS_NONE:
0084     default:
0085         /*
0086          * This can happen when compression races with a remount that
0087          * sets 'no compress', while the caller doesn't call
0088          * inode_need_compress() to check if we really need to
0089          * compress.
0090          *
0091          * Not a big deal, we just need to inform the caller that we
0092          * haven't allocated any pages yet.
0093          */
0094         *out_pages = 0;
0095         return -E2BIG;
0096     }
0097 }
0098 
0099 static int compression_decompress_bio(struct list_head *ws,
0100                       struct compressed_bio *cb)
0101 {
0102     switch (cb->compress_type) {
0103     case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
0104     case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
0105     case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
0106     case BTRFS_COMPRESS_NONE:
0107     default:
0108         /*
0109          * This can't happen, the type is validated several times
0110          * before we get here.
0111          */
0112         BUG();
0113     }
0114 }
0115 
0116 static int compression_decompress(int type, struct list_head *ws,
0117                unsigned char *data_in, struct page *dest_page,
0118                unsigned long start_byte, size_t srclen, size_t destlen)
0119 {
0120     switch (type) {
0121     case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
0122                         start_byte, srclen, destlen);
0123     case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
0124                         start_byte, srclen, destlen);
0125     case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
0126                         start_byte, srclen, destlen);
0127     case BTRFS_COMPRESS_NONE:
0128     default:
0129         /*
0130          * This can't happen, the type is validated several times
0131          * before we get here.
0132          */
0133         BUG();
0134     }
0135 }
0136 
0137 static int btrfs_decompress_bio(struct compressed_bio *cb);
0138 
0139 static void finish_compressed_bio_read(struct compressed_bio *cb)
0140 {
0141     unsigned int index;
0142     struct page *page;
0143 
0144     if (cb->status == BLK_STS_OK)
0145         cb->status = errno_to_blk_status(btrfs_decompress_bio(cb));
0146 
0147     /* Release the compressed pages */
0148     for (index = 0; index < cb->nr_pages; index++) {
0149         page = cb->compressed_pages[index];
0150         page->mapping = NULL;
0151         put_page(page);
0152     }
0153 
0154     /* Do io completion on the original bio */
0155     if (cb->status != BLK_STS_OK)
0156         cb->orig_bio->bi_status = cb->status;
0157     bio_endio(cb->orig_bio);
0158 
0159     /* Finally free the cb struct */
0160     kfree(cb->compressed_pages);
0161     kfree(cb);
0162 }
0163 
0164 /*
0165  * Verify the checksums and kick off repair if needed on the uncompressed data
0166  * before decompressing it into the original bio and freeing the uncompressed
0167  * pages.
0168  */
0169 static void end_compressed_bio_read(struct bio *bio)
0170 {
0171     struct compressed_bio *cb = bio->bi_private;
0172     struct inode *inode = cb->inode;
0173     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0174     struct btrfs_inode *bi = BTRFS_I(inode);
0175     bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
0176             !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
0177     blk_status_t status = bio->bi_status;
0178     struct btrfs_bio *bbio = btrfs_bio(bio);
0179     struct bvec_iter iter;
0180     struct bio_vec bv;
0181     u32 offset;
0182 
0183     btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) {
0184         u64 start = bbio->file_offset + offset;
0185 
0186         if (!status &&
0187             (!csum || !btrfs_check_data_csum(inode, bbio, offset,
0188                              bv.bv_page, bv.bv_offset))) {
0189             clean_io_failure(fs_info, &bi->io_failure_tree,
0190                      &bi->io_tree, start, bv.bv_page,
0191                      btrfs_ino(bi), bv.bv_offset);
0192         } else {
0193             int ret;
0194 
0195             refcount_inc(&cb->pending_ios);
0196             ret = btrfs_repair_one_sector(inode, bbio, offset,
0197                               bv.bv_page, bv.bv_offset,
0198                               btrfs_submit_data_read_bio);
0199             if (ret) {
0200                 refcount_dec(&cb->pending_ios);
0201                 status = errno_to_blk_status(ret);
0202             }
0203         }
0204     }
0205 
0206     if (status)
0207         cb->status = status;
0208 
0209     if (refcount_dec_and_test(&cb->pending_ios))
0210         finish_compressed_bio_read(cb);
0211     btrfs_bio_free_csum(bbio);
0212     bio_put(bio);
0213 }
0214 
0215 /*
0216  * Clear the writeback bits on all of the file
0217  * pages for a compressed write
0218  */
0219 static noinline void end_compressed_writeback(struct inode *inode,
0220                           const struct compressed_bio *cb)
0221 {
0222     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0223     unsigned long index = cb->start >> PAGE_SHIFT;
0224     unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
0225     struct page *pages[16];
0226     unsigned long nr_pages = end_index - index + 1;
0227     const int errno = blk_status_to_errno(cb->status);
0228     int i;
0229     int ret;
0230 
0231     if (errno)
0232         mapping_set_error(inode->i_mapping, errno);
0233 
0234     while (nr_pages > 0) {
0235         ret = find_get_pages_contig(inode->i_mapping, index,
0236                      min_t(unsigned long,
0237                      nr_pages, ARRAY_SIZE(pages)), pages);
0238         if (ret == 0) {
0239             nr_pages -= 1;
0240             index += 1;
0241             continue;
0242         }
0243         for (i = 0; i < ret; i++) {
0244             if (errno)
0245                 SetPageError(pages[i]);
0246             btrfs_page_clamp_clear_writeback(fs_info, pages[i],
0247                              cb->start, cb->len);
0248             put_page(pages[i]);
0249         }
0250         nr_pages -= ret;
0251         index += ret;
0252     }
0253     /* the inode may be gone now */
0254 }
0255 
0256 static void finish_compressed_bio_write(struct compressed_bio *cb)
0257 {
0258     struct inode *inode = cb->inode;
0259     unsigned int index;
0260 
0261     /*
0262      * Ok, we're the last bio for this extent, step one is to call back
0263      * into the FS and do all the end_io operations.
0264      */
0265     btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
0266             cb->start, cb->start + cb->len - 1,
0267             cb->status == BLK_STS_OK);
0268 
0269     if (cb->writeback)
0270         end_compressed_writeback(inode, cb);
0271     /* Note, our inode could be gone now */
0272 
0273     /*
0274      * Release the compressed pages, these came from alloc_page and
0275      * are not attached to the inode at all
0276      */
0277     for (index = 0; index < cb->nr_pages; index++) {
0278         struct page *page = cb->compressed_pages[index];
0279 
0280         page->mapping = NULL;
0281         put_page(page);
0282     }
0283 
0284     /* Finally free the cb struct */
0285     kfree(cb->compressed_pages);
0286     kfree(cb);
0287 }
0288 
0289 static void btrfs_finish_compressed_write_work(struct work_struct *work)
0290 {
0291     struct compressed_bio *cb =
0292         container_of(work, struct compressed_bio, write_end_work);
0293 
0294     finish_compressed_bio_write(cb);
0295 }
0296 
0297 /*
0298  * Do the cleanup once all the compressed pages hit the disk.  This will clear
0299  * writeback on the file pages and free the compressed pages.
0300  *
0301  * This also calls the writeback end hooks for the file pages so that metadata
0302  * and checksums can be updated in the file.
0303  */
0304 static void end_compressed_bio_write(struct bio *bio)
0305 {
0306     struct compressed_bio *cb = bio->bi_private;
0307 
0308     if (bio->bi_status)
0309         cb->status = bio->bi_status;
0310 
0311     if (refcount_dec_and_test(&cb->pending_ios)) {
0312         struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
0313 
0314         btrfs_record_physical_zoned(cb->inode, cb->start, bio);
0315         queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
0316     }
0317     bio_put(bio);
0318 }
0319 
0320 /*
0321  * Allocate a compressed_bio, which will be used to read/write on-disk
0322  * (aka, compressed) data.
0323  *
0324  * @cb:                 The compressed_bio structure, which records all the needed
0325  *                      information to bind the compressed data to the uncompressed
0326  *                      page cache.
0327  * @disk_bytenr:        The logical bytenr where the compressed data will be read
0328  *                      from or written to.
0329  * @endio_func:         The endio function to call after the IO for compressed data
0330  *                      is finished.
0331  * @next_stripe_start:  Returns the logical bytenr where the next stripe starts,
0332  *                      so the caller knows to fill the bio only up to the
0333  *                      stripe boundary.
0334  */
0335 
0336 
0337 static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
0338                     blk_opf_t opf, bio_end_io_t endio_func,
0339                     u64 *next_stripe_start)
0340 {
0341     struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
0342     struct btrfs_io_geometry geom;
0343     struct extent_map *em;
0344     struct bio *bio;
0345     int ret;
0346 
0347     bio = btrfs_bio_alloc(BIO_MAX_VECS);
0348 
0349     bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
0350     bio->bi_opf = opf;
0351     bio->bi_private = cb;
0352     bio->bi_end_io = endio_func;
0353 
0354     em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
0355     if (IS_ERR(em)) {
0356         bio_put(bio);
0357         return ERR_CAST(em);
0358     }
0359 
0360     if (bio_op(bio) == REQ_OP_ZONE_APPEND)
0361         bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);
0362 
0363     ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
0364     free_extent_map(em);
0365     if (ret < 0) {
0366         bio_put(bio);
0367         return ERR_PTR(ret);
0368     }
0369     *next_stripe_start = disk_bytenr + geom.len;
0370     refcount_inc(&cb->pending_ios);
0371     return bio;
0372 }
0373 
0374 /*
0375  * worker function to build and submit bios for previously compressed pages.
0376  * The corresponding pages in the inode should be marked for writeback
0377  * and the compressed pages should have a reference on them for dropping
0378  * when the IO is complete.
0379  *
0380  * This also checksums the file bytes and gets things ready for
0381  * the end io hooks.
0382  */
0383 blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
0384                  unsigned int len, u64 disk_start,
0385                  unsigned int compressed_len,
0386                  struct page **compressed_pages,
0387                  unsigned int nr_pages,
0388                  blk_opf_t write_flags,
0389                  struct cgroup_subsys_state *blkcg_css,
0390                  bool writeback)
0391 {
0392     struct btrfs_fs_info *fs_info = inode->root->fs_info;
0393     struct bio *bio = NULL;
0394     struct compressed_bio *cb;
0395     u64 cur_disk_bytenr = disk_start;
0396     u64 next_stripe_start;
0397     blk_status_t ret = BLK_STS_OK;
0398     int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
0399     const bool use_append = btrfs_use_zone_append(inode, disk_start);
0400     const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
0401 
0402     ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
0403            IS_ALIGNED(len, fs_info->sectorsize));
0404     cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
0405     if (!cb)
0406         return BLK_STS_RESOURCE;
0407     refcount_set(&cb->pending_ios, 1);
0408     cb->status = BLK_STS_OK;
0409     cb->inode = &inode->vfs_inode;
0410     cb->start = start;
0411     cb->len = len;
0412     cb->compressed_pages = compressed_pages;
0413     cb->compressed_len = compressed_len;
0414     cb->writeback = writeback;
0415     INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
0416     cb->nr_pages = nr_pages;
0417 
0418     if (blkcg_css)
0419         kthread_associate_blkcg(blkcg_css);
0420 
0421     while (cur_disk_bytenr < disk_start + compressed_len) {
0422         u64 offset = cur_disk_bytenr - disk_start;
0423         unsigned int index = offset >> PAGE_SHIFT;
0424         unsigned int real_size;
0425         unsigned int added;
0426         struct page *page = compressed_pages[index];
0427         bool submit = false;
0428 
0429         /* Allocate new bio if submitted or not yet allocated */
0430         if (!bio) {
0431             bio = alloc_compressed_bio(cb, cur_disk_bytenr,
0432                 bio_op | write_flags, end_compressed_bio_write,
0433                 &next_stripe_start);
0434             if (IS_ERR(bio)) {
0435                 ret = errno_to_blk_status(PTR_ERR(bio));
0436                 break;
0437             }
0438             if (blkcg_css)
0439                 bio->bi_opf |= REQ_CGROUP_PUNT;
0440         }
0441         /*
0442          * We should never reach next_stripe_start, as we submit the bio
0443          * immediately when we reach the stripe boundary.
0444          */
0445         ASSERT(cur_disk_bytenr != next_stripe_start);
0446 
0447         /*
0448          * We have various limits on the real write size:
0449          * - stripe boundary
0450          * - page boundary
0451          * - compressed length boundary
0452          */
0453         real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
0454         real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
0455         real_size = min_t(u64, real_size, compressed_len - offset);
0456         ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
0457 
0458         if (use_append)
0459             added = bio_add_zone_append_page(bio, page, real_size,
0460                     offset_in_page(offset));
0461         else
0462             added = bio_add_page(bio, page, real_size,
0463                     offset_in_page(offset));
0464         /* Reached zoned boundary */
0465         if (added == 0)
0466             submit = true;
0467 
0468         cur_disk_bytenr += added;
0469         /* Reached stripe boundary */
0470         if (cur_disk_bytenr == next_stripe_start)
0471             submit = true;
0472 
0473         /* Finished the range */
0474         if (cur_disk_bytenr == disk_start + compressed_len)
0475             submit = true;
0476 
0477         if (submit) {
0478             if (!skip_sum) {
0479                 ret = btrfs_csum_one_bio(inode, bio, start, true);
0480                 if (ret) {
0481                     bio->bi_status = ret;
0482                     bio_endio(bio);
0483                     break;
0484                 }
0485             }
0486 
0487             ASSERT(bio->bi_iter.bi_size);
0488             btrfs_submit_bio(fs_info, bio, 0);
0489             bio = NULL;
0490         }
0491         cond_resched();
0492     }
0493 
0494     if (blkcg_css)
0495         kthread_associate_blkcg(NULL);
0496 
0497     if (refcount_dec_and_test(&cb->pending_ios))
0498         finish_compressed_bio_write(cb);
0499     return ret;
0500 }
0501 
0502 static u64 bio_end_offset(struct bio *bio)
0503 {
0504     struct bio_vec *last = bio_last_bvec_all(bio);
0505 
0506     return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
0507 }
0508 
0509 /*
0510  * Add extra pages in the same compressed file extent so that we don't need to
0511  * re-read the same extent again and again.
0512  *
0513  * NOTE: this won't work well for subpage, as for a subpage read we lock the
0514  * full page and then submit a bio for each compressed/regular extent.
0515  *
0516  * This means that if several sectors in the same page point to the same
0517  * on-disk compressed data, we will re-read the same extent many times and
0518  * this function can only help for the next page.
0519  */
0520 static noinline int add_ra_bio_pages(struct inode *inode,
0521                      u64 compressed_end,
0522                      struct compressed_bio *cb)
0523 {
0524     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0525     unsigned long end_index;
0526     u64 cur = bio_end_offset(cb->orig_bio);
0527     u64 isize = i_size_read(inode);
0528     int ret;
0529     struct page *page;
0530     struct extent_map *em;
0531     struct address_space *mapping = inode->i_mapping;
0532     struct extent_map_tree *em_tree;
0533     struct extent_io_tree *tree;
0534     int sectors_missed = 0;
0535 
0536     em_tree = &BTRFS_I(inode)->extent_tree;
0537     tree = &BTRFS_I(inode)->io_tree;
0538 
0539     if (isize == 0)
0540         return 0;
0541 
0542     /*
0543      * For current subpage support, we only support 64K page size,
0544      * which means the maximum compressed extent size (128K) is just 2x the
0545      * page size.
0546      * This makes readahead less effective, so disable readahead for
0547      * subpage for now, until full compressed write is supported.
0548      */
0549     if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
0550         return 0;
0551 
0552     end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
0553 
0554     while (cur < compressed_end) {
0555         u64 page_end;
0556         u64 pg_index = cur >> PAGE_SHIFT;
0557         u32 add_size;
0558 
0559         if (pg_index > end_index)
0560             break;
0561 
0562         page = xa_load(&mapping->i_pages, pg_index);
0563         if (page && !xa_is_value(page)) {
0564             sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
0565                       fs_info->sectorsize_bits;
0566 
0567             /* Beyond threshold, no need to continue */
0568             if (sectors_missed > 4)
0569                 break;
0570 
0571             /*
0572              * Jump to the next page start as we already have a page
0573              * for the current offset.
0574              */
0575             cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
0576             continue;
0577         }
0578 
0579         page = __page_cache_alloc(mapping_gfp_constraint(mapping,
0580                                  ~__GFP_FS));
0581         if (!page)
0582             break;
0583 
0584         if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
0585             put_page(page);
0586             /* There is already a page, skip to page end */
0587             cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
0588             continue;
0589         }
0590 
0591         ret = set_page_extent_mapped(page);
0592         if (ret < 0) {
0593             unlock_page(page);
0594             put_page(page);
0595             break;
0596         }
0597 
0598         page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
0599         lock_extent(tree, cur, page_end);
0600         read_lock(&em_tree->lock);
0601         em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
0602         read_unlock(&em_tree->lock);
0603 
0604         /*
0605          * At this point, we have a locked page in the page cache for
0606          * these bytes in the file.  But, we have to make sure they map
0607          * to this compressed extent on disk.
0608          */
0609         if (!em || cur < em->start ||
0610             (cur + fs_info->sectorsize > extent_map_end(em)) ||
0611             (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
0612             free_extent_map(em);
0613             unlock_extent(tree, cur, page_end);
0614             unlock_page(page);
0615             put_page(page);
0616             break;
0617         }
0618         free_extent_map(em);
0619 
0620         if (page->index == end_index) {
0621             size_t zero_offset = offset_in_page(isize);
0622 
0623             if (zero_offset) {
0624                 int zeros;
0625                 zeros = PAGE_SIZE - zero_offset;
0626                 memzero_page(page, zero_offset, zeros);
0627             }
0628         }
0629 
0630         add_size = min(em->start + em->len, page_end + 1) - cur;
0631         ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
0632         if (ret != add_size) {
0633             unlock_extent(tree, cur, page_end);
0634             unlock_page(page);
0635             put_page(page);
0636             break;
0637         }
0638         /*
0639          * If it's subpage, we also need to increase its
0640          * subpage::readers number, as at endio we will decrease
0641          * subpage::readers and unlock the page.
0642          */
0643         if (fs_info->sectorsize < PAGE_SIZE)
0644             btrfs_subpage_start_reader(fs_info, page, cur, add_size);
0645         put_page(page);
0646         cur += add_size;
0647     }
0648     return 0;
0649 }
0650 
0651 /*
0652  * for a compressed read, the bio we get passed has all the inode pages
0653  * in it.  We don't actually do IO on those pages but allocate new ones
0654  * to hold the compressed pages on disk.
0655  *
0656  * bio->bi_iter.bi_sector points to the compressed extent on disk
0657  * bio->bi_io_vec points to all of the inode pages
0658  *
0659  * After the compressed pages are read, we copy the bytes into the
0660  * bio we were passed and then call its end_io callback.
0661  */
0662 void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
0663                   int mirror_num)
0664 {
0665     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0666     struct extent_map_tree *em_tree;
0667     struct compressed_bio *cb;
0668     unsigned int compressed_len;
0669     struct bio *comp_bio = NULL;
0670     const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
0671     u64 cur_disk_byte = disk_bytenr;
0672     u64 next_stripe_start;
0673     u64 file_offset;
0674     u64 em_len;
0675     u64 em_start;
0676     struct extent_map *em;
0677     blk_status_t ret;
0678     int ret2;
0679     int i;
0680 
0681     em_tree = &BTRFS_I(inode)->extent_tree;
0682 
0683     file_offset = bio_first_bvec_all(bio)->bv_offset +
0684               page_offset(bio_first_page_all(bio));
0685 
0686     /* we need the actual starting offset of this extent in the file */
0687     read_lock(&em_tree->lock);
0688     em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
0689     read_unlock(&em_tree->lock);
0690     if (!em) {
0691         ret = BLK_STS_IOERR;
0692         goto out;
0693     }
0694 
0695     ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
0696     compressed_len = em->block_len;
0697     cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
0698     if (!cb) {
0699         ret = BLK_STS_RESOURCE;
0700         goto out;
0701     }
0702 
0703     refcount_set(&cb->pending_ios, 1);
0704     cb->status = BLK_STS_OK;
0705     cb->inode = inode;
0706 
0707     cb->start = em->orig_start;
0708     em_len = em->len;
0709     em_start = em->start;
0710 
0711     cb->len = bio->bi_iter.bi_size;
0712     cb->compressed_len = compressed_len;
0713     cb->compress_type = em->compress_type;
0714     cb->orig_bio = bio;
0715 
0716     free_extent_map(em);
0717     em = NULL;
0718 
0719     cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
0720     cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
0721     if (!cb->compressed_pages) {
0722         ret = BLK_STS_RESOURCE;
0723         goto fail;
0724     }
0725 
0726     ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
0727     if (ret2) {
0728         ret = BLK_STS_RESOURCE;
0729         goto fail;
0730     }
0731 
0732     add_ra_bio_pages(inode, em_start + em_len, cb);
0733 
0734     /* include any pages we added in add_ra_bio_pages */
0735     cb->len = bio->bi_iter.bi_size;
0736 
0737     while (cur_disk_byte < disk_bytenr + compressed_len) {
0738         u64 offset = cur_disk_byte - disk_bytenr;
0739         unsigned int index = offset >> PAGE_SHIFT;
0740         unsigned int real_size;
0741         unsigned int added;
0742         struct page *page = cb->compressed_pages[index];
0743         bool submit = false;
0744 
0745         /* Allocate new bio if submitted or not yet allocated */
0746         if (!comp_bio) {
0747             comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
0748                     REQ_OP_READ, end_compressed_bio_read,
0749                     &next_stripe_start);
0750             if (IS_ERR(comp_bio)) {
0751                 cb->status = errno_to_blk_status(PTR_ERR(comp_bio));
0752                 break;
0753             }
0754         }
0755         /*
0756          * We should never reach next_stripe_start, as we submit comp_bio
0757          * immediately when we reach the stripe boundary.
0758          */
0759         ASSERT(cur_disk_byte != next_stripe_start);
0760         /*
0761          * We have various limits on the real read size:
0762          * - stripe boundary
0763          * - page boundary
0764          * - compressed length boundary
0765          */
0766         real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
0767         real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
0768         real_size = min_t(u64, real_size, compressed_len - offset);
0769         ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
0770 
0771         added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
0772         /*
0773          * The maximum compressed extent size is smaller than the bio size
0774          * limit, thus bio_add_page() should always succeed.
0775          */
0776         ASSERT(added == real_size);
0777         cur_disk_byte += added;
0778 
0779         /* Reached stripe boundary, need to submit */
0780         if (cur_disk_byte == next_stripe_start)
0781             submit = true;
0782 
0783         /* Has finished the range, need to submit */
0784         if (cur_disk_byte == disk_bytenr + compressed_len)
0785             submit = true;
0786 
0787         if (submit) {
0788             /* Save the original iter for read repair */
0789             if (bio_op(comp_bio) == REQ_OP_READ)
0790                 btrfs_bio(comp_bio)->iter = comp_bio->bi_iter;
0791 
0792             /*
0793              * Save the initial offset of this chunk, as there
0794              * is no direct correlation between compressed pages and
0795              * the original file offset.  The field is only used for
0796              * printing error messages.
0797              */
0798             btrfs_bio(comp_bio)->file_offset = file_offset;
0799 
0800             ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
0801             if (ret) {
0802                 comp_bio->bi_status = ret;
0803                 bio_endio(comp_bio);
0804                 break;
0805             }
0806 
0807             ASSERT(comp_bio->bi_iter.bi_size);
0808             btrfs_submit_bio(fs_info, comp_bio, mirror_num);
0809             comp_bio = NULL;
0810         }
0811     }
0812 
0813     if (refcount_dec_and_test(&cb->pending_ios))
0814         finish_compressed_bio_read(cb);
0815     return;
0816 
0817 fail:
0818     if (cb->compressed_pages) {
0819         for (i = 0; i < cb->nr_pages; i++) {
0820             if (cb->compressed_pages[i])
0821                 __free_page(cb->compressed_pages[i]);
0822         }
0823     }
0824 
0825     kfree(cb->compressed_pages);
0826     kfree(cb);
0827 out:
0828     free_extent_map(em);
0829     bio->bi_status = ret;
0830     bio_endio(bio);
0831     return;
0832 }
0833 
0834 /*
0835  * The heuristic uses systematic sampling to collect data from the input
0836  * range; the logic can be tuned by the following constants:
0837  *
0838  * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
0839  * @SAMPLING_INTERVAL  - distance between the starting offsets of two samples
0840  */
0841 #define SAMPLING_READ_SIZE  (16)
0842 #define SAMPLING_INTERVAL   (256)
0843 
0844 /*
0845  * For statistical analysis of the input data we consider bytes that form a
0846  * Galois Field of 256 objects. Each object has an attribute count, ie. how
0847  * many times the object appeared in the sample.
0848  */
0849 #define BUCKET_SIZE     (256)
0850 
0851 /*
0852  * The size of the sample is based on a statistical sampling rule of thumb.
0853  * The common way is to perform sampling tests as long as the number of
0854  * elements in each cell is at least 5.
0855  *
0856  * Instead of 5, we choose 32 to obtain more accurate results.
0857  * If the data contain the maximum number of symbols, which is 256, we obtain a
0858  * sample size bound by 8192.
0859  *
0860  * For a sample of at most 8KB of data per data range: 16 consecutive bytes
0861  * from up to 512 locations.
0862  */
0863 #define MAX_SAMPLE_SIZE     (BTRFS_MAX_UNCOMPRESSED *       \
0864                  SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
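
/*
 * Worked out with the constants above (BTRFS_MAX_UNCOMPRESSED is 128KiB):
 *
 *     128KiB / SAMPLING_INTERVAL = 512 sample positions
 *     512 * SAMPLING_READ_SIZE   = 8192 bytes
 *
 * so MAX_SAMPLE_SIZE evaluates to the 8KiB bound described above.
 */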
0865 
0866 struct bucket_item {
0867     u32 count;
0868 };
0869 
0870 struct heuristic_ws {
0871     /* Partial copy of input data */
0872     u8 *sample;
0873     u32 sample_size;
0874     /* Buckets store counters for each byte value */
0875     struct bucket_item *bucket;
0876     /* Sorting buffer */
0877     struct bucket_item *bucket_b;
0878     struct list_head list;
0879 };
0880 
0881 static struct workspace_manager heuristic_wsm;
0882 
0883 static void free_heuristic_ws(struct list_head *ws)
0884 {
0885     struct heuristic_ws *workspace;
0886 
0887     workspace = list_entry(ws, struct heuristic_ws, list);
0888 
0889     kvfree(workspace->sample);
0890     kfree(workspace->bucket);
0891     kfree(workspace->bucket_b);
0892     kfree(workspace);
0893 }
0894 
0895 static struct list_head *alloc_heuristic_ws(unsigned int level)
0896 {
0897     struct heuristic_ws *ws;
0898 
0899     ws = kzalloc(sizeof(*ws), GFP_KERNEL);
0900     if (!ws)
0901         return ERR_PTR(-ENOMEM);
0902 
0903     ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
0904     if (!ws->sample)
0905         goto fail;
0906 
0907     ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
0908     if (!ws->bucket)
0909         goto fail;
0910 
0911     ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
0912     if (!ws->bucket_b)
0913         goto fail;
0914 
0915     INIT_LIST_HEAD(&ws->list);
0916     return &ws->list;
0917 fail:
0918     free_heuristic_ws(&ws->list);
0919     return ERR_PTR(-ENOMEM);
0920 }
0921 
0922 const struct btrfs_compress_op btrfs_heuristic_compress = {
0923     .workspace_manager = &heuristic_wsm,
0924 };
0925 
0926 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
0927     /* The heuristic is represented as compression type 0 */
0928     &btrfs_heuristic_compress,
0929     &btrfs_zlib_compress,
0930     &btrfs_lzo_compress,
0931     &btrfs_zstd_compress,
0932 };
0933 
0934 static struct list_head *alloc_workspace(int type, unsigned int level)
0935 {
0936     switch (type) {
0937     case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
0938     case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
0939     case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
0940     case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
0941     default:
0942         /*
0943          * This can't happen, the type is validated several times
0944          * before we get here.
0945          */
0946         BUG();
0947     }
0948 }
0949 
0950 static void free_workspace(int type, struct list_head *ws)
0951 {
0952     switch (type) {
0953     case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
0954     case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
0955     case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
0956     case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
0957     default:
0958         /*
0959          * This can't happen, the type is validated several times
0960          * before we get here.
0961          */
0962         BUG();
0963     }
0964 }
0965 
0966 static void btrfs_init_workspace_manager(int type)
0967 {
0968     struct workspace_manager *wsm;
0969     struct list_head *workspace;
0970 
0971     wsm = btrfs_compress_op[type]->workspace_manager;
0972     INIT_LIST_HEAD(&wsm->idle_ws);
0973     spin_lock_init(&wsm->ws_lock);
0974     atomic_set(&wsm->total_ws, 0);
0975     init_waitqueue_head(&wsm->ws_wait);
0976 
0977     /*
0978      * Preallocate one workspace for each compression type so we can
0979      * guarantee forward progress in the worst case
0980      */
0981     workspace = alloc_workspace(type, 0);
0982     if (IS_ERR(workspace)) {
0983         pr_warn(
0984     "BTRFS: cannot preallocate compression workspace, will try later\n");
0985     } else {
0986         atomic_set(&wsm->total_ws, 1);
0987         wsm->free_ws = 1;
0988         list_add(workspace, &wsm->idle_ws);
0989     }
0990 }
0991 
0992 static void btrfs_cleanup_workspace_manager(int type)
0993 {
0994     struct workspace_manager *wsman;
0995     struct list_head *ws;
0996 
0997     wsman = btrfs_compress_op[type]->workspace_manager;
0998     while (!list_empty(&wsman->idle_ws)) {
0999         ws = wsman->idle_ws.next;
1000         list_del(ws);
1001         free_workspace(type, ws);
1002         atomic_dec(&wsman->total_ws);
1003     }
1004 }
1005 
1006 /*
1007  * This finds an available workspace or allocates a new one.
1008  * If it's not possible to allocate a new one, it waits until one is freed.
1009  * Preallocation provides a forward progress guarantee, so we do not return
1010  * errors.
1011  */
1012 struct list_head *btrfs_get_workspace(int type, unsigned int level)
1013 {
1014     struct workspace_manager *wsm;
1015     struct list_head *workspace;
1016     int cpus = num_online_cpus();
1017     unsigned nofs_flag;
1018     struct list_head *idle_ws;
1019     spinlock_t *ws_lock;
1020     atomic_t *total_ws;
1021     wait_queue_head_t *ws_wait;
1022     int *free_ws;
1023 
1024     wsm = btrfs_compress_op[type]->workspace_manager;
1025     idle_ws  = &wsm->idle_ws;
1026     ws_lock  = &wsm->ws_lock;
1027     total_ws = &wsm->total_ws;
1028     ws_wait  = &wsm->ws_wait;
1029     free_ws  = &wsm->free_ws;
1030 
1031 again:
1032     spin_lock(ws_lock);
1033     if (!list_empty(idle_ws)) {
1034         workspace = idle_ws->next;
1035         list_del(workspace);
1036         (*free_ws)--;
1037         spin_unlock(ws_lock);
1038         return workspace;
1039 
1040     }
1041     if (atomic_read(total_ws) > cpus) {
1042         DEFINE_WAIT(wait);
1043 
1044         spin_unlock(ws_lock);
1045         prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1046         if (atomic_read(total_ws) > cpus && !*free_ws)
1047             schedule();
1048         finish_wait(ws_wait, &wait);
1049         goto again;
1050     }
1051     atomic_inc(total_ws);
1052     spin_unlock(ws_lock);
1053 
1054     /*
1055      * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
1056      * to turn it off here because we might get called from the restricted
1057      * context of btrfs_compress_bio/btrfs_compress_pages
1058      */
1059     nofs_flag = memalloc_nofs_save();
1060     workspace = alloc_workspace(type, level);
1061     memalloc_nofs_restore(nofs_flag);
1062 
1063     if (IS_ERR(workspace)) {
1064         atomic_dec(total_ws);
1065         wake_up(ws_wait);
1066 
1067         /*
1068          * Do not return the error but go back to waiting. There's a
1069          * workspace preallocated for each type and the compression
1070          * time is bounded so we get to a workspace eventually. This
1071          * makes our caller's life easier.
1072          *
1073          * To prevent silent and low-probability deadlocks (when the
1074          * initial preallocation fails), check if there are any
1075          * workspaces at all.
1076          */
1077         if (atomic_read(total_ws) == 0) {
1078             static DEFINE_RATELIMIT_STATE(_rs,
1079                     /* once per minute */ 60 * HZ,
1080                     /* no burst */ 1);
1081 
1082             if (__ratelimit(&_rs)) {
1083                 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1084             }
1085         }
1086         goto again;
1087     }
1088     return workspace;
1089 }
1090 
1091 static struct list_head *get_workspace(int type, int level)
1092 {
1093     switch (type) {
1094     case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1095     case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1096     case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
1097     case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1098     default:
1099         /*
1100          * This can't happen, the type is validated several times
1101          * before we get here.
1102          */
1103         BUG();
1104     }
1105 }
1106 
1107 /*
1108  * put a workspace struct back on the list or free it if we have enough
1109  * idle ones sitting around
1110  */
1111 void btrfs_put_workspace(int type, struct list_head *ws)
1112 {
1113     struct workspace_manager *wsm;
1114     struct list_head *idle_ws;
1115     spinlock_t *ws_lock;
1116     atomic_t *total_ws;
1117     wait_queue_head_t *ws_wait;
1118     int *free_ws;
1119 
1120     wsm = btrfs_compress_op[type]->workspace_manager;
1121     idle_ws  = &wsm->idle_ws;
1122     ws_lock  = &wsm->ws_lock;
1123     total_ws = &wsm->total_ws;
1124     ws_wait  = &wsm->ws_wait;
1125     free_ws  = &wsm->free_ws;
1126 
1127     spin_lock(ws_lock);
1128     if (*free_ws <= num_online_cpus()) {
1129         list_add(ws, idle_ws);
1130         (*free_ws)++;
1131         spin_unlock(ws_lock);
1132         goto wake;
1133     }
1134     spin_unlock(ws_lock);
1135 
1136     free_workspace(type, ws);
1137     atomic_dec(total_ws);
1138 wake:
1139     cond_wake_up(ws_wait);
1140 }
1141 
1142 static void put_workspace(int type, struct list_head *ws)
1143 {
1144     switch (type) {
1145     case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1146     case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1147     case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
1148     case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1149     default:
1150         /*
1151          * This can't happen, the type is validated several times
1152          * before we get here.
1153          */
1154         BUG();
1155     }
1156 }
1157 
1158 /*
1159  * Adjust @level according to the limits of the compression algorithm or
1160  * fall back to the default
1161  */
1162 static unsigned int btrfs_compress_set_level(int type, unsigned level)
1163 {
1164     const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1165 
1166     if (level == 0)
1167         level = ops->default_level;
1168     else
1169         level = min(level, ops->max_level);
1170 
1171     return level;
1172 }
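
/*
 * Example of the clamping above (a sketch; the actual default_level and
 * max_level values live in each algorithm's btrfs_compress_op and differ
 * per algorithm):
 *
 *     btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 0)  -> ops->default_level
 *     btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 99) -> ops->max_level
 *
 * i.e. a missing level picks the per-algorithm default and an out-of-range
 * level is clamped to the maximum.
 */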
1173 
1174 /*
1175  * Given an address space and start and length, compress the bytes into @pages
1176  * that are allocated on demand.
1177  *
1178  * @type_level is the encoded algorithm and level, where level 0 means whatever
1179  * default the algorithm chooses and is opaque here;
1180  * - the compression algorithm is stored in the low bits (values 0-3)
1181  * - the level is stored in bits 4-7
1182  *
1183  * @out_pages is an in/out parameter, holds the maximum number of pages to
1184  * allocate and returns the number of pages actually allocated
1185  *
1186  * @total_in is used to return the number of bytes actually read.  It
1187  * may be smaller than the input length if we had to exit early because we
1188  * ran out of room in the pages array or because we crossed the
1189  * max_out threshold.
1190  *
1191  * @total_out is an in/out parameter, must be set to the input length and will
1192  * also be used to return the total number of compressed bytes
1193  */
1194 int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1195              u64 start, struct page **pages,
1196              unsigned long *out_pages,
1197              unsigned long *total_in,
1198              unsigned long *total_out)
1199 {
1200     int type = btrfs_compress_type(type_level);
1201     int level = btrfs_compress_level(type_level);
1202     struct list_head *workspace;
1203     int ret;
1204 
1205     level = btrfs_compress_set_level(type, level);
1206     workspace = get_workspace(type, level);
1207     ret = compression_compress_pages(type, workspace, mapping, start, pages,
1208                      out_pages, total_in, total_out);
1209     put_workspace(type, workspace);
1210     return ret;
1211 }
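
/*
 * Worked example of the @type_level encoding documented above (a sketch,
 * assuming the usual "type in the low bits, level shifted left by 4" layout
 * behind btrfs_compress_type()/btrfs_compress_level()):
 *
 *     zstd at level 3:  type_level = BTRFS_COMPRESS_ZSTD | (3 << 4)
 *
 * A level of 0 is then replaced by the algorithm default via
 * btrfs_compress_set_level() above.
 */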
1212 
1213 static int btrfs_decompress_bio(struct compressed_bio *cb)
1214 {
1215     struct list_head *workspace;
1216     int ret;
1217     int type = cb->compress_type;
1218 
1219     workspace = get_workspace(type, 0);
1220     ret = compression_decompress_bio(workspace, cb);
1221     put_workspace(type, workspace);
1222 
1223     return ret;
1224 }
1225 
1226 /*
1227  * a less complex decompression routine.  Our compressed data fits in a
1228  * single page, and we want to read a single page out of it.
1229  * start_byte tells us the offset into the compressed data we're interested in
1230  */
1231 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1232              unsigned long start_byte, size_t srclen, size_t destlen)
1233 {
1234     struct list_head *workspace;
1235     int ret;
1236 
1237     workspace = get_workspace(type, 0);
1238     ret = compression_decompress(type, workspace, data_in, dest_page,
1239                      start_byte, srclen, destlen);
1240     put_workspace(type, workspace);
1241 
1242     return ret;
1243 }
1244 
1245 void __init btrfs_init_compress(void)
1246 {
1247     btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1248     btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1249     btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1250     zstd_init_workspace_manager();
1251 }
1252 
1253 void __cold btrfs_exit_compress(void)
1254 {
1255     btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1256     btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1257     btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1258     zstd_cleanup_workspace_manager();
1259 }
1260 
1261 /*
1262  * Copy decompressed data from working buffer to pages.
1263  *
1264  * @buf:        The decompressed data buffer
1265  * @buf_len:        The decompressed data length
1266  * @decompressed:   Number of bytes that are already decompressed inside the
1267  *          compressed extent
1268  * @cb:         The compressed extent descriptor
1269  * @orig_bio:       The original bio that the caller wants the data copied into
1270  *
1271  * An easier to understand graph is like below:
1272  *
1273  *      |<- orig_bio ->|     |<- orig_bio->|
1274  *  |<-------      full decompressed extent      ----->|
1275  *  |<-----------    @cb range   ---->|
1276  *  |           |<-- @buf_len -->|
1277  *  |<--- @decompressed --->|
1278  *
1279  * Note that @cb can be a subpage of the full decompressed extent, but
1280  * @cb->start always matches the file offset of the full decompressed
1281  * extent.
1282  *
1283  * When reading a compressed extent, we have to read the full compressed
1284  * extent, while @orig_bio may only want part of the range.
1285  * Thus this function will only copy data covered by @orig_bio into the
1286  * original bio.
1287  *
1288  * Return 0 if we have copied all needed contents for @orig_bio.
1289  * Return >0 if we need to continue decompressing.
1290  */
1291 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1292                   struct compressed_bio *cb, u32 decompressed)
1293 {
1294     struct bio *orig_bio = cb->orig_bio;
1295     /* Offset inside the full decompressed extent */
1296     u32 cur_offset;
1297 
1298     cur_offset = decompressed;
1299     /* The main loop to do the copy */
1300     while (cur_offset < decompressed + buf_len) {
1301         struct bio_vec bvec;
1302         size_t copy_len;
1303         u32 copy_start;
1304         /* Offset inside the full decompressed extent */
1305         u32 bvec_offset;
1306 
1307         bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1308         /*
1309          * cb->start may underflow, but subtracting that value can still
1310          * give us correct offset inside the full decompressed extent.
1311          */
1312         bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1313 
1314         /* Haven't reached the bvec range, exit */
1315         if (decompressed + buf_len <= bvec_offset)
1316             return 1;
1317 
1318         copy_start = max(cur_offset, bvec_offset);
1319         copy_len = min(bvec_offset + bvec.bv_len,
1320                    decompressed + buf_len) - copy_start;
1321         ASSERT(copy_len);
1322 
1323         /*
1324          * Extra range check to ensure we didn't go beyond
1325          * @buf + @buf_len.
1326          */
1327         ASSERT(copy_start - decompressed < buf_len);
1328         memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1329                    buf + copy_start - decompressed, copy_len);
1330         cur_offset += copy_len;
1331 
1332         bio_advance(orig_bio, copy_len);
1333         /* Finished the bio */
1334         if (!orig_bio->bi_iter.bi_size)
1335             return 0;
1336     }
1337     return 1;
1338 }
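
/*
 * Numeric example of the copy loop above (a sketch with made-up offsets,
 * all relative to the start of the decompressed extent): say the working
 * buffer holds bytes [0, 4K) (@decompressed = 0, @buf_len = 4K) and the next
 * bvec of @orig_bio starts at bvec_offset = 2K with a 4K length.  Then
 * copy_start = 2K, copy_len = min(6K, 4K) - 2K = 2K, the bio is advanced by
 * 2K, and on the next iteration bvec_offset = 4K >= decompressed + buf_len,
 * so the function returns 1 and the caller decompresses the next chunk.
 */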
1339 
1340 /*
1341  * Shannon Entropy calculation
1342  *
1343  * Pure byte distribution analysis fails to determine compressibility of data.
1344  * Try calculating entropy to estimate the average minimum number of bits
1345  * needed to encode the sampled data.
1346  *
1347  * For convenience, return the percentage of needed bits, instead of amount of
1348  * bits directly.
1349  *
1350  * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1351  *              and can be compressible with high probability
1352  *
1353  * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1354  *
1355  * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1356  */
1357 #define ENTROPY_LVL_ACEPTABLE       (65)
1358 #define ENTROPY_LVL_HIGH        (80)
1359 
1360 /*
1361  * For increased precision in the shannon_entropy calculation,
1362  * let's do pow(n, M) to save more digits after comma:
1363  *
1364  * - maximum int bit length is 64
1365  * - ilog2(MAX_SAMPLE_SIZE) -> 13
1366  * - 13 * 4 = 52 < 64       -> M = 4
1367  *
1368  * So use pow(n, 4).
1369  */
1370 static inline u32 ilog2_w(u64 n)
1371 {
1372     return ilog2(n * n * n * n);
1373 }
1374 
1375 static u32 shannon_entropy(struct heuristic_ws *ws)
1376 {
1377     const u32 entropy_max = 8 * ilog2_w(2);
1378     u32 entropy_sum = 0;
1379     u32 p, p_base, sz_base;
1380     u32 i;
1381 
1382     sz_base = ilog2_w(ws->sample_size);
1383     for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1384         p = ws->bucket[i].count;
1385         p_base = ilog2_w(p);
1386         entropy_sum += p * (sz_base - p_base);
1387     }
1388 
1389     entropy_sum /= ws->sample_size;
1390     return entropy_sum * 100 / entropy_max;
1391 }
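
/*
 * Relation to the textbook formula (a sketch): with p_i = count_i /
 * sample_size, Shannon entropy is H = -sum(p_i * log2(p_i)).  Since
 * ilog2_w(n) approximates 4 * log2(n), each loop term is roughly
 * 4 * count_i * log2(sample_size / count_i); dividing the sum by
 * sample_size gives about 4 * H, and entropy_max = 8 * ilog2_w(2) = 32,
 * so the return value approximates the percentage of the maximum
 * 8 bits per byte actually needed to encode the sample.
 */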
1392 
1393 #define RADIX_BASE      4U
1394 #define COUNTERS_SIZE       (1U << RADIX_BASE)
1395 
1396 static u8 get4bits(u64 num, int shift) {
1397     u8 low4bits;
1398 
1399     num >>= shift;
1400     /* Reverse order */
1401     low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1402     return low4bits;
1403 }
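
/*
 * Example of the "reverse order" trick above: for a 4-bit digit d the
 * function returns 15 - d, so larger digits get smaller keys and the radix
 * sort below ends up ordering the buckets by descending count, which is
 * what byte_core_set_size() relies on ("Sort in reverse order").
 */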
1404 
1405 /*
1406  * Use 4 bits as radix base
1407  * Use 16 u32 counters for calculating new position in buf array
1408  *
1409  * @array     - array that will be sorted
1410  * @array_buf - buffer array to store sorting results
1411  *              must be equal in size to @array
1412  * @num       - array size
1413  */
1414 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1415                int num)
1416 {
1417     u64 max_num;
1418     u64 buf_num;
1419     u32 counters[COUNTERS_SIZE];
1420     u32 new_addr;
1421     u32 addr;
1422     int bitlen;
1423     int shift;
1424     int i;
1425 
1426     /*
1427      * Try to avoid useless loop iterations for small numbers stored in big
1428      * counters.  Example: 48 33 4 ... in a 64bit array
1429      */
1430     max_num = array[0].count;
1431     for (i = 1; i < num; i++) {
1432         buf_num = array[i].count;
1433         if (buf_num > max_num)
1434             max_num = buf_num;
1435     }
1436 
1437     buf_num = ilog2(max_num);
1438     bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1439 
1440     shift = 0;
1441     while (shift < bitlen) {
1442         memset(counters, 0, sizeof(counters));
1443 
1444         for (i = 0; i < num; i++) {
1445             buf_num = array[i].count;
1446             addr = get4bits(buf_num, shift);
1447             counters[addr]++;
1448         }
1449 
1450         for (i = 1; i < COUNTERS_SIZE; i++)
1451             counters[i] += counters[i - 1];
1452 
1453         for (i = num - 1; i >= 0; i--) {
1454             buf_num = array[i].count;
1455             addr = get4bits(buf_num, shift);
1456             counters[addr]--;
1457             new_addr = counters[addr];
1458             array_buf[new_addr] = array[i];
1459         }
1460 
1461         shift += RADIX_BASE;
1462 
1463         /*
1464          * A normal radix sort expects to move data from the temporary array
1465          * back to the main one.  But that requires some CPU time. Avoid that
1466          * by doing another sort iteration into the original array instead of
1467          * a memcpy().
1468          */
1469         memset(counters, 0, sizeof(counters));
1470 
1471         for (i = 0; i < num; i++) {
1472             buf_num = array_buf[i].count;
1473             addr = get4bits(buf_num, shift);
1474             counters[addr]++;
1475         }
1476 
1477         for (i = 1; i < COUNTERS_SIZE; i++)
1478             counters[i] += counters[i - 1];
1479 
1480         for (i = num - 1; i >= 0; i--) {
1481             buf_num = array_buf[i].count;
1482             addr = get4bits(buf_num, shift);
1483             counters[addr]--;
1484             new_addr = counters[addr];
1485             array[new_addr] = array_buf[i];
1486         }
1487 
1488         shift += RADIX_BASE;
1489     }
1490 }
1491 
1492 /*
1493  * Size of the core byte set - how many bytes cover 90% of the sample
1494  *
1495  * There are several types of structured binary data that use nearly all byte
1496  * values. The distribution can be uniform and counts in all buckets will be
1497  * nearly the same (eg. encrypted data). Unlikely to be compressible.
1498  *
1499  * Other possibility is normal (Gaussian) distribution, where the data could
1500  * be potentially compressible, but we have to take a few more steps to decide
1501  * how much.
1502  *
1503  * @BYTE_CORE_SET_LOW  - the main part of the byte values repeats frequently,
1504  *                       a compression algorithm can easily handle that
1505  * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and are, with
1506  *                       high probability, not compressible
1507  */
1508 #define BYTE_CORE_SET_LOW       (64)
1509 #define BYTE_CORE_SET_HIGH      (200)
1510 
1511 static int byte_core_set_size(struct heuristic_ws *ws)
1512 {
1513     u32 i;
1514     u32 coreset_sum = 0;
1515     const u32 core_set_threshold = ws->sample_size * 90 / 100;
1516     struct bucket_item *bucket = ws->bucket;
1517 
1518     /* Sort in reverse order */
1519     radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1520 
1521     for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1522         coreset_sum += bucket[i].count;
1523 
1524     if (coreset_sum > core_set_threshold)
1525         return i;
1526 
1527     for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1528         coreset_sum += bucket[i].count;
1529         if (coreset_sum > core_set_threshold)
1530             break;
1531     }
1532 
1533     return i;
1534 }
1535 
1536 /*
1537  * Count byte values in buckets.
1538  * This heuristic can detect textual data (configs, xml, json, html, etc).
1539  * In most text-like data the byte set is restricted to a limited number of
1540  * possible characters, and that restriction in most cases makes the data
1541  * easy to compress.
1542  *
1543  * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1544  *  less - compressible
1545  *  more - need additional analysis
1546  */
1547 #define BYTE_SET_THRESHOLD      (64)
1548 
1549 static u32 byte_set_size(const struct heuristic_ws *ws)
1550 {
1551     u32 i;
1552     u32 byte_set_size = 0;
1553 
1554     for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1555         if (ws->bucket[i].count > 0)
1556             byte_set_size++;
1557     }
1558 
1559     /*
1560      * Continue collecting count of byte values in buckets.  If the byte
1561      * set size is bigger than the threshold, it's pointless to continue,
1562      * the detection technique would fail for this type of data.
1563      */
1564     for (; i < BUCKET_SIZE; i++) {
1565         if (ws->bucket[i].count > 0) {
1566             byte_set_size++;
1567             if (byte_set_size > BYTE_SET_THRESHOLD)
1568                 return byte_set_size;
1569         }
1570     }
1571 
1572     return byte_set_size;
1573 }
1574 
1575 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1576 {
1577     const u32 half_of_sample = ws->sample_size / 2;
1578     const u8 *data = ws->sample;
1579 
1580     return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1581 }
1582 
1583 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1584                      struct heuristic_ws *ws)
1585 {
1586     struct page *page;
1587     u64 index, index_end;
1588     u32 i, curr_sample_pos;
1589     u8 *in_data;
1590 
1591     /*
1592      * Compression handles the input data by chunks of 128KiB
1593      * (defined by BTRFS_MAX_UNCOMPRESSED)
1594      *
1595      * We do the same for the heuristic and loop over the whole range.
1596      *
1597      * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1598      * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1599      */
1600     if (end - start > BTRFS_MAX_UNCOMPRESSED)
1601         end = start + BTRFS_MAX_UNCOMPRESSED;
1602 
1603     index = start >> PAGE_SHIFT;
1604     index_end = end >> PAGE_SHIFT;
1605 
1606     /* Don't miss unaligned end */
1607     if (!IS_ALIGNED(end, PAGE_SIZE))
1608         index_end++;
1609 
1610     curr_sample_pos = 0;
1611     while (index < index_end) {
1612         page = find_get_page(inode->i_mapping, index);
1613         in_data = kmap_local_page(page);
1614         /* Handle case where the start is not aligned to PAGE_SIZE */
1615         i = start % PAGE_SIZE;
1616         while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1617             /* Don't sample any garbage from the last page */
1618             if (start > end - SAMPLING_READ_SIZE)
1619                 break;
1620             memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1621                     SAMPLING_READ_SIZE);
1622             i += SAMPLING_INTERVAL;
1623             start += SAMPLING_INTERVAL;
1624             curr_sample_pos += SAMPLING_READ_SIZE;
1625         }
1626         kunmap_local(in_data);
1627         put_page(page);
1628 
1629         index++;
1630     }
1631 
1632     ws->sample_size = curr_sample_pos;
1633 }
1634 
1635 /*
1636  * Compression heuristic.
1637  *
1638  * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1639  * quickly (compared to direct compression) detect data characteristics
1640  * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1641  * data.
1642  *
1643  * The following types of analysis can be performed:
1644  * - detect mostly zero data
1645  * - detect data with low "byte set" size (text, etc)
1646  * - detect data with low/high "core byte" set
1647  *
1648  * Return non-zero if the compression should be done, 0 otherwise.
1649  */
1650 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1651 {
1652     struct list_head *ws_list = get_workspace(0, 0);
1653     struct heuristic_ws *ws;
1654     u32 i;
1655     u8 byte;
1656     int ret = 0;
1657 
1658     ws = list_entry(ws_list, struct heuristic_ws, list);
1659 
1660     heuristic_collect_sample(inode, start, end, ws);
1661 
1662     if (sample_repeated_patterns(ws)) {
1663         ret = 1;
1664         goto out;
1665     }
1666 
1667     memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1668 
1669     for (i = 0; i < ws->sample_size; i++) {
1670         byte = ws->sample[i];
1671         ws->bucket[byte].count++;
1672     }
1673 
1674     i = byte_set_size(ws);
1675     if (i < BYTE_SET_THRESHOLD) {
1676         ret = 2;
1677         goto out;
1678     }
1679 
1680     i = byte_core_set_size(ws);
1681     if (i <= BYTE_CORE_SET_LOW) {
1682         ret = 3;
1683         goto out;
1684     }
1685 
1686     if (i >= BYTE_CORE_SET_HIGH) {
1687         ret = 0;
1688         goto out;
1689     }
1690 
1691     i = shannon_entropy(ws);
1692     if (i <= ENTROPY_LVL_ACEPTABLE) {
1693         ret = 4;
1694         goto out;
1695     }
1696 
1697     /*
1698      * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1699      * needed to give green light to compression.
1700      *
1701      * For now just assume that compression at that level is not worth the
1702      * resources because:
1703      *
1704      * 1. it is possible to defrag the data later
1705      *
1706      * 2. the data would turn out to be hardly compressible, eg. with 150
1707      * distinct byte values every bucket has a count around ~54. The heuristic
1708      * would be confused. This can happen when data have some internal repeated
1709      * patterns like "abbacbbc...". This can be detected by analyzing
1710      * pairs of bytes, which is too costly.
1711      */
1712     if (i < ENTROPY_LVL_HIGH) {
1713         ret = 5;
1714         goto out;
1715     } else {
1716         ret = 0;
1717         goto out;
1718     }
1719 
1720 out:
1721     put_workspace(0, ws_list);
1722     return ret;
1723 }
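
/*
 * Summary of the return values above (any non-zero value means "worth
 * compressing", only the detection path differs):
 *
 *     1 - the two halves of the sample are identical (repeated pattern)
 *     2 - fewer than BYTE_SET_THRESHOLD distinct byte values
 *     3 - at most BYTE_CORE_SET_LOW byte values cover 90% of the sample
 *     4 - Shannon entropy at or below ENTROPY_LVL_ACEPTABLE
 *     5 - entropy below ENTROPY_LVL_HIGH, optimistically assumed compressible
 *     0 - the data look incompressible
 */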
1724 
1725 /*
1726  * Convert the compression suffix (eg. after "zlib", starting with ":") to a
1727  * level; an unrecognized string sets the default level.
1728  */
1729 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1730 {
1731     unsigned int level = 0;
1732     int ret;
1733 
1734     if (!type)
1735         return 0;
1736 
1737     if (str[0] == ':') {
1738         ret = kstrtouint(str + 1, 10, &level);
1739         if (ret)
1740             level = 0;
1741     }
1742 
1743     level = btrfs_compress_set_level(type, level);
1744 
1745     return level;
1746 }
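
/*
 * Example (a sketch of the expected caller behaviour): for the mount option
 * "compress=zstd:3" the caller passes the ":3" suffix here, kstrtouint()
 * parses level 3 and btrfs_compress_set_level() clamps it to the zstd range;
 * a bare "compress=zstd" yields an empty suffix, so level stays 0 and the
 * algorithm default is used.
 */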