0006 #include <linux/kernel.h>
0007 #include <linux/bio.h>
0008 #include <linux/file.h>
0009 #include <linux/fs.h>
0010 #include <linux/pagemap.h>
0011 #include <linux/highmem.h>
0012 #include <linux/kthread.h>
0013 #include <linux/time.h>
0014 #include <linux/init.h>
0015 #include <linux/string.h>
0016 #include <linux/backing-dev.h>
0017 #include <linux/writeback.h>
0018 #include <linux/slab.h>
0019 #include <linux/sched/mm.h>
0020 #include <linux/log2.h>
0021 #include <crypto/hash.h>
0022 #include "misc.h"
0023 #include "ctree.h"
0024 #include "disk-io.h"
0025 #include "transaction.h"
0026 #include "btrfs_inode.h"
0027 #include "volumes.h"
0028 #include "ordered-data.h"
0029 #include "compression.h"
0030 #include "extent_io.h"
0031 #include "extent_map.h"
0032 #include "subpage.h"
0033 #include "zoned.h"
0034
0035 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
0036
0037 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
0038 {
0039 switch (type) {
0040 case BTRFS_COMPRESS_ZLIB:
0041 case BTRFS_COMPRESS_LZO:
0042 case BTRFS_COMPRESS_ZSTD:
0043 case BTRFS_COMPRESS_NONE:
0044 return btrfs_compress_types[type];
0045 default:
0046 break;
0047 }
0048
0049 return NULL;
0050 }
0051
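/*
 * Return true when @str (of length @len) begins with the name of one of the
 * supported compression algorithms ("zlib", "lzo" or "zstd").
 */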
0052 bool btrfs_compress_is_valid_type(const char *str, size_t len)
0053 {
0054 int i;
0055
0056 for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
0057 size_t comp_len = strlen(btrfs_compress_types[i]);
0058
0059 if (len < comp_len)
0060 continue;
0061
0062 if (!strncmp(btrfs_compress_types[i], str, comp_len))
0063 return true;
0064 }
0065 return false;
0066 }
0067
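/*
 * Dispatch to the per-algorithm compression implementation.  The workspace
 * @ws must have been obtained with get_workspace() for the same @type.
 */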
0068 static int compression_compress_pages(int type, struct list_head *ws,
0069 struct address_space *mapping, u64 start, struct page **pages,
0070 unsigned long *out_pages, unsigned long *total_in,
0071 unsigned long *total_out)
0072 {
0073 switch (type) {
0074 case BTRFS_COMPRESS_ZLIB:
0075 return zlib_compress_pages(ws, mapping, start, pages,
0076 out_pages, total_in, total_out);
0077 case BTRFS_COMPRESS_LZO:
0078 return lzo_compress_pages(ws, mapping, start, pages,
0079 out_pages, total_in, total_out);
0080 case BTRFS_COMPRESS_ZSTD:
0081 return zstd_compress_pages(ws, mapping, start, pages,
0082 out_pages, total_in, total_out);
0083 case BTRFS_COMPRESS_NONE:
0084 default:
		/*
		 * This can't happen: the compression type is validated long
		 * before we get here.  As a sane fallback, pretend nothing
		 * could be compressed.
		 */
0094 *out_pages = 0;
0095 return -E2BIG;
0096 }
0097 }
0098
0099 static int compression_decompress_bio(struct list_head *ws,
0100 struct compressed_bio *cb)
0101 {
0102 switch (cb->compress_type) {
0103 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
0104 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
0105 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
0106 case BTRFS_COMPRESS_NONE:
0107 default:
		/*
		 * This can't happen: the compression type of an extent is
		 * validated before the bio is submitted.
		 */
0112 BUG();
0113 }
0114 }
0115
0116 static int compression_decompress(int type, struct list_head *ws,
0117 unsigned char *data_in, struct page *dest_page,
0118 unsigned long start_byte, size_t srclen, size_t destlen)
0119 {
0120 switch (type) {
0121 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
0122 start_byte, srclen, destlen);
0123 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
0124 start_byte, srclen, destlen);
0125 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
0126 start_byte, srclen, destlen);
0127 case BTRFS_COMPRESS_NONE:
0128 default:
		/*
		 * This can't happen: the compression type of an extent is
		 * validated before the bio is submitted.
		 */
0133 BUG();
0134 }
0135 }
0136
0137 static int btrfs_decompress_bio(struct compressed_bio *cb);
0138
0139 static void finish_compressed_bio_read(struct compressed_bio *cb)
0140 {
0141 unsigned int index;
0142 struct page *page;
0143
0144 if (cb->status == BLK_STS_OK)
0145 cb->status = errno_to_blk_status(btrfs_decompress_bio(cb));

	/* Release the compressed pages */
0148 for (index = 0; index < cb->nr_pages; index++) {
0149 page = cb->compressed_pages[index];
0150 page->mapping = NULL;
0151 put_page(page);
0152 }

	/* Do io completion on the original bio */
0155 if (cb->status != BLK_STS_OK)
0156 cb->orig_bio->bi_status = cb->status;
0157 bio_endio(cb->orig_bio);

	/* Finally free the compressed_bio struct */
0160 kfree(cb->compressed_pages);
0161 kfree(cb);
0162 }
0163
/*
 * Called when a read bio for compressed data completes: verify checksums on
 * each sector, kick off repair for bad sectors, and once the last outstanding
 * bio for this compressed extent finishes, decompress into the original bio.
 */
0169 static void end_compressed_bio_read(struct bio *bio)
0170 {
0171 struct compressed_bio *cb = bio->bi_private;
0172 struct inode *inode = cb->inode;
0173 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0174 struct btrfs_inode *bi = BTRFS_I(inode);
0175 bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
0176 !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
0177 blk_status_t status = bio->bi_status;
0178 struct btrfs_bio *bbio = btrfs_bio(bio);
0179 struct bvec_iter iter;
0180 struct bio_vec bv;
0181 u32 offset;
0182
0183 btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) {
0184 u64 start = bbio->file_offset + offset;
0185
0186 if (!status &&
0187 (!csum || !btrfs_check_data_csum(inode, bbio, offset,
0188 bv.bv_page, bv.bv_offset))) {
0189 clean_io_failure(fs_info, &bi->io_failure_tree,
0190 &bi->io_tree, start, bv.bv_page,
0191 btrfs_ino(bi), bv.bv_offset);
0192 } else {
0193 int ret;
0194
0195 refcount_inc(&cb->pending_ios);
0196 ret = btrfs_repair_one_sector(inode, bbio, offset,
0197 bv.bv_page, bv.bv_offset,
0198 btrfs_submit_data_read_bio);
0199 if (ret) {
0200 refcount_dec(&cb->pending_ios);
0201 status = errno_to_blk_status(ret);
0202 }
0203 }
0204 }
0205
0206 if (status)
0207 cb->status = status;
0208
0209 if (refcount_dec_and_test(&cb->pending_ios))
0210 finish_compressed_bio_read(cb);
0211 btrfs_bio_free_csum(bbio);
0212 bio_put(bio);
0213 }
0214
/*
 * Clear the writeback bits on all of the file pages for a compressed write.
 */
0219 static noinline void end_compressed_writeback(struct inode *inode,
0220 const struct compressed_bio *cb)
0221 {
0222 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0223 unsigned long index = cb->start >> PAGE_SHIFT;
0224 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
0225 struct page *pages[16];
0226 unsigned long nr_pages = end_index - index + 1;
0227 const int errno = blk_status_to_errno(cb->status);
0228 int i;
0229 int ret;
0230
0231 if (errno)
0232 mapping_set_error(inode->i_mapping, errno);
0233
0234 while (nr_pages > 0) {
0235 ret = find_get_pages_contig(inode->i_mapping, index,
0236 min_t(unsigned long,
0237 nr_pages, ARRAY_SIZE(pages)), pages);
0238 if (ret == 0) {
0239 nr_pages -= 1;
0240 index += 1;
0241 continue;
0242 }
0243 for (i = 0; i < ret; i++) {
0244 if (errno)
0245 SetPageError(pages[i]);
0246 btrfs_page_clamp_clear_writeback(fs_info, pages[i],
0247 cb->start, cb->len);
0248 put_page(pages[i]);
0249 }
0250 nr_pages -= ret;
0251 index += ret;
0252 }
0253
0254 }
0255
0256 static void finish_compressed_bio_write(struct compressed_bio *cb)
0257 {
0258 struct inode *inode = cb->inode;
0259 unsigned int index;
0260
	/* Finish the ordered extent covering the range of this compressed write */
0265 btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
0266 cb->start, cb->start + cb->len - 1,
0267 cb->status == BLK_STS_OK);
0268
0269 if (cb->writeback)
0270 end_compressed_writeback(inode, cb);
0271
	/*
	 * Release the compressed pages.  These were allocated during
	 * compression and are not part of the page cache.
	 */
0277 for (index = 0; index < cb->nr_pages; index++) {
0278 struct page *page = cb->compressed_pages[index];
0279
0280 page->mapping = NULL;
0281 put_page(page);
0282 }
0283
	/* Finally free the compressed_bio struct */
0285 kfree(cb->compressed_pages);
0286 kfree(cb);
0287 }
0288
0289 static void btrfs_finish_compressed_write_work(struct work_struct *work)
0290 {
0291 struct compressed_bio *cb =
0292 container_of(work, struct compressed_bio, write_end_work);
0293
0294 finish_compressed_bio_write(cb);
0295 }
0296
/*
 * Called when a write bio for compressed pages completes.  Record the status
 * and, once the last outstanding bio for this compressed extent finishes,
 * hand the final cleanup (ordered extent completion, clearing writeback,
 * freeing the compressed pages) off to a worker.
 */
0304 static void end_compressed_bio_write(struct bio *bio)
0305 {
0306 struct compressed_bio *cb = bio->bi_private;
0307
0308 if (bio->bi_status)
0309 cb->status = bio->bi_status;
0310
0311 if (refcount_dec_and_test(&cb->pending_ios)) {
0312 struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
0313
0314 btrfs_record_physical_zoned(cb->inode, cb->start, bio);
0315 queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
0316 }
0317 bio_put(bio);
0318 }
0319
/*
 * Allocate a compressed_bio's bio, used to read or write the on-disk
 * (compressed) data.
 *
 * @cb:                 the compressed_bio the new bio belongs to
 * @disk_bytenr:        logical bytenr where the compressed data starts
 * @opf:                bio operation flags
 * @endio_func:         end io function for the bio
 * @next_stripe_start:  return value, logical bytenr where the next stripe
 *                      starts, so the caller only fills the bio up to the
 *                      stripe boundary
 *
 * The returned bio holds a reference on @cb->pending_ios.
 */
0337 static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
0338 blk_opf_t opf, bio_end_io_t endio_func,
0339 u64 *next_stripe_start)
0340 {
0341 struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
0342 struct btrfs_io_geometry geom;
0343 struct extent_map *em;
0344 struct bio *bio;
0345 int ret;
0346
0347 bio = btrfs_bio_alloc(BIO_MAX_VECS);
0348
0349 bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
0350 bio->bi_opf = opf;
0351 bio->bi_private = cb;
0352 bio->bi_end_io = endio_func;
0353
0354 em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
0355 if (IS_ERR(em)) {
0356 bio_put(bio);
0357 return ERR_CAST(em);
0358 }
0359
0360 if (bio_op(bio) == REQ_OP_ZONE_APPEND)
0361 bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);
0362
0363 ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
0364 free_extent_map(em);
0365 if (ret < 0) {
0366 bio_put(bio);
0367 return ERR_PTR(ret);
0368 }
0369 *next_stripe_start = disk_bytenr + geom.len;
0370 refcount_inc(&cb->pending_ios);
0371 return bio;
0372 }
0373
/*
 * Write the compressed pages that back the file range [@start, @start + @len)
 * to disk at @disk_start.
 *
 * The bios are split at stripe (and zone) boundaries, checksummed unless the
 * inode has NODATASUM set, and submitted.  The caller hands over ownership of
 * @compressed_pages, which are freed once the write completes.
 */
0383 blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
0384 unsigned int len, u64 disk_start,
0385 unsigned int compressed_len,
0386 struct page **compressed_pages,
0387 unsigned int nr_pages,
0388 blk_opf_t write_flags,
0389 struct cgroup_subsys_state *blkcg_css,
0390 bool writeback)
0391 {
0392 struct btrfs_fs_info *fs_info = inode->root->fs_info;
0393 struct bio *bio = NULL;
0394 struct compressed_bio *cb;
0395 u64 cur_disk_bytenr = disk_start;
0396 u64 next_stripe_start;
0397 blk_status_t ret = BLK_STS_OK;
0398 int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
0399 const bool use_append = btrfs_use_zone_append(inode, disk_start);
0400 const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
0401
0402 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
0403 IS_ALIGNED(len, fs_info->sectorsize));
0404 cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
0405 if (!cb)
0406 return BLK_STS_RESOURCE;
0407 refcount_set(&cb->pending_ios, 1);
0408 cb->status = BLK_STS_OK;
0409 cb->inode = &inode->vfs_inode;
0410 cb->start = start;
0411 cb->len = len;
0412 cb->compressed_pages = compressed_pages;
0413 cb->compressed_len = compressed_len;
0414 cb->writeback = writeback;
0415 INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
0416 cb->nr_pages = nr_pages;
0417
0418 if (blkcg_css)
0419 kthread_associate_blkcg(blkcg_css);
0420
0421 while (cur_disk_bytenr < disk_start + compressed_len) {
0422 u64 offset = cur_disk_bytenr - disk_start;
0423 unsigned int index = offset >> PAGE_SHIFT;
0424 unsigned int real_size;
0425 unsigned int added;
0426 struct page *page = compressed_pages[index];
0427 bool submit = false;
0428
		/* Allocate a new bio if this is the first iteration or the previous one was submitted */
0430 if (!bio) {
0431 bio = alloc_compressed_bio(cb, cur_disk_bytenr,
0432 bio_op | write_flags, end_compressed_bio_write,
0433 &next_stripe_start);
0434 if (IS_ERR(bio)) {
0435 ret = errno_to_blk_status(PTR_ERR(bio));
0436 break;
0437 }
0438 if (blkcg_css)
0439 bio->bi_opf |= REQ_CGROUP_PUNT;
0440 }
0441
		/*
		 * We must never have crossed next_stripe_start: the bio is
		 * always submitted as soon as it reaches the boundary.
		 */
0445 ASSERT(cur_disk_bytenr != next_stripe_start);
0446
		/*
		 * The added length is limited by:
		 * - the stripe boundary
		 * - the page boundary
		 * - the remaining compressed length
		 */
0453 real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
0454 real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
0455 real_size = min_t(u64, real_size, compressed_len - offset);
0456 ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
0457
0458 if (use_append)
0459 added = bio_add_zone_append_page(bio, page, real_size,
0460 offset_in_page(offset));
0461 else
0462 added = bio_add_page(bio, page, real_size,
0463 offset_in_page(offset));
		/* The bio can take no more data: submit it and allocate a new one */
0465 if (added == 0)
0466 submit = true;
0467
0468 cur_disk_bytenr += added;
		/* Reached the stripe boundary */
0470 if (cur_disk_bytenr == next_stripe_start)
0471 submit = true;
0472
		/* Finished the whole compressed extent */
0474 if (cur_disk_bytenr == disk_start + compressed_len)
0475 submit = true;
0476
0477 if (submit) {
0478 if (!skip_sum) {
0479 ret = btrfs_csum_one_bio(inode, bio, start, true);
0480 if (ret) {
0481 bio->bi_status = ret;
0482 bio_endio(bio);
0483 break;
0484 }
0485 }
0486
0487 ASSERT(bio->bi_iter.bi_size);
0488 btrfs_submit_bio(fs_info, bio, 0);
0489 bio = NULL;
0490 }
0491 cond_resched();
0492 }
0493
0494 if (blkcg_css)
0495 kthread_associate_blkcg(NULL);
0496
0497 if (refcount_dec_and_test(&cb->pending_ios))
0498 finish_compressed_bio_write(cb);
0499 return ret;
0500 }
0501
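/* Return the file offset right after the last byte covered by @bio */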
0502 static u64 bio_end_offset(struct bio *bio)
0503 {
0504 struct bio_vec *last = bio_last_bvec_all(bio);
0505
0506 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
0507 }
0508
/*
 * Try to add pages that come after cb->orig_bio but are still covered by the
 * same compressed extent (up to @compressed_end) to the page cache and to
 * cb->orig_bio, so that decompression can fill them in the same pass instead
 * of re-reading and re-decompressing the extent later.
 */
0520 static noinline int add_ra_bio_pages(struct inode *inode,
0521 u64 compressed_end,
0522 struct compressed_bio *cb)
0523 {
0524 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0525 unsigned long end_index;
0526 u64 cur = bio_end_offset(cb->orig_bio);
0527 u64 isize = i_size_read(inode);
0528 int ret;
0529 struct page *page;
0530 struct extent_map *em;
0531 struct address_space *mapping = inode->i_mapping;
0532 struct extent_map_tree *em_tree;
0533 struct extent_io_tree *tree;
0534 int sectors_missed = 0;
0535
0536 em_tree = &BTRFS_I(inode)->extent_tree;
0537 tree = &BTRFS_I(inode)->io_tree;
0538
0539 if (isize == 0)
0540 return 0;
0541
	/*
	 * Readahead here only works when the sectorsize matches PAGE_SIZE.
	 * For subpage (sectorsize < PAGE_SIZE) skip it for now.
	 */
0549 if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
0550 return 0;
0551
0552 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
0553
0554 while (cur < compressed_end) {
0555 u64 page_end;
0556 u64 pg_index = cur >> PAGE_SHIFT;
0557 u32 add_size;
0558
0559 if (pg_index > end_index)
0560 break;
0561
0562 page = xa_load(&mapping->i_pages, pg_index);
0563 if (page && !xa_is_value(page)) {
0564 sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
0565 fs_info->sectorsize_bits;
0566
			/* Beyond the miss threshold, no need to continue */
0568 if (sectors_missed > 4)
0569 break;
0570
			/*
			 * Jump to the next page start: we already have a page
			 * for the current index.
			 */
0575 cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
0576 continue;
0577 }
0578
0579 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
0580 ~__GFP_FS));
0581 if (!page)
0582 break;
0583
0584 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
0585 put_page(page);
			/* The page was added by someone else, skip to the next page */
0587 cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
0588 continue;
0589 }
0590
0591 ret = set_page_extent_mapped(page);
0592 if (ret < 0) {
0593 unlock_page(page);
0594 put_page(page);
0595 break;
0596 }
0597
0598 page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
0599 lock_extent(tree, cur, page_end);
0600 read_lock(&em_tree->lock);
0601 em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
0602 read_unlock(&em_tree->lock);
0603
		/*
		 * At this point we have a locked page in the page cache for
		 * these bytes in the file.  But we have to make sure they map
		 * to this compressed extent on disk.
		 */
0609 if (!em || cur < em->start ||
0610 (cur + fs_info->sectorsize > extent_map_end(em)) ||
0611 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
0612 free_extent_map(em);
0613 unlock_extent(tree, cur, page_end);
0614 unlock_page(page);
0615 put_page(page);
0616 break;
0617 }
0619
0620 if (page->index == end_index) {
0621 size_t zero_offset = offset_in_page(isize);
0622
0623 if (zero_offset) {
0624 int zeros;
0625 zeros = PAGE_SIZE - zero_offset;
0626 memzero_page(page, zero_offset, zeros);
0627 }
0628 }
0629
		add_size = min(em->start + em->len, page_end + 1) - cur;
		/* Only drop our extent_map reference once we are done using it */
		free_extent_map(em);
0631 ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
0632 if (ret != add_size) {
0633 unlock_extent(tree, cur, page_end);
0634 unlock_page(page);
0635 put_page(page);
0636 break;
0637 }
0638
		/*
		 * On a subpage filesystem also bump the page's subpage reader
		 * count, as endio will decrement it when unlocking the page.
		 */
0643 if (fs_info->sectorsize < PAGE_SIZE)
0644 btrfs_subpage_start_reader(fs_info, page, cur, add_size);
0645 put_page(page);
0646 cur += add_size;
0647 }
0648 return 0;
0649 }
0650
/*
 * Main entry point for reads of a compressed extent.
 *
 * The pages in @bio are the uncompressed destination pages; the bio's sector
 * points at the compressed data on disk.  Allocate pages for the compressed
 * data, read it (adding readahead pages where possible), and decompress into
 * the original bio once the read completes.
 */
0662 void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
0663 int mirror_num)
0664 {
0665 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0666 struct extent_map_tree *em_tree;
0667 struct compressed_bio *cb;
0668 unsigned int compressed_len;
0669 struct bio *comp_bio = NULL;
0670 const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
0671 u64 cur_disk_byte = disk_bytenr;
0672 u64 next_stripe_start;
0673 u64 file_offset;
0674 u64 em_len;
0675 u64 em_start;
0676 struct extent_map *em;
0677 blk_status_t ret;
0678 int ret2;
0679 int i;
0680
0681 em_tree = &BTRFS_I(inode)->extent_tree;
0682
0683 file_offset = bio_first_bvec_all(bio)->bv_offset +
0684 page_offset(bio_first_page_all(bio));
0685
	/* Find the extent map that covers the start of this bio */
0687 read_lock(&em_tree->lock);
0688 em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
0689 read_unlock(&em_tree->lock);
0690 if (!em) {
0691 ret = BLK_STS_IOERR;
0692 goto out;
0693 }
0694
0695 ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
0696 compressed_len = em->block_len;
0697 cb = kmalloc(sizeof(struct compressed_bio), GFP_NOFS);
0698 if (!cb) {
0699 ret = BLK_STS_RESOURCE;
0700 goto out;
0701 }
0702
0703 refcount_set(&cb->pending_ios, 1);
0704 cb->status = BLK_STS_OK;
0705 cb->inode = inode;
0706
0707 cb->start = em->orig_start;
0708 em_len = em->len;
0709 em_start = em->start;
0710
0711 cb->len = bio->bi_iter.bi_size;
0712 cb->compressed_len = compressed_len;
0713 cb->compress_type = em->compress_type;
0714 cb->orig_bio = bio;
0715
0716 free_extent_map(em);
0717 em = NULL;
0718
0719 cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
0720 cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
0721 if (!cb->compressed_pages) {
0722 ret = BLK_STS_RESOURCE;
0723 goto fail;
0724 }
0725
0726 ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
0727 if (ret2) {
0728 ret = BLK_STS_RESOURCE;
0729 goto fail;
0730 }
0731
0732 add_ra_bio_pages(inode, em_start + em_len, cb);
0733
	/* Include any pages added by add_ra_bio_pages() in the length to decompress */
0735 cb->len = bio->bi_iter.bi_size;
0736
0737 while (cur_disk_byte < disk_bytenr + compressed_len) {
0738 u64 offset = cur_disk_byte - disk_bytenr;
0739 unsigned int index = offset >> PAGE_SHIFT;
0740 unsigned int real_size;
0741 unsigned int added;
0742 struct page *page = cb->compressed_pages[index];
0743 bool submit = false;
0744
		/* Allocate a new bio if this is the first iteration or the previous one was submitted */
0746 if (!comp_bio) {
0747 comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
0748 REQ_OP_READ, end_compressed_bio_read,
0749 &next_stripe_start);
0750 if (IS_ERR(comp_bio)) {
0751 cb->status = errno_to_blk_status(PTR_ERR(comp_bio));
0752 break;
0753 }
0754 }
0755
		/*
		 * We must never have crossed next_stripe_start: the bio is
		 * always submitted as soon as it reaches the boundary.
		 */
0759 ASSERT(cur_disk_byte != next_stripe_start);
0760
		/*
		 * The added length is limited by:
		 * - the stripe boundary
		 * - the page boundary
		 * - the remaining compressed length
		 */
0766 real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
0767 real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
0768 real_size = min_t(u64, real_size, compressed_len - offset);
0769 ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
0770
0771 added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
0772
		/*
		 * The maximum compressed extent is smaller than the bio size
		 * limit, so bio_add_page() should always succeed.
		 */
0776 ASSERT(added == real_size);
0777 cur_disk_byte += added;
0778
		/* Reached the stripe boundary, need to submit */
0780 if (cur_disk_byte == next_stripe_start)
0781 submit = true;
0782
		/* Finished the whole compressed extent */
0784 if (cur_disk_byte == disk_bytenr + compressed_len)
0785 submit = true;
0786
0787 if (submit) {
			/* Save the original iter for read repair */
0789 if (bio_op(comp_bio) == REQ_OP_READ)
0790 btrfs_bio(comp_bio)->iter = comp_bio->bi_iter;
0791
			/*
			 * Every split bio for this compressed extent records
			 * the same file_offset: the file offset where the
			 * uncompressed data of the extent begins.
			 */
0798 btrfs_bio(comp_bio)->file_offset = file_offset;
0799
0800 ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
0801 if (ret) {
0802 comp_bio->bi_status = ret;
0803 bio_endio(comp_bio);
0804 break;
0805 }
0806
0807 ASSERT(comp_bio->bi_iter.bi_size);
0808 btrfs_submit_bio(fs_info, comp_bio, mirror_num);
0809 comp_bio = NULL;
0810 }
0811 }
0812
0813 if (refcount_dec_and_test(&cb->pending_ios))
0814 finish_compressed_bio_read(cb);
0815 return;
0816
0817 fail:
0818 if (cb->compressed_pages) {
0819 for (i = 0; i < cb->nr_pages; i++) {
0820 if (cb->compressed_pages[i])
0821 __free_page(cb->compressed_pages[i]);
0822 }
0823 }
0824
0825 kfree(cb->compressed_pages);
0826 kfree(cb);
0827 out:
0828 free_extent_map(em);
0829 bio->bi_status = ret;
0830 bio_endio(bio);
0831 return;
0832 }
0833
/*
 * The heuristic uses systematic sampling of the input range: roughly
 * SAMPLING_READ_SIZE bytes are copied from every SAMPLING_INTERVAL bytes.
 */
0841 #define SAMPLING_READ_SIZE (16)
0842 #define SAMPLING_INTERVAL (256)
0843
/*
 * There are 256 possible byte values, so the bucket array keeps one counter
 * per value.
 */
0849 #define BUCKET_SIZE (256)
0850
/*
 * Maximum size of the sample: with BTRFS_MAX_UNCOMPRESSED (128K) of input and
 * SAMPLING_READ_SIZE bytes taken every SAMPLING_INTERVAL bytes, the sample is
 * at most 8K.
 */
0863 #define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
0864 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
0865
0866 struct bucket_item {
0867 u32 count;
0868 };
0869
0870 struct heuristic_ws {
	/* Partial copy of the input data, gathered by heuristic_collect_sample() */
0872 u8 *sample;
0873 u32 sample_size;
	/* Counter buckets, one per possible byte value */
0875 struct bucket_item *bucket;
	/* Sorting buffer used by radix_sort() */
0877 struct bucket_item *bucket_b;
0878 struct list_head list;
0879 };
0880
0881 static struct workspace_manager heuristic_wsm;
0882
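/* Free a heuristic workspace allocated by alloc_heuristic_ws() */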
0883 static void free_heuristic_ws(struct list_head *ws)
0884 {
0885 struct heuristic_ws *workspace;
0886
0887 workspace = list_entry(ws, struct heuristic_ws, list);
0888
0889 kvfree(workspace->sample);
0890 kfree(workspace->bucket);
0891 kfree(workspace->bucket_b);
0892 kfree(workspace);
0893 }
0894
0895 static struct list_head *alloc_heuristic_ws(unsigned int level)
0896 {
0897 struct heuristic_ws *ws;
0898
0899 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
0900 if (!ws)
0901 return ERR_PTR(-ENOMEM);
0902
0903 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
0904 if (!ws->sample)
0905 goto fail;
0906
0907 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
0908 if (!ws->bucket)
0909 goto fail;
0910
0911 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
0912 if (!ws->bucket_b)
0913 goto fail;
0914
0915 INIT_LIST_HEAD(&ws->list);
0916 return &ws->list;
0917 fail:
0918 free_heuristic_ws(&ws->list);
0919 return ERR_PTR(-ENOMEM);
0920 }
0921
0922 const struct btrfs_compress_op btrfs_heuristic_compress = {
0923 .workspace_manager = &heuristic_wsm,
0924 };
0925
0926 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* Index 0 (BTRFS_COMPRESS_NONE) holds the heuristic workspace manager */
0928 &btrfs_heuristic_compress,
0929 &btrfs_zlib_compress,
0930 &btrfs_lzo_compress,
0931 &btrfs_zstd_compress,
0932 };
0933
0934 static struct list_head *alloc_workspace(int type, unsigned int level)
0935 {
0936 switch (type) {
0937 case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
0938 case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
0939 case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
0940 case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
0941 default:
		/*
		 * This can't happen: the compression type is validated
		 * before we get here.
		 */
0946 BUG();
0947 }
0948 }
0949
0950 static void free_workspace(int type, struct list_head *ws)
0951 {
0952 switch (type) {
0953 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
0954 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
0955 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
0956 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
0957 default:
		/*
		 * This can't happen: the compression type is validated
		 * before we get here.
		 */
0962 BUG();
0963 }
0964 }
0965
0966 static void btrfs_init_workspace_manager(int type)
0967 {
0968 struct workspace_manager *wsm;
0969 struct list_head *workspace;
0970
0971 wsm = btrfs_compress_op[type]->workspace_manager;
0972 INIT_LIST_HEAD(&wsm->idle_ws);
0973 spin_lock_init(&wsm->ws_lock);
0974 atomic_set(&wsm->total_ws, 0);
0975 init_waitqueue_head(&wsm->ws_wait);
0976
	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case.
	 */
0981 workspace = alloc_workspace(type, 0);
0982 if (IS_ERR(workspace)) {
0983 pr_warn(
0984 "BTRFS: cannot preallocate compression workspace, will try later\n");
0985 } else {
0986 atomic_set(&wsm->total_ws, 1);
0987 wsm->free_ws = 1;
0988 list_add(workspace, &wsm->idle_ws);
0989 }
0990 }
0991
0992 static void btrfs_cleanup_workspace_manager(int type)
0993 {
0994 struct workspace_manager *wsman;
0995 struct list_head *ws;
0996
0997 wsman = btrfs_compress_op[type]->workspace_manager;
0998 while (!list_empty(&wsman->idle_ws)) {
0999 ws = wsman->idle_ws.next;
1000 list_del(ws);
1001 free_workspace(type, ws);
1002 atomic_dec(&wsman->total_ws);
1003 }
1004 }
1005
/*
 * Find an available workspace or allocate a new one.  ENOMEM is not fatal
 * here: if no workspace can be allocated right now, the caller waits until
 * one is freed, so forward progress only needs a single preallocated
 * workspace per type.
 */
1012 struct list_head *btrfs_get_workspace(int type, unsigned int level)
1013 {
1014 struct workspace_manager *wsm;
1015 struct list_head *workspace;
1016 int cpus = num_online_cpus();
1017 unsigned nofs_flag;
1018 struct list_head *idle_ws;
1019 spinlock_t *ws_lock;
1020 atomic_t *total_ws;
1021 wait_queue_head_t *ws_wait;
1022 int *free_ws;
1023
1024 wsm = btrfs_compress_op[type]->workspace_manager;
1025 idle_ws = &wsm->idle_ws;
1026 ws_lock = &wsm->ws_lock;
1027 total_ws = &wsm->total_ws;
1028 ws_wait = &wsm->ws_wait;
1029 free_ws = &wsm->free_ws;
1030
1031 again:
1032 spin_lock(ws_lock);
1033 if (!list_empty(idle_ws)) {
1034 workspace = idle_ws->next;
1035 list_del(workspace);
1036 (*free_ws)--;
1037 spin_unlock(ws_lock);
1038 return workspace;
1039
1040 }
1041 if (atomic_read(total_ws) > cpus) {
1042 DEFINE_WAIT(wait);
1043
1044 spin_unlock(ws_lock);
1045 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1046 if (atomic_read(total_ws) > cpus && !*free_ws)
1047 schedule();
1048 finish_wait(ws_wait, &wait);
1049 goto again;
1050 }
1051 atomic_inc(total_ws);
1052 spin_unlock(ws_lock);
1053
	/*
	 * The allocation helpers may use vmalloc, which cannot take GFP_NOFS
	 * directly, so scope the NOFS behaviour with
	 * memalloc_nofs_save()/restore().
	 */
1059 nofs_flag = memalloc_nofs_save();
1060 workspace = alloc_workspace(type, level);
1061 memalloc_nofs_restore(nofs_flag);
1062
1063 if (IS_ERR(workspace)) {
1064 atomic_dec(total_ws);
1065 wake_up(ws_wait);
1066
		/*
		 * Do not return the error but go back to waiting.  There is a
		 * workspace preallocated for each type and the compression
		 * code cannot deal with allocation failures here, so it is
		 * safe to wait for one to become idle.
		 */
1077 if (atomic_read(total_ws) == 0) {
1078 static DEFINE_RATELIMIT_STATE(_rs,
1079 60 * HZ,
1080 1);
1081
1082 if (__ratelimit(&_rs)) {
1083 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1084 }
1085 }
1086 goto again;
1087 }
1088 return workspace;
1089 }
1090
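/*
 * Per-type wrapper around btrfs_get_workspace(): zlib and zstd have their own
 * level-aware helpers, the other types use the generic manager directly.
 */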
1091 static struct list_head *get_workspace(int type, int level)
1092 {
1093 switch (type) {
1094 case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1095 case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1096 case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
1097 case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1098 default:
		/*
		 * This can't happen: the compression type is validated
		 * before we get here.
		 */
1103 BUG();
1104 }
1105 }
1106
/*
 * Put a workspace back on the idle list, or free it if we already have enough
 * idle workspaces sitting around.
 */
1111 void btrfs_put_workspace(int type, struct list_head *ws)
1112 {
1113 struct workspace_manager *wsm;
1114 struct list_head *idle_ws;
1115 spinlock_t *ws_lock;
1116 atomic_t *total_ws;
1117 wait_queue_head_t *ws_wait;
1118 int *free_ws;
1119
1120 wsm = btrfs_compress_op[type]->workspace_manager;
1121 idle_ws = &wsm->idle_ws;
1122 ws_lock = &wsm->ws_lock;
1123 total_ws = &wsm->total_ws;
1124 ws_wait = &wsm->ws_wait;
1125 free_ws = &wsm->free_ws;
1126
1127 spin_lock(ws_lock);
1128 if (*free_ws <= num_online_cpus()) {
1129 list_add(ws, idle_ws);
1130 (*free_ws)++;
1131 spin_unlock(ws_lock);
1132 goto wake;
1133 }
1134 spin_unlock(ws_lock);
1135
1136 free_workspace(type, ws);
1137 atomic_dec(total_ws);
1138 wake:
1139 cond_wake_up(ws_wait);
1140 }
1141
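/*
 * Per-type wrapper around btrfs_put_workspace(); zstd manages its own
 * workspace recycling.
 */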
1142 static void put_workspace(int type, struct list_head *ws)
1143 {
1144 switch (type) {
1145 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1146 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1147 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
1148 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1149 default:
		/*
		 * This can't happen: the compression type is validated
		 * before we get here.
		 */
1154 BUG();
1155 }
1156 }
1157
/*
 * Adjust @level to the limits of the compression algorithm, or fall back to
 * the default level if @level is 0.
 */
1162 static unsigned int btrfs_compress_set_level(int type, unsigned level)
1163 {
1164 const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1165
1166 if (level == 0)
1167 level = ops->default_level;
1168 else
1169 level = min(level, ops->max_level);
1170
1171 return level;
1172 }
1173
/*
 * Compress data from @mapping starting at file offset @start into @pages.
 *
 * @type_level encodes both the compression algorithm and its level; level 0
 * selects the algorithm's default.
 *
 * @out_pages returns the number of compressed pages that were filled in,
 * @total_in the number of uncompressed bytes consumed and @total_out the
 * total number of compressed bytes produced.
 */
1194 int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1195 u64 start, struct page **pages,
1196 unsigned long *out_pages,
1197 unsigned long *total_in,
1198 unsigned long *total_out)
1199 {
1200 int type = btrfs_compress_type(type_level);
1201 int level = btrfs_compress_level(type_level);
1202 struct list_head *workspace;
1203 int ret;
1204
1205 level = btrfs_compress_set_level(type, level);
1206 workspace = get_workspace(type, level);
1207 ret = compression_compress_pages(type, workspace, mapping, start, pages,
1208 out_pages, total_in, total_out);
1209 put_workspace(type, workspace);
1210 return ret;
1211 }
1212
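/*
 * Decompress the whole compressed extent described by @cb into the pages of
 * cb->orig_bio, using a workspace of the matching compression type.
 */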
1213 static int btrfs_decompress_bio(struct compressed_bio *cb)
1214 {
1215 struct list_head *workspace;
1216 int ret;
1217 int type = cb->compress_type;
1218
1219 workspace = get_workspace(type, 0);
1220 ret = compression_decompress_bio(workspace, cb);
1221 put_workspace(type, workspace);
1222
1223 return ret;
1224 }
1225
/*
 * A simpler decompression routine for the case where the compressed data fits
 * in a single page and we only want a single destination page out of it.
 * @start_byte is the offset into the decompressed data we are interested in.
 */
1231 int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1232 unsigned long start_byte, size_t srclen, size_t destlen)
1233 {
1234 struct list_head *workspace;
1235 int ret;
1236
1237 workspace = get_workspace(type, 0);
1238 ret = compression_decompress(type, workspace, data_in, dest_page,
1239 start_byte, srclen, destlen);
1240 put_workspace(type, workspace);
1241
1242 return ret;
1243 }
1244
1245 void __init btrfs_init_compress(void)
1246 {
1247 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1248 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1249 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1250 zstd_init_workspace_manager();
1251 }
1252
1253 void __cold btrfs_exit_compress(void)
1254 {
1255 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1256 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1257 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1258 zstd_cleanup_workspace_manager();
1259 }
1260
/*
 * Copy decompressed data from the working buffer into the pages of
 * cb->orig_bio.
 *
 * @buf_len:		number of decompressed bytes available in @buf
 * @decompressed:	offset of @buf inside the full decompressed extent,
 *			i.e. how many decompressed bytes precede this buffer
 *
 * The bio iterator of cb->orig_bio is advanced as data is copied, so repeated
 * calls continue where the previous one stopped.
 *
 * Return 0 if the original bio is completely filled, 1 if more decompressed
 * data is needed.
 */
1291 int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
1292 struct compressed_bio *cb, u32 decompressed)
1293 {
1294 struct bio *orig_bio = cb->orig_bio;
1295
1296 u32 cur_offset;
1297
1298 cur_offset = decompressed;
1299
1300 while (cur_offset < decompressed + buf_len) {
1301 struct bio_vec bvec;
1302 size_t copy_len;
1303 u32 copy_start;
1304
1305 u32 bvec_offset;
1306
1307 bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
1308
1309
1310
1311
1312 bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;
1313
1314
1315 if (decompressed + buf_len <= bvec_offset)
1316 return 1;
1317
1318 copy_start = max(cur_offset, bvec_offset);
1319 copy_len = min(bvec_offset + bvec.bv_len,
1320 decompressed + buf_len) - copy_start;
1321 ASSERT(copy_len);
1322
1323
1324
1325
1326
1327 ASSERT(copy_start - decompressed < buf_len);
1328 memcpy_to_page(bvec.bv_page, bvec.bv_offset,
1329 buf + copy_start - decompressed, copy_len);
1330 cur_offset += copy_len;
1331
1332 bio_advance(orig_bio, copy_len);
1333
1334 if (!orig_bio->bi_iter.bi_size)
1335 return 0;
1336 }
1337 return 1;
1338 }
1339
/*
 * Shannon entropy thresholds, in percent of the theoretical maximum.
 *
 * Below ENTROPY_LVL_ACEPTABLE the data is clearly compressible; between the
 * two levels it may still compress reasonably well; at or above
 * ENTROPY_LVL_HIGH it is treated as incompressible.
 */
1357 #define ENTROPY_LVL_ACEPTABLE (65)
1358 #define ENTROPY_LVL_HIGH (80)
1359
/*
 * For increased precision in shannon_entropy() take the logarithm of
 * pow(n, 4) instead of n: ilog2(MAX_SAMPLE_SIZE) is 13, and 13 * 4 = 52 still
 * fits in 64 bits, so there is no risk of overflow.
 */
1370 static inline u32 ilog2_w(u64 n)
1371 {
1372 return ilog2(n * n * n * n);
1373 }
1374
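/*
 * Estimate the Shannon entropy of the sample from the byte-value buckets and
 * scale it to a 0-100 range, where 100 means maximally random data.
 */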
1375 static u32 shannon_entropy(struct heuristic_ws *ws)
1376 {
1377 const u32 entropy_max = 8 * ilog2_w(2);
1378 u32 entropy_sum = 0;
1379 u32 p, p_base, sz_base;
1380 u32 i;
1381
1382 sz_base = ilog2_w(ws->sample_size);
1383 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1384 p = ws->bucket[i].count;
1385 p_base = ilog2_w(p);
1386 entropy_sum += p * (sz_base - p_base);
1387 }
1388
1389 entropy_sum /= ws->sample_size;
1390 return entropy_sum * 100 / entropy_max;
1391 }
1392
1393 #define RADIX_BASE 4U
1394 #define COUNTERS_SIZE (1U << RADIX_BASE)
1395
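/*
 * Extract the 4-bit digit of @num at bit position @shift, inverted so that
 * radix_sort() produces descending order.
 */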
1396 static u8 get4bits(u64 num, int shift) {
1397 u8 low4bits;
1398
1399 num >>= shift;
1400
1401 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1402 return low4bits;
1403 }
1404
/*
 * Radix sort of @array by the count member, RADIX_BASE (4) bits per pass, in
 * descending order (get4bits() inverts the digits so the largest counts end
 * up first).  @array_buf is scratch space of the same size as @array.
 */
1414 static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1415 int num)
1416 {
1417 u64 max_num;
1418 u64 buf_num;
1419 u32 counters[COUNTERS_SIZE];
1420 u32 new_addr;
1421 u32 addr;
1422 int bitlen;
1423 int shift;
1424 int i;
1425
	/* Find the maximum count so we know how many passes are needed */
1430 max_num = array[0].count;
1431 for (i = 1; i < num; i++) {
1432 buf_num = array[i].count;
1433 if (buf_num > max_num)
1434 max_num = buf_num;
1435 }
1436
1437 buf_num = ilog2(max_num);
1438 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1439
1440 shift = 0;
1441 while (shift < bitlen) {
1442 memset(counters, 0, sizeof(counters));
1443
1444 for (i = 0; i < num; i++) {
1445 buf_num = array[i].count;
1446 addr = get4bits(buf_num, shift);
1447 counters[addr]++;
1448 }
1449
1450 for (i = 1; i < COUNTERS_SIZE; i++)
1451 counters[i] += counters[i - 1];
1452
1453 for (i = num - 1; i >= 0; i--) {
1454 buf_num = array[i].count;
1455 addr = get4bits(buf_num, shift);
1456 counters[addr]--;
1457 new_addr = counters[addr];
1458 array_buf[new_addr] = array[i];
1459 }
1460
1461 shift += RADIX_BASE;
1462
		/*
		 * A classic radix sort would copy array_buf back into array
		 * here.  Avoid the memcpy by running the next pass from
		 * array_buf back into array instead.
		 */
1469 memset(counters, 0, sizeof(counters));
1470
1471 for (i = 0; i < num; i ++) {
1472 buf_num = array_buf[i].count;
1473 addr = get4bits(buf_num, shift);
1474 counters[addr]++;
1475 }
1476
1477 for (i = 1; i < COUNTERS_SIZE; i++)
1478 counters[i] += counters[i - 1];
1479
1480 for (i = num - 1; i >= 0; i--) {
1481 buf_num = array_buf[i].count;
1482 addr = get4bits(buf_num, shift);
1483 counters[addr]--;
1484 new_addr = counters[addr];
1485 array[new_addr] = array_buf[i];
1486 }
1487
1488 shift += RADIX_BASE;
1489 }
1490 }
1491
/*
 * Size of the core byte set: how many of the most frequent byte values are
 * needed to cover 90% of the sample.
 *
 * A small core set means the data is dominated by a few byte values and is
 * likely compressible; a very large one, spread over most byte values, is a
 * strong hint that it is not.
 */
1508 #define BYTE_CORE_SET_LOW (64)
1509 #define BYTE_CORE_SET_HIGH (200)
1510
1511 static int byte_core_set_size(struct heuristic_ws *ws)
1512 {
1513 u32 i;
1514 u32 coreset_sum = 0;
1515 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1516 struct bucket_item *bucket = ws->bucket;
1517
1518
1519 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1520
1521 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1522 coreset_sum += bucket[i].count;
1523
1524 if (coreset_sum > core_set_threshold)
1525 return i;
1526
1527 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1528 coreset_sum += bucket[i].count;
1529 if (coreset_sum > core_set_threshold)
1530 break;
1531 }
1532
1533 return i;
1534 }
1535
/*
 * Count how many distinct byte values occur in the sample.  Text-like data
 * typically uses a small subset of the 256 possible values; counting stops
 * early once the set grows past BYTE_SET_THRESHOLD, since the exact size no
 * longer matters at that point.
 */
1547 #define BYTE_SET_THRESHOLD (64)
1548
1549 static u32 byte_set_size(const struct heuristic_ws *ws)
1550 {
1551 u32 i;
1552 u32 byte_set_size = 0;
1553
1554 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1555 if (ws->bucket[i].count > 0)
1556 byte_set_size++;
1557 }
1558
	/* Bail out as soon as the set size exceeds the threshold */
1564 for (; i < BUCKET_SIZE; i++) {
1565 if (ws->bucket[i].count > 0) {
1566 byte_set_size++;
1567 if (byte_set_size > BYTE_SET_THRESHOLD)
1568 return byte_set_size;
1569 }
1570 }
1571
1572 return byte_set_size;
1573 }
1574
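/*
 * Detect trivially repeated data by comparing the two halves of the sample;
 * identical halves (e.g. all zeroes) mean the range is almost certainly
 * compressible.
 */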
1575 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1576 {
1577 const u32 half_of_sample = ws->sample_size / 2;
1578 const u8 *data = ws->sample;
1579
1580 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1581 }
1582
1583 static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1584 struct heuristic_ws *ws)
1585 {
1586 struct page *page;
1587 u64 index, index_end;
1588 u32 i, curr_sample_pos;
1589 u8 *in_data;
1590
	/*
	 * Compression handles the input in chunks of at most
	 * BTRFS_MAX_UNCOMPRESSED bytes, so limit the sample to the first
	 * chunk of the range.
	 */
1600 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1601 end = start + BTRFS_MAX_UNCOMPRESSED;
1602
1603 index = start >> PAGE_SHIFT;
1604 index_end = end >> PAGE_SHIFT;
1605
	/* Don't miss a partially filled last page */
1607 if (!IS_ALIGNED(end, PAGE_SIZE))
1608 index_end++;
1609
1610 curr_sample_pos = 0;
1611 while (index < index_end) {
1612 page = find_get_page(inode->i_mapping, index);
1613 in_data = kmap_local_page(page);
1614
1615 i = start % PAGE_SIZE;
1616 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample garbage beyond the end of the range */
1618 if (start > end - SAMPLING_READ_SIZE)
1619 break;
1620 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1621 SAMPLING_READ_SIZE);
1622 i += SAMPLING_INTERVAL;
1623 start += SAMPLING_INTERVAL;
1624 curr_sample_pos += SAMPLING_READ_SIZE;
1625 }
1626 kunmap_local(in_data);
1627 put_page(page);
1628
1629 index++;
1630 }
1631
1632 ws->sample_size = curr_sample_pos;
1633 }
1634
/*
 * Compression heuristic: decide, from a small sample of the range
 * [@start, @end), whether compressing it is likely to pay off.
 *
 * The checks, in order:
 * - repeated pattern detection (e.g. zeroed data)
 * - small byte set size (text-like data)
 * - small core byte set (data dominated by a few values)
 * - Shannon entropy estimate
 *
 * Return non-zero if the range looks compressible, 0 otherwise.
 */
1650 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1651 {
1652 struct list_head *ws_list = get_workspace(0, 0);
1653 struct heuristic_ws *ws;
1654 u32 i;
1655 u8 byte;
1656 int ret = 0;
1657
1658 ws = list_entry(ws_list, struct heuristic_ws, list);
1659
1660 heuristic_collect_sample(inode, start, end, ws);
1661
1662 if (sample_repeated_patterns(ws)) {
1663 ret = 1;
1664 goto out;
1665 }
1666
1667 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1668
1669 for (i = 0; i < ws->sample_size; i++) {
1670 byte = ws->sample[i];
1671 ws->bucket[byte].count++;
1672 }
1673
1674 i = byte_set_size(ws);
1675 if (i < BYTE_SET_THRESHOLD) {
1676 ret = 2;
1677 goto out;
1678 }
1679
1680 i = byte_core_set_size(ws);
1681 if (i <= BYTE_CORE_SET_LOW) {
1682 ret = 3;
1683 goto out;
1684 }
1685
1686 if (i >= BYTE_CORE_SET_HIGH) {
1687 ret = 0;
1688 goto out;
1689 }
1690
1691 i = shannon_entropy(ws);
1692 if (i <= ENTROPY_LVL_ACEPTABLE) {
1693 ret = 4;
1694 goto out;
1695 }
1696
	/*
	 * Entropy between ENTROPY_LVL_ACEPTABLE and ENTROPY_LVL_HIGH is a
	 * grey zone: the data may still compress reasonably well, so give it
	 * a try.  Anything at or above ENTROPY_LVL_HIGH is treated as
	 * incompressible.
	 */
1712 if (i < ENTROPY_LVL_HIGH) {
1713 ret = 5;
1714 goto out;
1715 } else {
1716 ret = 0;
1717 goto out;
1718 }
1719
1720 out:
1721 put_workspace(0, ws_list);
1722 return ret;
1723 }
1724
/*
 * Parse the optional ":level" suffix of a compress mount option value and
 * clamp it to the range supported by @type; 0 or a parse error selects the
 * default level.
 */
1729 unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1730 {
1731 unsigned int level = 0;
1732 int ret;
1733
1734 if (!type)
1735 return 0;
1736
1737 if (str[0] == ':') {
1738 ret = kstrtouint(str + 1, 10, &level);
1739 if (ret)
1740 level = 0;
1741 }
1742
1743 level = btrfs_compress_set_level(type, level);
1744
1745 return level;
1746 }