#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compressed data format:
 *
 * 1.  Header
 *     Fixed size, LZO_LEN (4) bytes, little endian.
 *     Stores the total size of the compressed data, including the header
 *     itself.
 *
 * 2.  Segment(s)
 *     One segment holds the compressed result of at most one sector of
 *     uncompressed data and consists of a segment header plus payload.
 *
 * 2.1 Segment header
 *     Fixed size, LZO_LEN (4) bytes, little endian.
 *     Stores the size of the segment payload (the header itself excluded).
 *     A segment header never crosses a sector boundary; when fewer than
 *     LZO_LEN bytes are left in the current sector, they are filled with
 *     zeros (at most LZO_LEN - 1 padding bytes) and the next segment header
 *     starts at the next sector boundary.
 *
 * 2.2 Segment payload
 *     Variable size, at most lzo1x_worst_compress(sectorsize) bytes of
 *     LZO1X compressed data.
 */

#define WORKSPACE_BUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))
#define WORKSPACE_CBUF_LENGTH	(lzo1x_worst_compress(PAGE_SIZE))

struct workspace {
	void *mem;	/* scratch memory for lzo1x_1_compress() */
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(WORKSPACE_BUF_LENGTH, GFP_KERNEL);
	workspace->cbuf = kvmalloc(WORKSPACE_CBUF_LENGTH, GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}

/*
 * Copy one compressed segment into the output pages:
 *
 * - Write the segment header (the compressed payload size) into the
 *   destination
 * - Copy the compressed payload right after it
 * - If the space left in the last touched sector is too small to hold
 *   another segment header, pad it with zeros (at most LZO_LEN - 1 bytes)
 *
 * New output pages are allocated on demand, up to @max_nr_page pages.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header to cross a sector boundary, the
	 * previous run should have left enough space inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap_local_page(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap_local(kaddr);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap_local_page(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining
	 * space of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;
out:
	kunmap_local(kaddr);
	return 0;
}
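
/*
 * Worked example of the padding above (illustrative only, assuming a 4KiB
 * sectorsize): if a payload ends at output offset 0x0ffe, then
 * sector_bytes_left is round_up(0x0ffe, 0x1000) - 0x0ffe = 2, which is less
 * than LZO_LEN, so two zero bytes are written and the next segment header
 * starts at 0x1000.  If the payload ends at 0x0ff0 instead, 16 bytes remain,
 * another segment header still fits, and no padding is added.
 */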

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the
	 * total compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap_local_page(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap_local(data_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we're making it bigger after two sectors.  And if
		 * it is so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached the page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}
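
/*
 * Worked example of the bail-out above (illustrative only, 4KiB sectorsize):
 * once more than two sectors of input have been consumed (cur_in - start >
 * 8192), the output must stay smaller than the consumed input; if 12288
 * input bytes have produced 13000 output bytes so far, the range is not
 * worth compressing and lzo_compress_pages() returns -E2BIG.
 */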

/*
 * Copy the compressed segment payload at @cur_in into @dest.
 *
 * The payload carries no padding, so this only has to handle switching
 * between the compressed pages while copying @len bytes.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		memcpy_from_page(dest + *cur_in - orig_in, cur_page,
				 offset_in_page(*cur_in), copy_len);

		*cur_in += copy_len;
	}
}
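
/*
 * Illustrative example (not part of the original sources): with 4KiB pages,
 * a 100 byte segment payload starting at compressed offset 4090 is copied
 * in two chunks by the loop above: 6 bytes from the tail of page 0, then
 * 94 bytes from the head of page 1.
 */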

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap_local_page(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap_local(kaddr);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If this happens, it means the compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap_local_page(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap_local(kaddr);
		cur_in += LZO_LEN;

		if (seg_len > WORKSPACE_CBUF_LENGTH) {
			/*
			 * seg_len shouldn't be larger than we have allocated
			 * for workspace->cbuf
			 */
			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
				  seg_len);
			ret = -EIO;
			goto out;
		}

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into the inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All the data in the bio has been read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

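/*
 * Worked example of the layout lzo_decompress() below expects (illustrative
 * only): a single-segment input whose payload compressed to 50 bytes has
 * srclen = LZO_LEN * 2 + 50 = 58; the first LE32 field must hold 58 (the
 * total size including both length fields) and the second LE32 field must
 * hold 50 (the payload size), otherwise the input is rejected with -EUCLEAN.
 */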
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = WORKSPACE_BUF_LENGTH;
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	/* The first LE32 field is the total length, headers included */
	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	/* The second LE32 field is the length of the single segment payload */
	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's
	 * move this check closer to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * The caller also zeroes the tail of the page, but this covers
	 * anything missing from the decompressed data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};