/*
 * Hibernation support code for reading the suspend image from and
 * writing it to a swap partition (swsusp).
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

u32 swsusp_hardware_signature;

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition.  It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend.  The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in a file-alike
 * way.
 */
struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

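/*
 * The swsusp_header occupies exactly one page of the resume area: the
 * reserved[] padding is sized so that the signature fields land at the very
 * end of the page, overlaying the signature area of the on-disk swap header.
 * orig_sig keeps the original swap signature so it can be restored once the
 * image has been read.
 */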
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32) - sizeof(u32)];
	u32 hw_sig;
	u32 crc32;
	sector_t image;
	unsigned int flags;
	char orig_sig[10];
	char sig[10];
} __packed;

static struct swsusp_header *swsusp_header;

/*
 * The following functions are used for tracing the allocated swap pages, so
 * that they can be freed in case of an error.
 */
struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

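/*
 * Record that @swap_offset has been allocated for the image.  Offsets are
 * kept as [start, end] extents in an rbtree; an offset adjacent to an
 * existing extent simply extends it, otherwise a new single-offset extent
 * is inserted.  Returns -EINVAL if the offset is already registered.
 */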
static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node and/or find an existing one. */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge with the preceding extent. */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge with the following extent. */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree. */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/*
 * alloc_swapdev_block - Allocate a swap page and register that it has been
 * allocated, so that it can be freed in case of an error.
 */
sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/*
 * free_all_swap_pages - Free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */
void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;
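
/*
 * A hib_bio_batch tracks a set of asynchronously submitted bios: count is
 * the number still in flight, hib_end_io() decrements it and records the
 * first error seen, and hib_wait_io() sleeps until the batch has drained.
 * The blk_plug lets the queued submissions be batched before they reach
 * the device.
 */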
struct hib_bio_batch {
	atomic_t count;
	wait_queue_head_t wait;
	blk_status_t error;
	struct blk_plug plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(hib_resume_bdev, 1, opf, GFP_NOIO | __GFP_HIGH);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}

/*
 * Saving part
 */
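
/*
 * mark_swapfiles - Write the swsusp signature into the swap header.
 *
 * Reads the first page of the resume area, checks that it contains a valid
 * swap signature ("SWAP-SPACE" or "SWAPSPACE2"), saves that signature in
 * orig_sig and replaces it with HIBERNATE_SIG together with the location of
 * the first swap map page, the image flags and (optionally) the CRC32 and
 * hardware signature, then writes the page back.
 */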
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		if (swsusp_hardware_signature) {
			swsusp_header->hw_sig = swsusp_hardware_signature;
			flags |= SF_HW_SIG;
		}
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 * swsusp_swap_check - Check if the resume device is a swap device
 * and get its index (if so).
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
					    NULL);
	if (IS_ERR(hib_resume_bdev))
		return PTR_ERR(hib_resume_bdev);

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 * write_page - Write one page to a given swap location.
 * @buf:	Address of the page we're writing.
 * @offset:	Offset of the swap page we're writing to.
 * @hb:		bio completion batch, or NULL for synchronous I/O.
 */
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

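	/*
	 * For batched (asynchronous) writes the caller may reuse @buf before
	 * the bio completes, so copy the data into a freshly allocated bounce
	 * page first.  If no page is available, drain the batch and retry;
	 * as a last resort fall back to a synchronous write of @buf itself.
	 */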
	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb);
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

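/*
 * swap_write_page - Write one data page and record its location.
 *
 * The page is written to a freshly allocated swap block and that block's
 * sector is stored in the current swap map page.  When the map page fills
 * up, another block is allocated for the next map page, linked via
 * .next_swap, and the full map page is flushed to its own slot on swap.
 */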
static int swap_write_page(struct swap_map_handle *handle, void *buf,
			   struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
			      unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
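
/*
 * On swap, each compressed block is therefore laid out as a size_t length
 * header (LZO_HEADER) immediately followed by the compressed data, and the
 * whole record is written out in PAGE_SIZE chunks; any slack in the last
 * page is ignored on read because the stored length tells us where the
 * data ends.
 */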

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192

/**
 * save_image - Save the suspend image data.
 */
static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;
	atomic_t ready;
	atomic_t stop;
	unsigned run_threads;
	wait_queue_head_t go;
	wait_queue_head_t done;
	u32 *crc32;
	size_t *unc_len[LZO_THREADS];
	unsigned char *unc[LZO_THREADS];
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;
	atomic_t ready;
	atomic_t stop;
	int ret;
	wait_queue_head_t go;
	wait_queue_head_t done;
	size_t unc_len;
	size_t cmp_len;
	unsigned char unc[LZO_UNC_SIZE];
	unsigned char cmp[LZO_CMP_SIZE];
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
					  d->cmp + LZO_HEADER, &d->cmp_len,
					  d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data after compressing it with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
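	/*
	 * Main pipeline: fill each worker's unc[] buffer with up to
	 * LZO_UNC_SIZE of snapshot data, kick the compression threads, then
	 * hand the same buffers to the CRC32 thread while this thread writes
	 * out each worker's length header plus compressed data, one page at
	 * a time.
	 */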
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap space
 * available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write the entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want them
 * synced (in case something goes wrong), but we DO NOT want to mark the
 * filesystems clean: they are not. (And it does not matter; if we resume
 * correctly, we'll mark the system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */
static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

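/*
 * get_swap_reader - Build the in-memory list of swap map pages.
 *
 * Starting at swsusp_header->image, read every swap_map_page in the on-disk
 * chain (following .next_swap) into a freshly allocated page and link the
 * pages into handle->maps, so that swap_read_page() can later walk the image
 * entries sequentially.
 */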
static int get_swap_reader(struct swap_map_handle *handle,
			   unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image)
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
			  struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - Load the image using the swap map handle.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Snapshot handle to write the data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;
	atomic_t ready;
	atomic_t stop;
	int ret;
	wait_queue_head_t go;
	wait_queue_head_t done;
	size_t unc_len;
	size_t cmp_len;
	unsigned char unc[LZO_UNC_SIZE];
	unsigned char cmp[LZO_CMP_SIZE];
};

/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
					       d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Snapshot handle to write the uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
		 have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.  This is complete
	 * guesswork, because we'll only know the real picture once
	 * prepare_image() is called, which is much later on during the image
	 * load phase.  We'll assume the worst case and say that none of the
	 * image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

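	/*
	 * page[] is used as a ring of read buffers: 'ring' is where the next
	 * read lands, 'pg' is where the next compressed block is consumed,
	 * 'asked' counts reads submitted but not yet waited for, 'have'
	 * counts buffers whose reads have completed, and 'want' is how many
	 * more buffers may be filled before the ring wraps onto unconsumed
	 * data.
	 */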
	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - Read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location.
 */
int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for the swsusp signature in the resume device.
 *
 * If the signature is found, the original swap signature is written back so
 * the image cannot be used twice, and 0 is returned; otherwise the device is
 * released and an error code is returned.
 */
int swsusp_check(void)
{
	int error;
	void *holder;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ | FMODE_EXCL, &holder);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
				      swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now. */
			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
					      swsusp_resume_block,
					      swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}
		if (!error && swsusp_header->flags & SF_HW_SIG &&
		    swsusp_header->hw_sig != swsusp_hardware_signature) {
			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
				swsusp_header->hw_sig, swsusp_hardware_signature);
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - Close the resume device.
 * @mode: mode the device was opened with.
 */
void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

#ifdef CONFIG_SUSPEND
/**
 * swsusp_unmark - Restore the original swap signature so the image will not
 * be used again.
 */
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block,
				      swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);