0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * linux/kernel/power/snapshot.c
0004  *
0005  * This file provides system snapshot/restore functionality for swsusp.
0006  */
0007
0008
0009
0010
0011 #define pr_fmt(fmt) "PM: hibernation: " fmt
0012
0013 #include <linux/version.h>
0014 #include <linux/module.h>
0015 #include <linux/mm.h>
0016 #include <linux/suspend.h>
0017 #include <linux/delay.h>
0018 #include <linux/bitops.h>
0019 #include <linux/spinlock.h>
0020 #include <linux/kernel.h>
0021 #include <linux/pm.h>
0022 #include <linux/device.h>
0023 #include <linux/init.h>
0024 #include <linux/memblock.h>
0025 #include <linux/nmi.h>
0026 #include <linux/syscalls.h>
0027 #include <linux/console.h>
0028 #include <linux/highmem.h>
0029 #include <linux/list.h>
0030 #include <linux/slab.h>
0031 #include <linux/compiler.h>
0032 #include <linux/ktime.h>
0033 #include <linux/set_memory.h>
0034
0035 #include <linux/uaccess.h>
0036 #include <asm/mmu_context.h>
0037 #include <asm/tlbflush.h>
0038 #include <asm/io.h>
0039
0040 #include "power.h"
0041
0042 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
0043 static bool hibernate_restore_protection;
0044 static bool hibernate_restore_protection_active;
0045
0046 void enable_restore_image_protection(void)
0047 {
0048 hibernate_restore_protection = true;
0049 }
0050
0051 static inline void hibernate_restore_protection_begin(void)
0052 {
0053 hibernate_restore_protection_active = hibernate_restore_protection;
0054 }
0055
0056 static inline void hibernate_restore_protection_end(void)
0057 {
0058 hibernate_restore_protection_active = false;
0059 }
0060
0061 static inline void hibernate_restore_protect_page(void *page_address)
0062 {
0063 if (hibernate_restore_protection_active)
0064 set_memory_ro((unsigned long)page_address, 1);
0065 }
0066
0067 static inline void hibernate_restore_unprotect_page(void *page_address)
0068 {
0069 if (hibernate_restore_protection_active)
0070 set_memory_rw((unsigned long)page_address, 1);
0071 }
0072 #else
0073 static inline void hibernate_restore_protection_begin(void) {}
0074 static inline void hibernate_restore_protection_end(void) {}
0075 static inline void hibernate_restore_protect_page(void *page_address) {}
0076 static inline void hibernate_restore_unprotect_page(void *page_address) {}
0077 #endif
0078
0079
0080 /*
0081  * The calls to set_direct_map_*() should not fail because remapping a page
0082  * here means that we only update protection bits in an existing PTE.
0083  * It is still worth to have a warning here if something changes and this
0084  * will no longer be the case.
0085  */
0086 static inline void hibernate_map_page(struct page *page)
0087 {
0088 if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
0089 int ret = set_direct_map_default_noflush(page);
0090
0091 if (ret)
0092 pr_warn_once("Failed to remap page\n");
0093 } else {
0094 debug_pagealloc_map_pages(page, 1);
0095 }
0096 }
0097
0098 static inline void hibernate_unmap_page(struct page *page)
0099 {
0100 if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
0101 unsigned long addr = (unsigned long)page_address(page);
0102 int ret = set_direct_map_invalid_noflush(page);
0103
0104 if (ret)
0105 pr_warn_once("Failed to remap page\n");
0106
0107 flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
0108 } else {
0109 debug_pagealloc_unmap_pages(page, 1);
0110 }
0111 }
0112
0113 static int swsusp_page_is_free(struct page *);
0114 static void swsusp_set_page_forbidden(struct page *);
0115 static void swsusp_unset_page_forbidden(struct page *);
0116
0117 /*
0118  * Number of bytes to reserve for memory allocations made by device drivers
0119  * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
0120  * cause image creation to fail (tunable via /sys/power/reserved_size).
0121  */
0122 unsigned long reserved_size;
0123
0124 void __init hibernate_reserved_size_init(void)
0125 {
0126 reserved_size = SPARE_PAGES * PAGE_SIZE;
0127 }
0128
0129 /*
0130  * Preferred image size in bytes (tunable via /sys/power/image_size).
0131  * When it is set to N, swsusp will do its best to ensure the image
0132  * size will not exceed N bytes, but if that is impossible, it will
0133  * try to create the smallest image possible.
0134  */
0135 unsigned long image_size;
0136
0137 void __init hibernate_image_size_init(void)
0138 {
0139 image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
0140 }
0141
0142 /*
0143  * List of PBEs needed for restoring the pages that were allocated before
0144  * the suspend and included in the suspend image, but have also been
0145  * allocated by the "resume" kernel, so their contents cannot be written
0146  * directly to their "original" page frames.
0147  */
0148 struct pbe *restore_pblist;
0149
0150
0151
0152 #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
0153
0154 struct linked_page {
0155 struct linked_page *next;
0156 char data[LINKED_PAGE_DATA_SIZE];
0157 } __packed;
0158
0159 /*
0160  * List of "safe" pages (i.e. pages that were not used by the image kernel
0161  * before hibernation) that may be used as temporary storage for image
0162  * kernel memory contents.
0163  */
0164 static struct linked_page *safe_pages_list;
0165
0166 /* Pointer to an auxiliary buffer (one page) */
0167 static void *buffer;
0168
0169 #define PG_ANY 0
0170 #define PG_SAFE 1
0171 #define PG_UNSAFE_CLEAR 1
0172 #define PG_UNSAFE_KEEP 0
0173
0174 static unsigned int allocated_unsafe_pages;
0175
0176 /**
0177  * get_image_page - Allocate a page for a hibernation image.
0178  * @gfp_mask: GFP mask for the allocation.
0179  * @safe_needed: Get pages that were not used before hibernation (restore only)
0180  *
0181  * During image restoration, for storing the PBE list and the image data, we
0182  * can only use memory pages that do not conflict with the pages used before
0183  * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
0184  * using allocated_unsafe_pages.
0185  *
0186  * Each allocated image page is marked as PageNosave and PageNosaveFree so
0187  * that swsusp_free() can release it.
0188  */
0189 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
0190 {
0191 void *res;
0192
0193 res = (void *)get_zeroed_page(gfp_mask);
0194 if (safe_needed)
0195 while (res && swsusp_page_is_free(virt_to_page(res))) {
0196
0197 swsusp_set_page_forbidden(virt_to_page(res));
0198 allocated_unsafe_pages++;
0199 res = (void *)get_zeroed_page(gfp_mask);
0200 }
0201 if (res) {
0202 swsusp_set_page_forbidden(virt_to_page(res));
0203 swsusp_set_page_free(virt_to_page(res));
0204 }
0205 return res;
0206 }
0207
0208 static void *__get_safe_page(gfp_t gfp_mask)
0209 {
0210 if (safe_pages_list) {
0211 void *ret = safe_pages_list;
0212
0213 safe_pages_list = safe_pages_list->next;
0214 memset(ret, 0, PAGE_SIZE);
0215 return ret;
0216 }
0217 return get_image_page(gfp_mask, PG_SAFE);
0218 }
0219
0220 unsigned long get_safe_page(gfp_t gfp_mask)
0221 {
0222 return (unsigned long)__get_safe_page(gfp_mask);
0223 }
0224
0225 static struct page *alloc_image_page(gfp_t gfp_mask)
0226 {
0227 struct page *page;
0228
0229 page = alloc_page(gfp_mask);
0230 if (page) {
0231 swsusp_set_page_forbidden(page);
0232 swsusp_set_page_free(page);
0233 }
0234 return page;
0235 }
0236
0237 static void recycle_safe_page(void *page_address)
0238 {
0239 struct linked_page *lp = page_address;
0240
0241 lp->next = safe_pages_list;
0242 safe_pages_list = lp;
0243 }
0244
0245 /**
0246  * free_image_page - Free a page allocated for hibernation image.
0247  * @addr: Address of the page to free.
0248  * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
0249  *
0250  * The page to free should have been allocated by get_image_page() (page
0251  * flags set by it are affected).
0252  */
0253 static inline void free_image_page(void *addr, int clear_nosave_free)
0254 {
0255 struct page *page;
0256
0257 BUG_ON(!virt_addr_valid(addr));
0258
0259 page = virt_to_page(addr);
0260
0261 swsusp_unset_page_forbidden(page);
0262 if (clear_nosave_free)
0263 swsusp_unset_page_free(page);
0264
0265 __free_page(page);
0266 }
0267
0268 static inline void free_list_of_pages(struct linked_page *list,
0269 int clear_page_nosave)
0270 {
0271 while (list) {
0272 struct linked_page *lp = list->next;
0273
0274 free_image_page(list, clear_page_nosave);
0275 list = lp;
0276 }
0277 }
0278
0279 /*
0280  * struct chain_allocator is used for allocating small objects out of
0281  * a linked list of pages called 'the chain'.
0282  *
0283  * The chain grows each time when there is no room for a new object in
0284  * the current page.  The allocated objects cannot be freed individually.
0285  * It is only possible to free them all at once, by freeing the entire
0286  * chain.
0287  *
0288  * NOTE: The chain allocator may be inefficient if the allocated objects
0289  * are not much smaller than PAGE_SIZE.
0290  */
0291 struct chain_allocator {
0292 struct linked_page *chain;
0293 unsigned int used_space;
0294
0295 gfp_t gfp_mask;
0296 int safe_needed;
0297 };
0298
0299 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
0300 int safe_needed)
0301 {
0302 ca->chain = NULL;
0303 ca->used_space = LINKED_PAGE_DATA_SIZE;
0304 ca->gfp_mask = gfp_mask;
0305 ca->safe_needed = safe_needed;
0306 }
0307
0308 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
0309 {
0310 void *ret;
0311
0312 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
0313 struct linked_page *lp;
0314
0315 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
0316 get_image_page(ca->gfp_mask, PG_ANY);
0317 if (!lp)
0318 return NULL;
0319
0320 lp->next = ca->chain;
0321 ca->chain = lp;
0322 ca->used_space = 0;
0323 }
0324 ret = ca->chain->data + ca->used_space;
0325 ca->used_space += size;
0326 return ret;
0327 }
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352 /*
0353  * Data types related to memory bitmaps.
0354  *
0355  * struct memory_bitmap contains a list of zone bitmap objects, a struct
0356  * bm_position used for browsing the bitmap and a pointer to the list of
0357  * pages used for allocating all of the zone bitmap objects and bitmap
0358  * block objects.
0359  *
0360  * NOTE: It has to be possible to lay out the bitmap in memory using only
0361  * allocations of order 0.
0362  *
0363  * The memory bitmap is organized as a radix tree to guarantee fast random
0364  * access to the bits.  There is one radix tree for each zone (as returned
0365  * from create_mem_extents()).  One radix tree is represented by one struct
0366  * mem_zone_bm_rtree.  There are two linked lists for the nodes of the tree,
0367  * one for the inner nodes and one for the leave nodes.  The linked leave
0368  * nodes are used for fast linear access of the memory bitmap.
0369  */
0370 #define BM_END_OF_MAP (~0UL)
0371
0372 #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
0373 #define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
0374 #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
0375
0376
0377 /*
0378  * struct rtree_node is a wrapper struct to link the nodes of the rtree
0379  * together for easy linear iteration over bits and easy freeing.
0380  */
0381 struct rtree_node {
0382 struct list_head list;
0383 unsigned long *data;
0384 };
0385
0386 /*
0387  * struct mem_zone_bm_rtree represents a bitmap used for one populated
0388  * memory zone.
0389  */
0390 struct mem_zone_bm_rtree {
0391 struct list_head list;
0392 struct list_head nodes;
0393 struct list_head leaves;
0394 unsigned long start_pfn;
0395 unsigned long end_pfn;
0396 struct rtree_node *rtree;
0397 int levels;
0398 unsigned int blocks;
0399 };
0400
0401
0402 /* struct bm_position is used for browsing memory bitmaps */
0403 struct bm_position {
0404 struct mem_zone_bm_rtree *zone;
0405 struct rtree_node *node;
0406 unsigned long node_pfn;
0407 int node_bit;
0408 };
0409
0410 struct memory_bitmap {
0411 struct list_head zones;
0412 struct linked_page *p_list;
0413
0414
0415 struct bm_position cur;
0416 };
0417
0418
0419 /* Functions that operate on memory bitmaps */
0420 #define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
0421 #if BITS_PER_LONG == 32
0422 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
0423 #else
0424 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
0425 #endif
0426 #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
0427
0428 /**
0429  * alloc_rtree_node - Allocate a new node and add it to the radix tree.
0430  * @gfp_mask: GFP mask for the allocation.
0431  * @safe_needed: Get pages not used before hibernation (restore only)
0432  * @ca: Pointer to a linked list of pages ("a chain") to allocate from
0433  * @list: Radix tree node list to add the new node to.
0434  *
0435  * This function is used to allocate inner nodes as well as the
0436  * leave nodes of the radix tree.  It also adds the node to the
0437  * corresponding linked list passed in by the *list parameter.
0438  */
0439 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
0440 struct chain_allocator *ca,
0441 struct list_head *list)
0442 {
0443 struct rtree_node *node;
0444
0445 node = chain_alloc(ca, sizeof(struct rtree_node));
0446 if (!node)
0447 return NULL;
0448
0449 node->data = get_image_page(gfp_mask, safe_needed);
0450 if (!node->data)
0451 return NULL;
0452
0453 list_add_tail(&node->list, list);
0454
0455 return node;
0456 }
0457
0458 /*
0459  * add_rtree_block - Add a new leave node to the memory bitmap.
0460  *
0461  * The function adds a new node to a radix tree, extending the height of
0462  * the tree if necessary.  The radix tree is right-aligned so newly added
0463  * nodes are added to the left-most position in the tree.
0464  */
0465 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
0466 int safe_needed, struct chain_allocator *ca)
0467 {
0468 struct rtree_node *node, *block, **dst;
0469 unsigned int levels_needed, block_nr;
0470 int i;
0471
0472 block_nr = zone->blocks;
0473 levels_needed = 0;
0474
0475
0476 while (block_nr) {
0477 levels_needed += 1;
0478 block_nr >>= BM_RTREE_LEVEL_SHIFT;
0479 }
0480
0481
0482 for (i = zone->levels; i < levels_needed; i++) {
0483 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
0484 &zone->nodes);
0485 if (!node)
0486 return -ENOMEM;
0487
0488 node->data[0] = (unsigned long)zone->rtree;
0489 zone->rtree = node;
0490 zone->levels += 1;
0491 }
0492
0493
0494 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
0495 if (!block)
0496 return -ENOMEM;
0497
0498
0499 node = zone->rtree;
0500 dst = &zone->rtree;
0501 block_nr = zone->blocks;
0502 for (i = zone->levels; i > 0; i--) {
0503 int index;
0504
0505 if (!node) {
0506 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
0507 &zone->nodes);
0508 if (!node)
0509 return -ENOMEM;
0510 *dst = node;
0511 }
0512
0513 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
0514 index &= BM_RTREE_LEVEL_MASK;
0515 dst = (struct rtree_node **)&((*dst)->data[index]);
0516 node = *dst;
0517 }
0518
0519 zone->blocks += 1;
0520 *dst = block;
0521
0522 return 0;
0523 }
0524
0525 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
0526 int clear_nosave_free);
0527
0528 /*
0529  * create_zone_bm_rtree - Create a radix tree for one zone.
0530  *
0531  * Allocates the mem_zone_bm_rtree structure and initializes it.
0532  * This function also allocates and builds the radix tree for the
0533  * zone.
0534  */
0535 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
0536 int safe_needed,
0537 struct chain_allocator *ca,
0538 unsigned long start,
0539 unsigned long end)
0540 {
0541 struct mem_zone_bm_rtree *zone;
0542 unsigned int i, nr_blocks;
0543 unsigned long pages;
0544
0545 pages = end - start;
0546 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
0547 if (!zone)
0548 return NULL;
0549
0550 INIT_LIST_HEAD(&zone->nodes);
0551 INIT_LIST_HEAD(&zone->leaves);
0552 zone->start_pfn = start;
0553 zone->end_pfn = end;
0554 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
0555
0556 for (i = 0; i < nr_blocks; i++) {
0557 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
0558 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
0559 return NULL;
0560 }
0561 }
0562
0563 return zone;
0564 }
0565
0566 /*
0567  * free_zone_bm_rtree - Free the memory of the radix tree.
0568  *
0569  * Free all node pages of the radix tree.  The mem_zone_bm_rtree
0570  * structure itself is not freed here nor are the rtree_node
0571  * structs.
0572  */
0573 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
0574 int clear_nosave_free)
0575 {
0576 struct rtree_node *node;
0577
0578 list_for_each_entry(node, &zone->nodes, list)
0579 free_image_page(node->data, clear_nosave_free);
0580
0581 list_for_each_entry(node, &zone->leaves, list)
0582 free_image_page(node->data, clear_nosave_free);
0583 }
0584
0585 static void memory_bm_position_reset(struct memory_bitmap *bm)
0586 {
0587 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
0588 list);
0589 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
0590 struct rtree_node, list);
0591 bm->cur.node_pfn = 0;
0592 bm->cur.node_bit = 0;
0593 }
0594
0595 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
0596
0597 struct mem_extent {
0598 struct list_head hook;
0599 unsigned long start;
0600 unsigned long end;
0601 };
0602
0603 /**
0604  * free_mem_extents - Free a list of memory extents.
0605  * @list: List of extents to free.
0606  */
0607 static void free_mem_extents(struct list_head *list)
0608 {
0609 struct mem_extent *ext, *aux;
0610
0611 list_for_each_entry_safe(ext, aux, list, hook) {
0612 list_del(&ext->hook);
0613 kfree(ext);
0614 }
0615 }
0616
0617 /**
0618  * create_mem_extents - Create a list of memory extents.
0619  * @list: List to put the extents into.
0620  * @gfp_mask: Mask to use for memory allocations.
0621  *
0622  * The extents represent contiguous ranges of PFNs.
0623  */
0624 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
0625 {
0626 struct zone *zone;
0627
0628 INIT_LIST_HEAD(list);
0629
0630 for_each_populated_zone(zone) {
0631 unsigned long zone_start, zone_end;
0632 struct mem_extent *ext, *cur, *aux;
0633
0634 zone_start = zone->zone_start_pfn;
0635 zone_end = zone_end_pfn(zone);
0636
0637 list_for_each_entry(ext, list, hook)
0638 if (zone_start <= ext->end)
0639 break;
0640
0641 if (&ext->hook == list || zone_end < ext->start) {
0642
0643 struct mem_extent *new_ext;
0644
0645 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
0646 if (!new_ext) {
0647 free_mem_extents(list);
0648 return -ENOMEM;
0649 }
0650 new_ext->start = zone_start;
0651 new_ext->end = zone_end;
0652 list_add_tail(&new_ext->hook, &ext->hook);
0653 continue;
0654 }
0655
0656
0657 if (zone_start < ext->start)
0658 ext->start = zone_start;
0659 if (zone_end > ext->end)
0660 ext->end = zone_end;
0661
0662
0663 cur = ext;
0664 list_for_each_entry_safe_continue(cur, aux, list, hook) {
0665 if (zone_end < cur->start)
0666 break;
0667 if (zone_end < cur->end)
0668 ext->end = cur->end;
0669 list_del(&cur->hook);
0670 kfree(cur);
0671 }
0672 }
0673
0674 return 0;
0675 }
0676
0677 /**
0678  * memory_bm_create - Allocate memory for a memory bitmap.
0679  */
0680 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
0681 int safe_needed)
0682 {
0683 struct chain_allocator ca;
0684 struct list_head mem_extents;
0685 struct mem_extent *ext;
0686 int error;
0687
0688 chain_init(&ca, gfp_mask, safe_needed);
0689 INIT_LIST_HEAD(&bm->zones);
0690
0691 error = create_mem_extents(&mem_extents, gfp_mask);
0692 if (error)
0693 return error;
0694
0695 list_for_each_entry(ext, &mem_extents, hook) {
0696 struct mem_zone_bm_rtree *zone;
0697
0698 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
0699 ext->start, ext->end);
0700 if (!zone) {
0701 error = -ENOMEM;
0702 goto Error;
0703 }
0704 list_add_tail(&zone->list, &bm->zones);
0705 }
0706
0707 bm->p_list = ca.chain;
0708 memory_bm_position_reset(bm);
0709 Exit:
0710 free_mem_extents(&mem_extents);
0711 return error;
0712
0713 Error:
0714 bm->p_list = ca.chain;
0715 memory_bm_free(bm, PG_UNSAFE_CLEAR);
0716 goto Exit;
0717 }
0718
0719 /**
0720  * memory_bm_free - Free memory occupied by the memory bitmap.
0721  * @bm: Memory bitmap.
0722  */
0723 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
0724 {
0725 struct mem_zone_bm_rtree *zone;
0726
0727 list_for_each_entry(zone, &bm->zones, list)
0728 free_zone_bm_rtree(zone, clear_nosave_free);
0729
0730 free_list_of_pages(bm->p_list, clear_nosave_free);
0731
0732 INIT_LIST_HEAD(&bm->zones);
0733 }
0734
0735 /**
0736  * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
0737  *
0738  * Find the bit in memory bitmap @bm that corresponds to the given PFN.
0739  * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
0740  *
0741  * Walk the radix tree to find the page containing the bit that represents
0742  * @pfn and return the position of the bit in @addr and @bit_nr.
0743  */
0744 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
0745 void **addr, unsigned int *bit_nr)
0746 {
0747 struct mem_zone_bm_rtree *curr, *zone;
0748 struct rtree_node *node;
0749 int i, block_nr;
0750
0751 zone = bm->cur.zone;
0752
0753 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
0754 goto zone_found;
0755
0756 zone = NULL;
0757
0758
0759 list_for_each_entry(curr, &bm->zones, list) {
0760 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
0761 zone = curr;
0762 break;
0763 }
0764 }
0765
0766 if (!zone)
0767 return -EFAULT;
0768
0769 zone_found:
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780 node = bm->cur.node;
0781 if (zone == bm->cur.zone &&
0782 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
0783 goto node_found;
0784
0785 node = zone->rtree;
0786 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
0787
0788 for (i = zone->levels; i > 0; i--) {
0789 int index;
0790
0791 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
0792 index &= BM_RTREE_LEVEL_MASK;
0793 BUG_ON(node->data[index] == 0);
0794 node = (struct rtree_node *)node->data[index];
0795 }
0796
0797 node_found:
0798
0799 bm->cur.zone = zone;
0800 bm->cur.node = node;
0801 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
0802
0803
0804 *addr = node->data;
0805 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
0806
0807 return 0;
0808 }
0809
0810 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
0811 {
0812 void *addr;
0813 unsigned int bit;
0814 int error;
0815
0816 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
0817 BUG_ON(error);
0818 set_bit(bit, addr);
0819 }
0820
0821 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
0822 {
0823 void *addr;
0824 unsigned int bit;
0825 int error;
0826
0827 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
0828 if (!error)
0829 set_bit(bit, addr);
0830
0831 return error;
0832 }
0833
0834 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
0835 {
0836 void *addr;
0837 unsigned int bit;
0838 int error;
0839
0840 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
0841 BUG_ON(error);
0842 clear_bit(bit, addr);
0843 }
0844
0845 static void memory_bm_clear_current(struct memory_bitmap *bm)
0846 {
0847 int bit;
0848
0849 bit = max(bm->cur.node_bit - 1, 0);
0850 clear_bit(bit, bm->cur.node->data);
0851 }
0852
0853 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
0854 {
0855 void *addr;
0856 unsigned int bit;
0857 int error;
0858
0859 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
0860 BUG_ON(error);
0861 return test_bit(bit, addr);
0862 }
0863
0864 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
0865 {
0866 void *addr;
0867 unsigned int bit;
0868
0869 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
0870 }
0871
0872 /*
0873  * rtree_next_node - Jump to the next leaf node.
0874  *
0875  * Set the position to the beginning of the next node in the
0876  * memory bitmap.  This is either the next node in the current
0877  * zone's radix tree or the first node in the radix tree of the
0878  * next zone.
0879  *
0880  * Return true if there is a next node, false otherwise.
0881  */
0882 static bool rtree_next_node(struct memory_bitmap *bm)
0883 {
0884 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
0885 bm->cur.node = list_entry(bm->cur.node->list.next,
0886 struct rtree_node, list);
0887 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
0888 bm->cur.node_bit = 0;
0889 touch_softlockup_watchdog();
0890 return true;
0891 }
0892
0893
0894 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
0895 bm->cur.zone = list_entry(bm->cur.zone->list.next,
0896 struct mem_zone_bm_rtree, list);
0897 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
0898 struct rtree_node, list);
0899 bm->cur.node_pfn = 0;
0900 bm->cur.node_bit = 0;
0901 return true;
0902 }
0903
0904
0905 return false;
0906 }
0907
0908 /**
0909  * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
0910  * @bm: Memory bitmap.
0911  *
0912  * Starting from the last returned position this function searches for the
0913  * next set bit in @bm and returns the PFN represented by it.  If no more
0914  * bits are set, BM_END_OF_MAP is returned.
0915  *
0916  * It is required to run memory_bm_position_reset() before the first call to
0917  * this function for the given memory bitmap.
0918  */
0919 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
0920 {
0921 unsigned long bits, pfn, pages;
0922 int bit;
0923
0924 do {
0925 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
0926 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
0927 bit = find_next_bit(bm->cur.node->data, bits,
0928 bm->cur.node_bit);
0929 if (bit < bits) {
0930 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
0931 bm->cur.node_bit = bit + 1;
0932 return pfn;
0933 }
0934 } while (rtree_next_node(bm));
0935
0936 return BM_END_OF_MAP;
0937 }
0938
0939 /*
0940  * This structure represents a range of page frames the contents of which
0941  * should not be saved during hibernation.
0942  */
0943 struct nosave_region {
0944 struct list_head list;
0945 unsigned long start_pfn;
0946 unsigned long end_pfn;
0947 };
0948
0949 static LIST_HEAD(nosave_regions);
0950
0951 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
0952 {
0953 struct rtree_node *node;
0954
0955 list_for_each_entry(node, &zone->nodes, list)
0956 recycle_safe_page(node->data);
0957
0958 list_for_each_entry(node, &zone->leaves, list)
0959 recycle_safe_page(node->data);
0960 }
0961
0962 static void memory_bm_recycle(struct memory_bitmap *bm)
0963 {
0964 struct mem_zone_bm_rtree *zone;
0965 struct linked_page *p_list;
0966
0967 list_for_each_entry(zone, &bm->zones, list)
0968 recycle_zone_bm_rtree(zone);
0969
0970 p_list = bm->p_list;
0971 while (p_list) {
0972 struct linked_page *lp = p_list;
0973
0974 p_list = lp->next;
0975 recycle_safe_page(lp);
0976 }
0977 }
0978
0979 /**
0980  * register_nosave_region - Register a region of unsaveable memory.
0981  *
0982  * Register a range of page frames the contents of which should not be saved
0983  * during hibernation (to be used in the early initialization code).
0984  */
0985 void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
0986 {
0987 struct nosave_region *region;
0988
0989 if (start_pfn >= end_pfn)
0990 return;
0991
0992 if (!list_empty(&nosave_regions)) {
0993
0994 region = list_entry(nosave_regions.prev,
0995 struct nosave_region, list);
0996 if (region->end_pfn == start_pfn) {
0997 region->end_pfn = end_pfn;
0998 goto Report;
0999 }
1000 }
1001
1002 region = memblock_alloc(sizeof(struct nosave_region),
1003 SMP_CACHE_BYTES);
1004 if (!region)
1005 panic("%s: Failed to allocate %zu bytes\n", __func__,
1006 sizeof(struct nosave_region));
1007 region->start_pfn = start_pfn;
1008 region->end_pfn = end_pfn;
1009 list_add_tail(&region->list, &nosave_regions);
1010 Report:
1011 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
1012 (unsigned long long) start_pfn << PAGE_SHIFT,
1013 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1014 }
1015
1016 /*
1017  * Set bits in this map correspond to the page frames the contents of which
1018  * should not be saved during the suspend.
1019  */
1020 static struct memory_bitmap *forbidden_pages_map;
1021
1022 /* Set bits in this map correspond to free page frames. */
1023 static struct memory_bitmap *free_pages_map;
1024
1025
1026
1027
1028
1029
1030 void swsusp_set_page_free(struct page *page)
1031 {
1032 if (free_pages_map)
1033 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
1034 }
1035
1036 static int swsusp_page_is_free(struct page *page)
1037 {
1038 return free_pages_map ?
1039 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1040 }
1041
1042 void swsusp_unset_page_free(struct page *page)
1043 {
1044 if (free_pages_map)
1045 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1046 }
1047
1048 static void swsusp_set_page_forbidden(struct page *page)
1049 {
1050 if (forbidden_pages_map)
1051 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1052 }
1053
1054 int swsusp_page_is_forbidden(struct page *page)
1055 {
1056 return forbidden_pages_map ?
1057 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1058 }
1059
1060 static void swsusp_unset_page_forbidden(struct page *page)
1061 {
1062 if (forbidden_pages_map)
1063 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1064 }
1065
1066 /**
1067  * mark_nosave_pages - Mark pages that should not be saved.
1068  * @bm: Memory bitmap.
1069  *
1070  * Set bits in @bm that correspond to the page frames the contents of which
1071  * should not be saved.
1072  */
1073 static void mark_nosave_pages(struct memory_bitmap *bm)
1074 {
1075 struct nosave_region *region;
1076
1077 if (list_empty(&nosave_regions))
1078 return;
1079
1080 list_for_each_entry(region, &nosave_regions, list) {
1081 unsigned long pfn;
1082
1083 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1084 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1085 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1086 - 1);
1087
1088 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1089 if (pfn_valid(pfn)) {
1090
1091
1092
1093
1094
1095
1096 mem_bm_set_bit_check(bm, pfn);
1097 }
1098 }
1099 }
1100
1101 /**
1102  * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1103  *
1104  * Create bitmaps needed for marking page frames that should not be saved and
1105  * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1106  * only modified if everything goes well, because we don't want the bits to be
1107  * duplicated in the forbidden_pages_map and free_pages_map.
1108  */
1109 int create_basic_memory_bitmaps(void)
1110 {
1111 struct memory_bitmap *bm1, *bm2;
1112 int error = 0;
1113
1114 if (forbidden_pages_map && free_pages_map)
1115 return 0;
1116 else
1117 BUG_ON(forbidden_pages_map || free_pages_map);
1118
1119 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1120 if (!bm1)
1121 return -ENOMEM;
1122
1123 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1124 if (error)
1125 goto Free_first_object;
1126
1127 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1128 if (!bm2)
1129 goto Free_first_bitmap;
1130
1131 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1132 if (error)
1133 goto Free_second_object;
1134
1135 forbidden_pages_map = bm1;
1136 free_pages_map = bm2;
1137 mark_nosave_pages(forbidden_pages_map);
1138
1139 pr_debug("Basic memory bitmaps created\n");
1140
1141 return 0;
1142
1143 Free_second_object:
1144 kfree(bm2);
1145 Free_first_bitmap:
1146 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1147 Free_first_object:
1148 kfree(bm1);
1149 return -ENOMEM;
1150 }
1151
1152 /**
1153  * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1154  *
1155  * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1156  * auxiliary pointers are necessary so that the bitmaps themselves are not
1157  * referred to while they are being freed.
1158  */
1159 void free_basic_memory_bitmaps(void)
1160 {
1161 struct memory_bitmap *bm1, *bm2;
1162
1163 if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1164 return;
1165
1166 bm1 = forbidden_pages_map;
1167 bm2 = free_pages_map;
1168 forbidden_pages_map = NULL;
1169 free_pages_map = NULL;
1170 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1171 kfree(bm1);
1172 memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1173 kfree(bm2);
1174
1175 pr_debug("Basic memory bitmaps freed\n");
1176 }
1177
1178 static void clear_or_poison_free_page(struct page *page)
1179 {
1180 if (page_poisoning_enabled_static())
1181 __kernel_poison_pages(page, 1);
1182 else if (want_init_on_free())
1183 clear_highpage(page);
1184 }
1185
1186 void clear_or_poison_free_pages(void)
1187 {
1188 struct memory_bitmap *bm = free_pages_map;
1189 unsigned long pfn;
1190
1191 if (WARN_ON(!(free_pages_map)))
1192 return;
1193
1194 if (page_poisoning_enabled() || want_init_on_free()) {
1195 memory_bm_position_reset(bm);
1196 pfn = memory_bm_next_pfn(bm);
1197 while (pfn != BM_END_OF_MAP) {
1198 if (pfn_valid(pfn))
1199 clear_or_poison_free_page(pfn_to_page(pfn));
1200
1201 pfn = memory_bm_next_pfn(bm);
1202 }
1203 memory_bm_position_reset(bm);
1204 pr_info("free pages cleared after restore\n");
1205 }
1206 }
1207
1208 /**
1209  * snapshot_additional_pages - Estimate the number of extra pages needed.
1210  * @zone: Memory zone to carry out the computation for.
1211  *
1212  * Estimate the number of additional pages needed for setting up a hibernation
1213  * image data structures for @zone (usually, the returned value is greater
1214  * than the exact number).
1215  */
1216 unsigned int snapshot_additional_pages(struct zone *zone)
1217 {
1218 unsigned int rtree, nodes;
1219
1220 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1221 rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1222 LINKED_PAGE_DATA_SIZE);
1223 while (nodes > 1) {
1224 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1225 rtree += nodes;
1226 }
1227
1228 return 2 * rtree;
1229 }
1230
1231 #ifdef CONFIG_HIGHMEM
1232
1233
1234
1235
1236
1237 static unsigned int count_free_highmem_pages(void)
1238 {
1239 struct zone *zone;
1240 unsigned int cnt = 0;
1241
1242 for_each_populated_zone(zone)
1243 if (is_highmem(zone))
1244 cnt += zone_page_state(zone, NR_FREE_PAGES);
1245
1246 return cnt;
1247 }
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1258 {
1259 struct page *page;
1260
1261 if (!pfn_valid(pfn))
1262 return NULL;
1263
1264 page = pfn_to_online_page(pfn);
1265 if (!page || page_zone(page) != zone)
1266 return NULL;
1267
1268 BUG_ON(!PageHighMem(page));
1269
1270 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1271 return NULL;
1272
1273 if (PageReserved(page) || PageOffline(page))
1274 return NULL;
1275
1276 if (page_is_guard(page))
1277 return NULL;
1278
1279 return page;
1280 }
1281
1282
1283
1284
1285 static unsigned int count_highmem_pages(void)
1286 {
1287 struct zone *zone;
1288 unsigned int n = 0;
1289
1290 for_each_populated_zone(zone) {
1291 unsigned long pfn, max_zone_pfn;
1292
1293 if (!is_highmem(zone))
1294 continue;
1295
1296 mark_free_pages(zone);
1297 max_zone_pfn = zone_end_pfn(zone);
1298 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1299 if (saveable_highmem_page(zone, pfn))
1300 n++;
1301 }
1302 return n;
1303 }
1304 #else
1305 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1306 {
1307 return NULL;
1308 }
1309 #endif
1310
1311 /**
1312  * saveable_page - Check if the given page is saveable.
1313  *
1314  * Determine whether a non-highmem page should be included in a hibernation
1315  * image.
1316  *
1317  * We should save the page if it isn't Nosave, and is not in the range of
1318  * pages statically defined as 'unsaveable', and it isn't part of a free
1319  * chunk of pages.
1320  */
1321 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1322 {
1323 struct page *page;
1324
1325 if (!pfn_valid(pfn))
1326 return NULL;
1327
1328 page = pfn_to_online_page(pfn);
1329 if (!page || page_zone(page) != zone)
1330 return NULL;
1331
1332 BUG_ON(PageHighMem(page));
1333
1334 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1335 return NULL;
1336
1337 if (PageOffline(page))
1338 return NULL;
1339
1340 if (PageReserved(page)
1341 && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1342 return NULL;
1343
1344 if (page_is_guard(page))
1345 return NULL;
1346
1347 return page;
1348 }
1349
1350
1351
1352
1353 static unsigned int count_data_pages(void)
1354 {
1355 struct zone *zone;
1356 unsigned long pfn, max_zone_pfn;
1357 unsigned int n = 0;
1358
1359 for_each_populated_zone(zone) {
1360 if (is_highmem(zone))
1361 continue;
1362
1363 mark_free_pages(zone);
1364 max_zone_pfn = zone_end_pfn(zone);
1365 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1366 if (saveable_page(zone, pfn))
1367 n++;
1368 }
1369 return n;
1370 }
1371
1372
1373
1374
1375
1376 static inline void do_copy_page(long *dst, long *src)
1377 {
1378 int n;
1379
1380 for (n = PAGE_SIZE / sizeof(long); n; n--)
1381 *dst++ = *src++;
1382 }
1383
1384 /**
1385  * safe_copy_page - Copy a page in a safe way.
1386  *
1387  * Check if the page we are going to copy is marked as present in the kernel
1388  * page tables.  This always is the case if CONFIG_DEBUG_PAGEALLOC or
1389  * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set.  In that case
1390  * kernel_page_present() always returns 'true'.
1391  */
1392 static void safe_copy_page(void *dst, struct page *s_page)
1393 {
1394 if (kernel_page_present(s_page)) {
1395 do_copy_page(dst, page_address(s_page));
1396 } else {
1397 hibernate_map_page(s_page);
1398 do_copy_page(dst, page_address(s_page));
1399 hibernate_unmap_page(s_page);
1400 }
1401 }
1402
1403 #ifdef CONFIG_HIGHMEM
1404 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1405 {
1406 return is_highmem(zone) ?
1407 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1408 }
1409
1410 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1411 {
1412 struct page *s_page, *d_page;
1413 void *src, *dst;
1414
1415 s_page = pfn_to_page(src_pfn);
1416 d_page = pfn_to_page(dst_pfn);
1417 if (PageHighMem(s_page)) {
1418 src = kmap_atomic(s_page);
1419 dst = kmap_atomic(d_page);
1420 do_copy_page(dst, src);
1421 kunmap_atomic(dst);
1422 kunmap_atomic(src);
1423 } else {
1424 if (PageHighMem(d_page)) {
1425
1426
1427
1428
1429 safe_copy_page(buffer, s_page);
1430 dst = kmap_atomic(d_page);
1431 copy_page(dst, buffer);
1432 kunmap_atomic(dst);
1433 } else {
1434 safe_copy_page(page_address(d_page), s_page);
1435 }
1436 }
1437 }
1438 #else
1439 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
1440
1441 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1442 {
1443 safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1444 pfn_to_page(src_pfn));
1445 }
1446 #endif
1447
1448 static void copy_data_pages(struct memory_bitmap *copy_bm,
1449 struct memory_bitmap *orig_bm)
1450 {
1451 struct zone *zone;
1452 unsigned long pfn;
1453
1454 for_each_populated_zone(zone) {
1455 unsigned long max_zone_pfn;
1456
1457 mark_free_pages(zone);
1458 max_zone_pfn = zone_end_pfn(zone);
1459 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1460 if (page_is_saveable(zone, pfn))
1461 memory_bm_set_bit(orig_bm, pfn);
1462 }
1463 memory_bm_position_reset(orig_bm);
1464 memory_bm_position_reset(copy_bm);
1465 for(;;) {
1466 pfn = memory_bm_next_pfn(orig_bm);
1467 if (unlikely(pfn == BM_END_OF_MAP))
1468 break;
1469 copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1470 }
1471 }
1472
1473 /* Total number of image pages */
1474 static unsigned int nr_copy_pages;
1475 /* Number of pages needed for saving the original pfns of the image pages */
1476 static unsigned int nr_meta_pages;
1477 /*
1478  * Numbers of normal and highmem page frames allocated for hibernation image
1479  * before suspending devices.
1480  */
1481 static unsigned int alloc_normal, alloc_highmem;
1482 /*
1483  * Memory bitmap used for marking saveable pages (during hibernation) or
1484  * hibernation image pages (during restore).
1485  */
1486 static struct memory_bitmap orig_bm;
1487 /*
1488  * Memory bitmap used during hibernation for marking allocated page frames
1489  * that will contain copies of saveable pages.  During restore it is initially
1490  * used for marking hibernation image pages, but then the set bits from it are
1491  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1492  * used for marking "safe" highmem pages, but it has to be reinitialized for
1493  * this purpose.
1494  */
1495 static struct memory_bitmap copy_bm;
1496
1497 /**
1498  * swsusp_free - Free pages allocated for hibernation image.
1499  *
1500  * Image pages are allocated before snapshot creation, so they need to be
1501  * released after resume.
1502  */
1503 void swsusp_free(void)
1504 {
1505 unsigned long fb_pfn, fr_pfn;
1506
1507 if (!forbidden_pages_map || !free_pages_map)
1508 goto out;
1509
1510 memory_bm_position_reset(forbidden_pages_map);
1511 memory_bm_position_reset(free_pages_map);
1512
1513 loop:
1514 fr_pfn = memory_bm_next_pfn(free_pages_map);
1515 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1516
1517
1518
1519
1520
1521 do {
1522 if (fb_pfn < fr_pfn)
1523 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1524 if (fr_pfn < fb_pfn)
1525 fr_pfn = memory_bm_next_pfn(free_pages_map);
1526 } while (fb_pfn != fr_pfn);
1527
1528 if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1529 struct page *page = pfn_to_page(fr_pfn);
1530
1531 memory_bm_clear_current(forbidden_pages_map);
1532 memory_bm_clear_current(free_pages_map);
1533 hibernate_restore_unprotect_page(page_address(page));
1534 __free_page(page);
1535 goto loop;
1536 }
1537
1538 out:
1539 nr_copy_pages = 0;
1540 nr_meta_pages = 0;
1541 restore_pblist = NULL;
1542 buffer = NULL;
1543 alloc_normal = 0;
1544 alloc_highmem = 0;
1545 hibernate_restore_protection_end();
1546 }
1547
1548
1549
1550 #define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
1551
1552
1553
1554
1555
1556
1557
1558
1559 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1560 {
1561 unsigned long nr_alloc = 0;
1562
1563 while (nr_pages > 0) {
1564 struct page *page;
1565
1566 page = alloc_image_page(mask);
1567 if (!page)
1568 break;
1569 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1570 if (PageHighMem(page))
1571 alloc_highmem++;
1572 else
1573 alloc_normal++;
1574 nr_pages--;
1575 nr_alloc++;
1576 }
1577
1578 return nr_alloc;
1579 }
1580
1581 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1582 unsigned long avail_normal)
1583 {
1584 unsigned long alloc;
1585
1586 if (avail_normal <= alloc_normal)
1587 return 0;
1588
1589 alloc = avail_normal - alloc_normal;
1590 if (nr_pages < alloc)
1591 alloc = nr_pages;
1592
1593 return preallocate_image_pages(alloc, GFP_IMAGE);
1594 }
1595
1596 #ifdef CONFIG_HIGHMEM
1597 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1598 {
1599 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1600 }
1601
1602
1603
1604
1605 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1606 {
1607 return div64_u64(x * multiplier, base);
1608 }
1609
1610 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1611 unsigned long highmem,
1612 unsigned long total)
1613 {
1614 unsigned long alloc = __fraction(nr_pages, highmem, total);
1615
1616 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1617 }
1618 #else
1619 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1620 {
1621 return 0;
1622 }
1623
1624 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1625 unsigned long highmem,
1626 unsigned long total)
1627 {
1628 return 0;
1629 }
1630 #endif
1631
1632
1633
1634
1635 static unsigned long free_unnecessary_pages(void)
1636 {
1637 unsigned long save, to_free_normal, to_free_highmem, free;
1638
1639 save = count_data_pages();
1640 if (alloc_normal >= save) {
1641 to_free_normal = alloc_normal - save;
1642 save = 0;
1643 } else {
1644 to_free_normal = 0;
1645 save -= alloc_normal;
1646 }
1647 save += count_highmem_pages();
1648 if (alloc_highmem >= save) {
1649 to_free_highmem = alloc_highmem - save;
1650 } else {
1651 to_free_highmem = 0;
1652 save -= alloc_highmem;
1653 if (to_free_normal > save)
1654 to_free_normal -= save;
1655 else
1656 to_free_normal = 0;
1657 }
1658 free = to_free_normal + to_free_highmem;
1659
1660 memory_bm_position_reset(&copy_bm);
1661
1662 while (to_free_normal > 0 || to_free_highmem > 0) {
1663 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1664 struct page *page = pfn_to_page(pfn);
1665
1666 if (PageHighMem(page)) {
1667 if (!to_free_highmem)
1668 continue;
1669 to_free_highmem--;
1670 alloc_highmem--;
1671 } else {
1672 if (!to_free_normal)
1673 continue;
1674 to_free_normal--;
1675 alloc_normal--;
1676 }
1677 memory_bm_clear_bit(&copy_bm, pfn);
1678 swsusp_unset_page_forbidden(page);
1679 swsusp_unset_page_free(page);
1680 __free_page(page);
1681 }
1682
1683 return free;
1684 }
1685
1686 /**
1687  * minimum_image_size - Estimate the minimum acceptable size of an image.
1688  * @saveable: Number of saveable pages in the system.
1689  *
1690  * We want to avoid attempting to free too much memory too hard, so estimate
1691  * the minimum acceptable size of a hibernation image to use as the lower
1692  * limit for preallocating memory.
1693  *
1694  * We assume that the minimum image size should be proportional to
1695  *
1696  * [number of saveable pages] - [number of pages that can be freed in theory]
1697  *
1698  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1699  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1700  */
1701 static unsigned long minimum_image_size(unsigned long saveable)
1702 {
1703 unsigned long size;
1704
1705 size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
1706 + global_node_page_state(NR_ACTIVE_ANON)
1707 + global_node_page_state(NR_INACTIVE_ANON)
1708 + global_node_page_state(NR_ACTIVE_FILE)
1709 + global_node_page_state(NR_INACTIVE_FILE);
1710
1711 return saveable <= size ? 0 : saveable - size;
1712 }
1713
1714 /**
1715  * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1716  *
1717  * To create a hibernation image it is necessary to make a copy of every page
1718  * frame in use.  We also need a number of page frames to be free during
1719  * hibernation for allocations made while saving the image and for device
1720  * drivers, in case they need to allocate memory from their hibernation
1721  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1722  * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1723  * /sys/power/reserved_size), respectively).  To make this happen, we compute
1724  * the total number of available page frames and allocate at least
1725  *
1726  * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1727  *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1728  *
1729  * of them, which corresponds to the maximum size of a hibernation image.
1730  *
1731  * If image_size is set below the number following from the above formula,
1732  * the preallocation of memory is continued until the total number of saveable
1733  * pages in the system is below the requested image size or the minimum
1734  * acceptable image size returned by minimum_image_size(), whichever is greater.
1735  */
1736 int hibernate_preallocate_memory(void)
1737 {
1738 struct zone *zone;
1739 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1740 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1741 ktime_t start, stop;
1742 int error;
1743
1744 pr_info("Preallocating image memory\n");
1745 start = ktime_get();
1746
1747 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1748 if (error) {
1749 pr_err("Cannot allocate original bitmap\n");
1750 goto err_out;
1751 }
1752
1753 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1754 if (error) {
1755 pr_err("Cannot allocate copy bitmap\n");
1756 goto err_out;
1757 }
1758
1759 alloc_normal = 0;
1760 alloc_highmem = 0;
1761
1762
1763 save_highmem = count_highmem_pages();
1764 saveable = count_data_pages();
1765
1766
1767
1768
1769
1770 count = saveable;
1771 saveable += save_highmem;
1772 highmem = save_highmem;
1773 size = 0;
1774 for_each_populated_zone(zone) {
1775 size += snapshot_additional_pages(zone);
1776 if (is_highmem(zone))
1777 highmem += zone_page_state(zone, NR_FREE_PAGES);
1778 else
1779 count += zone_page_state(zone, NR_FREE_PAGES);
1780 }
1781 avail_normal = count;
1782 count += highmem;
1783 count -= totalreserve_pages;
1784
1785
1786 max_size = (count - (size + PAGES_FOR_IO)) / 2
1787 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1788
1789 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1790 if (size > max_size)
1791 size = max_size;
1792
1793
1794
1795
1796
1797 if (size >= saveable) {
1798 pages = preallocate_image_highmem(save_highmem);
1799 pages += preallocate_image_memory(saveable - pages, avail_normal);
1800 goto out;
1801 }
1802
1803
1804 pages = minimum_image_size(saveable);
1805
1806
1807
1808
1809
1810 if (avail_normal > pages)
1811 avail_normal -= pages;
1812 else
1813 avail_normal = 0;
1814 if (size < pages)
1815 size = min_t(unsigned long, pages, max_size);
1816
1817
1818
1819
1820
1821
1822
1823 shrink_all_memory(saveable - size);
1824
1825
1826
1827
1828
1829
1830
1831
1832 pages_highmem = preallocate_image_highmem(highmem / 2);
1833 alloc = count - max_size;
1834 if (alloc > pages_highmem)
1835 alloc -= pages_highmem;
1836 else
1837 alloc = 0;
1838 pages = preallocate_image_memory(alloc, avail_normal);
1839 if (pages < alloc) {
1840
1841 alloc -= pages;
1842 pages += pages_highmem;
1843 pages_highmem = preallocate_image_highmem(alloc);
1844 if (pages_highmem < alloc) {
1845 pr_err("Image allocation is %lu pages short\n",
1846 alloc - pages_highmem);
1847 goto err_out;
1848 }
1849 pages += pages_highmem;
1850
1851
1852
1853
1854 alloc = (count - pages) - size;
1855 pages += preallocate_image_highmem(alloc);
1856 } else {
1857
1858
1859
1860
1861 alloc = max_size - size;
1862 size = preallocate_highmem_fraction(alloc, highmem, count);
1863 pages_highmem += size;
1864 alloc -= size;
1865 size = preallocate_image_memory(alloc, avail_normal);
1866 pages_highmem += preallocate_image_highmem(alloc - size);
1867 pages += pages_highmem + size;
1868 }
1869
1870
1871
1872
1873
1874
1875 pages -= free_unnecessary_pages();
1876
1877 out:
1878 stop = ktime_get();
1879 pr_info("Allocated %lu pages for snapshot\n", pages);
1880 swsusp_show_speed(start, stop, pages, "Allocated");
1881
1882 return 0;
1883
1884 err_out:
1885 swsusp_free();
1886 return -ENOMEM;
1887 }
1888
1889 #ifdef CONFIG_HIGHMEM
1890
1891
1892
1893
1894
1895
1896 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1897 {
1898 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1899
1900 if (free_highmem >= nr_highmem)
1901 nr_highmem = 0;
1902 else
1903 nr_highmem -= free_highmem;
1904
1905 return nr_highmem;
1906 }
1907 #else
1908 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1909 #endif
1910
1911
1912
1913
1914 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1915 {
1916 struct zone *zone;
1917 unsigned int free = alloc_normal;
1918
1919 for_each_populated_zone(zone)
1920 if (!is_highmem(zone))
1921 free += zone_page_state(zone, NR_FREE_PAGES);
1922
1923 nr_pages += count_pages_for_highmem(nr_highmem);
1924 pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1925 nr_pages, PAGES_FOR_IO, free);
1926
1927 return free > nr_pages + PAGES_FOR_IO;
1928 }
1929
1930 #ifdef CONFIG_HIGHMEM
1931
1932
1933
1934
1935
1936
1937 static inline int get_highmem_buffer(int safe_needed)
1938 {
1939 buffer = get_image_page(GFP_ATOMIC, safe_needed);
1940 return buffer ? 0 : -ENOMEM;
1941 }
1942
1943
1944
1945
1946
1947
1948
1949 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1950 unsigned int nr_highmem)
1951 {
1952 unsigned int to_alloc = count_free_highmem_pages();
1953
1954 if (to_alloc > nr_highmem)
1955 to_alloc = nr_highmem;
1956
1957 nr_highmem -= to_alloc;
1958 while (to_alloc-- > 0) {
1959 struct page *page;
1960
1961 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1962 memory_bm_set_bit(bm, page_to_pfn(page));
1963 }
1964 return nr_highmem;
1965 }
1966 #else
1967 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1968
1969 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1970 unsigned int n) { return 0; }
1971 #endif
1972
1973 /**
1974  * swsusp_alloc - Allocate memory for hibernation image.
1975  *
1976  * We first try to allocate as many highmem pages as there are
1977  * saveable highmem pages in the system.  If that fails, we allocate
1978  * non-highmem pages for the copies of the remaining highmem ones.
1979  *
1980  * In this approach it is likely that the copies of highmem pages will
1981  * also be located in the high memory, because of the way in which
1982  * memory_bm_position_reset() works.
1983  */
1984 static int swsusp_alloc(struct memory_bitmap *copy_bm,
1985 unsigned int nr_pages, unsigned int nr_highmem)
1986 {
1987 if (nr_highmem > 0) {
1988 if (get_highmem_buffer(PG_ANY))
1989 goto err_out;
1990 if (nr_highmem > alloc_highmem) {
1991 nr_highmem -= alloc_highmem;
1992 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1993 }
1994 }
1995 if (nr_pages > alloc_normal) {
1996 nr_pages -= alloc_normal;
1997 while (nr_pages-- > 0) {
1998 struct page *page;
1999
2000 page = alloc_image_page(GFP_ATOMIC);
2001 if (!page)
2002 goto err_out;
2003 memory_bm_set_bit(copy_bm, page_to_pfn(page));
2004 }
2005 }
2006
2007 return 0;
2008
2009 err_out:
2010 swsusp_free();
2011 return -ENOMEM;
2012 }
2013
2014 asmlinkage __visible int swsusp_save(void)
2015 {
2016 unsigned int nr_pages, nr_highmem;
2017
2018 pr_info("Creating image:\n");
2019
2020 drain_local_pages(NULL);
2021 nr_pages = count_data_pages();
2022 nr_highmem = count_highmem_pages();
2023 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
2024
2025 if (!enough_free_mem(nr_pages, nr_highmem)) {
2026 pr_err("Not enough free memory\n");
2027 return -ENOMEM;
2028 }
2029
2030 if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
2031 pr_err("Memory allocation failed\n");
2032 return -ENOMEM;
2033 }
2034
2035
2036
2037
2038
2039 drain_local_pages(NULL);
2040 copy_data_pages(&copy_bm, &orig_bm);
2041
2042
2043
2044
2045
2046
2047
2048 nr_pages += nr_highmem;
2049 nr_copy_pages = nr_pages;
2050 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2051
2052 pr_info("Image created (%d pages copied)\n", nr_pages);
2053
2054 return 0;
2055 }
2056
2057 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
2058 static int init_header_complete(struct swsusp_info *info)
2059 {
2060 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2061 info->version_code = LINUX_VERSION_CODE;
2062 return 0;
2063 }
2064
2065 static const char *check_image_kernel(struct swsusp_info *info)
2066 {
2067 if (info->version_code != LINUX_VERSION_CODE)
2068 return "kernel version";
2069 if (strcmp(info->uts.sysname,init_utsname()->sysname))
2070 return "system type";
2071 if (strcmp(info->uts.release,init_utsname()->release))
2072 return "kernel release";
2073 if (strcmp(info->uts.version,init_utsname()->version))
2074 return "version";
2075 if (strcmp(info->uts.machine,init_utsname()->machine))
2076 return "machine";
2077 return NULL;
2078 }
2079 #endif
2080
2081 unsigned long snapshot_get_image_size(void)
2082 {
2083 return nr_copy_pages + nr_meta_pages + 1;
2084 }
2085
2086 static int init_header(struct swsusp_info *info)
2087 {
2088 memset(info, 0, sizeof(struct swsusp_info));
2089 info->num_physpages = get_num_physpages();
2090 info->image_pages = nr_copy_pages;
2091 info->pages = snapshot_get_image_size();
2092 info->size = info->pages;
2093 info->size <<= PAGE_SHIFT;
2094 return init_header_complete(info);
2095 }
2096
2097 /**
2098  * pack_pfns - Prepare PFNs for saving.
2099  * @bm: Memory bitmap.
2100  * @buf: Memory buffer to store the PFNs in.
2101  *
2102  * PFNs corresponding to set bits in @bm are stored in the area of memory
2103  * pointed to by @buf (1 page at a time).
2104  */
2105 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2106 {
2107 int j;
2108
2109 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2110 buf[j] = memory_bm_next_pfn(bm);
2111 if (unlikely(buf[j] == BM_END_OF_MAP))
2112 break;
2113 }
2114 }
2115
2116 /**
2117  * snapshot_read_next - Get the address to read the next image page from.
2118  * @handle: Snapshot handle to be used for the reading.
2119  *
2120  * On the first call, @handle should point to a zeroed snapshot_handle
2121  * structure.  The structure gets populated then and a pointer to it should
2122  * be passed to this function every next time.
2123  *
2124  * On success, the function returns a positive number.  Then, the caller
2125  * is allowed to read up to the returned number of bytes from the memory
2126  * location computed by the data_of() macro.
2127  *
2128  * The function returns 0 to indicate the end of the data stream condition,
2129  * and negative numbers are returned on errors.  If that happens, the
2130  * structure pointed to by @handle is not updated and should not be used.
2131  */
2132 int snapshot_read_next(struct snapshot_handle *handle)
2133 {
2134 if (handle->cur > nr_meta_pages + nr_copy_pages)
2135 return 0;
2136
2137 if (!buffer) {
2138
2139 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2140 if (!buffer)
2141 return -ENOMEM;
2142 }
2143 if (!handle->cur) {
2144 int error;
2145
2146 error = init_header((struct swsusp_info *)buffer);
2147 if (error)
2148 return error;
2149 handle->buffer = buffer;
2150 memory_bm_position_reset(&orig_bm);
2151 memory_bm_position_reset(&copy_bm);
2152 } else if (handle->cur <= nr_meta_pages) {
2153 clear_page(buffer);
2154 pack_pfns(buffer, &orig_bm);
2155 } else {
2156 struct page *page;
2157
2158 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2159 if (PageHighMem(page)) {
2160
2161
2162
2163
2164
2165 void *kaddr;
2166
2167 kaddr = kmap_atomic(page);
2168 copy_page(buffer, kaddr);
2169 kunmap_atomic(kaddr);
2170 handle->buffer = buffer;
2171 } else {
2172 handle->buffer = page_address(page);
2173 }
2174 }
2175 handle->cur++;
2176 return PAGE_SIZE;
2177 }
2178
2179 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2180 struct memory_bitmap *src)
2181 {
2182 unsigned long pfn;
2183
2184 memory_bm_position_reset(src);
2185 pfn = memory_bm_next_pfn(src);
2186 while (pfn != BM_END_OF_MAP) {
2187 memory_bm_set_bit(dst, pfn);
2188 pfn = memory_bm_next_pfn(src);
2189 }
2190 }
2191
2192 /**
2193  * mark_unsafe_pages - Mark pages that were used before hibernation.
2194  *
2195  * Mark the pages that cannot be used for storing the image during restoration
2196  * because they conflict with the pages that had been used before hibernation.
2197  */
2198 static void mark_unsafe_pages(struct memory_bitmap *bm)
2199 {
2200 unsigned long pfn;
2201
2202
2203 memory_bm_position_reset(free_pages_map);
2204 pfn = memory_bm_next_pfn(free_pages_map);
2205 while (pfn != BM_END_OF_MAP) {
2206 memory_bm_clear_current(free_pages_map);
2207 pfn = memory_bm_next_pfn(free_pages_map);
2208 }
2209
2210
2211 duplicate_memory_bitmap(free_pages_map, bm);
2212
2213 allocated_unsafe_pages = 0;
2214 }
2215
2216 static int check_header(struct swsusp_info *info)
2217 {
2218 const char *reason;
2219
2220 reason = check_image_kernel(info);
2221 if (!reason && info->num_physpages != get_num_physpages())
2222 reason = "memory size";
2223 if (reason) {
2224 pr_err("Image mismatch: %s\n", reason);
2225 return -EPERM;
2226 }
2227 return 0;
2228 }
2229
2230
2231
2232
2233 static int load_header(struct swsusp_info *info)
2234 {
2235 int error;
2236
2237 restore_pblist = NULL;
2238 error = check_header(info);
2239 if (!error) {
2240 nr_copy_pages = info->image_pages;
2241 nr_meta_pages = info->pages - info->image_pages - 1;
2242 }
2243 return error;
2244 }
2245
2246 /**
2247  * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2248  * @bm: Memory bitmap.
2249  * @buf: Area of memory containing the PFNs.
2250  *
2251  * For each element of the array pointed to by @buf (1 page at a time), set
2252  * the corresponding bit in @bm.
2253  */
2254 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2255 {
2256 int j;
2257
2258 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2259 if (unlikely(buf[j] == BM_END_OF_MAP))
2260 break;
2261
2262 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2263 memory_bm_set_bit(bm, buf[j]);
2264 else
2265 return -EFAULT;
2266 }
2267
2268 return 0;
2269 }
2270
2271 #ifdef CONFIG_HIGHMEM
2272
2273
2274
2275
2276
2277 struct highmem_pbe {
2278 struct page *copy_page;
2279 struct page *orig_page;
2280 struct highmem_pbe *next;
2281 };
2282
2283
2284
2285
2286
2287
2288
2289 static struct highmem_pbe *highmem_pblist;
2290
2291
2292
2293
2294
2295
2296
2297 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2298 {
2299 unsigned long pfn;
2300 unsigned int cnt = 0;
2301
2302 memory_bm_position_reset(bm);
2303 pfn = memory_bm_next_pfn(bm);
2304 while (pfn != BM_END_OF_MAP) {
2305 if (PageHighMem(pfn_to_page(pfn)))
2306 cnt++;
2307
2308 pfn = memory_bm_next_pfn(bm);
2309 }
2310 return cnt;
2311 }
2312
2313 static unsigned int safe_highmem_pages;
2314
2315 static struct memory_bitmap *safe_highmem_bm;
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330 static int prepare_highmem_image(struct memory_bitmap *bm,
2331 unsigned int *nr_highmem_p)
2332 {
2333 unsigned int to_alloc;
2334
2335 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2336 return -ENOMEM;
2337
2338 if (get_highmem_buffer(PG_SAFE))
2339 return -ENOMEM;
2340
2341 to_alloc = count_free_highmem_pages();
2342 if (to_alloc > *nr_highmem_p)
2343 to_alloc = *nr_highmem_p;
2344 else
2345 *nr_highmem_p = to_alloc;
2346
2347 safe_highmem_pages = 0;
2348 while (to_alloc-- > 0) {
2349 struct page *page;
2350
2351 page = alloc_page(__GFP_HIGHMEM);
2352 if (!swsusp_page_is_free(page)) {
2353
2354 memory_bm_set_bit(bm, page_to_pfn(page));
2355 safe_highmem_pages++;
2356 }
2357
2358 swsusp_set_page_forbidden(page);
2359 swsusp_set_page_free(page);
2360 }
2361 memory_bm_position_reset(bm);
2362 safe_highmem_bm = bm;
2363 return 0;
2364 }
2365
2366 static struct page *last_highmem_page;
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386 static void *get_highmem_page_buffer(struct page *page,
2387 struct chain_allocator *ca)
2388 {
2389 struct highmem_pbe *pbe;
2390 void *kaddr;
2391
2392 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2393
2394
2395
2396
2397 last_highmem_page = page;
2398 return buffer;
2399 }
2400
2401
2402
2403
2404 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2405 if (!pbe) {
2406 swsusp_free();
2407 return ERR_PTR(-ENOMEM);
2408 }
2409 pbe->orig_page = page;
2410 if (safe_highmem_pages > 0) {
2411 struct page *tmp;
2412
2413
2414 kaddr = buffer;
2415 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2416 safe_highmem_pages--;
2417 last_highmem_page = tmp;
2418 pbe->copy_page = tmp;
2419 } else {
2420
2421 kaddr = safe_pages_list;
2422 safe_pages_list = safe_pages_list->next;
2423 pbe->copy_page = virt_to_page(kaddr);
2424 }
2425 pbe->next = highmem_pblist;
2426 highmem_pblist = pbe;
2427 return kaddr;
2428 }
2429
2430
2431
2432
2433
2434
2435
2436
2437 static void copy_last_highmem_page(void)
2438 {
2439 if (last_highmem_page) {
2440 void *dst;
2441
2442 dst = kmap_atomic(last_highmem_page);
2443 copy_page(dst, buffer);
2444 kunmap_atomic(dst);
2445 last_highmem_page = NULL;
2446 }
2447 }
2448
2449 static inline int last_highmem_page_copied(void)
2450 {
2451 return !last_highmem_page;
2452 }
2453
2454 static inline void free_highmem_data(void)
2455 {
2456 if (safe_highmem_bm)
2457 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2458
2459 if (buffer)
2460 free_image_page(buffer, PG_UNSAFE_CLEAR);
2461 }
2462 #else
2463 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2464
2465 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2466 unsigned int *nr_highmem_p) { return 0; }
2467
2468 static inline void *get_highmem_page_buffer(struct page *page,
2469 struct chain_allocator *ca)
2470 {
2471 return ERR_PTR(-EINVAL);
2472 }
2473
2474 static inline void copy_last_highmem_page(void) {}
2475 static inline int last_highmem_page_copied(void) { return 1; }
2476 static inline void free_highmem_data(void) {}
2477 #endif
2478
2479 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2480
2481 /**
2482  * prepare_image - Make room for loading hibernation image.
2483  * @new_bm: Uninitialized memory bitmap structure.
2484  * @bm: Memory bitmap with unsafe pages marked.
2485  *
2486  * Use @bm to mark the pages that will be overwritten in the process of
2487  * restoring the system memory state from the suspend image ("unsafe" pages)
2488  * and allocate memory for the image.
2489  *
2490  * The idea is to allocate a new memory bitmap first and then allocate
2491  * as many pages as needed for image data, but without specifying what those
2492  * pages will be used for just yet.  Instead, we mark them all as allocated
2493  * and create a list of "safe" pages to be used later.  On systems with high
2494  * memory a list of "safe" highmem pages is created too.
2495  */
2496 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2497 {
2498 unsigned int nr_pages, nr_highmem;
2499 struct linked_page *lp;
2500 int error;
2501
2502
2503 free_image_page(buffer, PG_UNSAFE_CLEAR);
2504 buffer = NULL;
2505
2506 nr_highmem = count_highmem_image_pages(bm);
2507 mark_unsafe_pages(bm);
2508
2509 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2510 if (error)
2511 goto Free;
2512
2513 duplicate_memory_bitmap(new_bm, bm);
2514 memory_bm_free(bm, PG_UNSAFE_KEEP);
2515 if (nr_highmem > 0) {
2516 error = prepare_highmem_image(bm, &nr_highmem);
2517 if (error)
2518 goto Free;
2519 }
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2530 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2531 while (nr_pages > 0) {
2532 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2533 if (!lp) {
2534 error = -ENOMEM;
2535 goto Free;
2536 }
2537 lp->next = safe_pages_list;
2538 safe_pages_list = lp;
2539 nr_pages--;
2540 }
2541
2542 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2543 while (nr_pages > 0) {
2544 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2545 if (!lp) {
2546 error = -ENOMEM;
2547 goto Free;
2548 }
2549 if (!swsusp_page_is_free(virt_to_page(lp))) {
2550
2551 lp->next = safe_pages_list;
2552 safe_pages_list = lp;
2553 }
2554
2555 swsusp_set_page_forbidden(virt_to_page(lp));
2556 swsusp_set_page_free(virt_to_page(lp));
2557 nr_pages--;
2558 }
2559 return 0;
2560
2561 Free:
2562 swsusp_free();
2563 return error;
2564 }
2565
2566
2567
2568
2569
2570
2571
2572 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2573 {
2574 struct pbe *pbe;
2575 struct page *page;
2576 unsigned long pfn = memory_bm_next_pfn(bm);
2577
2578 if (pfn == BM_END_OF_MAP)
2579 return ERR_PTR(-EFAULT);
2580
2581 page = pfn_to_page(pfn);
2582 if (PageHighMem(page))
2583 return get_highmem_page_buffer(page, ca);
2584
2585 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2586
2587
2588
2589
2590 return page_address(page);
2591
2592
2593
2594
2595
2596 pbe = chain_alloc(ca, sizeof(struct pbe));
2597 if (!pbe) {
2598 swsusp_free();
2599 return ERR_PTR(-ENOMEM);
2600 }
2601 pbe->orig_address = page_address(page);
2602 pbe->address = safe_pages_list;
2603 safe_pages_list = safe_pages_list->next;
2604 pbe->next = restore_pblist;
2605 restore_pblist = pbe;
2606 return pbe->address;
2607 }
2608
2609 /**
2610  * snapshot_write_next - Get the address to store the next image page.
2611  * @handle: Snapshot handle structure to guide the writing.
2612  *
2613  * On the first call, @handle should point to a zeroed snapshot_handle
2614  * structure.  The structure gets populated then and a pointer to it should
2615  * be passed to this function every next time.
2616  *
2617  * On success, the function returns a positive number.  Then, the caller
2618  * is allowed to write up to the returned number of bytes to the memory
2619  * location computed by the data_of() macro.
2620  *
2621  * The function returns 0 to indicate the "end of image" condition, and
2622  * negative numbers are returned on errors.  If that happens, the structure
2623  * pointed to by @handle is not updated and should not be used any more.
2624  */
2625 int snapshot_write_next(struct snapshot_handle *handle)
2626 {
2627 static struct chain_allocator ca;
2628 int error = 0;
2629
2630
2631 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2632 return 0;
2633
2634 handle->sync_read = 1;
2635
2636 if (!handle->cur) {
2637 if (!buffer)
2638
2639 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2640
2641 if (!buffer)
2642 return -ENOMEM;
2643
2644 handle->buffer = buffer;
2645 } else if (handle->cur == 1) {
2646 error = load_header(buffer);
2647 if (error)
2648 return error;
2649
2650 safe_pages_list = NULL;
2651
2652 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2653 if (error)
2654 return error;
2655
2656 hibernate_restore_protection_begin();
2657 } else if (handle->cur <= nr_meta_pages + 1) {
2658 error = unpack_orig_pfns(buffer, &copy_bm);
2659 if (error)
2660 return error;
2661
2662 if (handle->cur == nr_meta_pages + 1) {
2663 error = prepare_image(&orig_bm, &copy_bm);
2664 if (error)
2665 return error;
2666
2667 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2668 memory_bm_position_reset(&orig_bm);
2669 restore_pblist = NULL;
2670 handle->buffer = get_buffer(&orig_bm, &ca);
2671 handle->sync_read = 0;
2672 if (IS_ERR(handle->buffer))
2673 return PTR_ERR(handle->buffer);
2674 }
2675 } else {
2676 copy_last_highmem_page();
2677 hibernate_restore_protect_page(handle->buffer);
2678 handle->buffer = get_buffer(&orig_bm, &ca);
2679 if (IS_ERR(handle->buffer))
2680 return PTR_ERR(handle->buffer);
2681 if (handle->buffer != buffer)
2682 handle->sync_read = 0;
2683 }
2684 handle->cur++;
2685 return PAGE_SIZE;
2686 }
2687
2688 /**
2689  * snapshot_write_finalize - Complete the loading of a hibernation image.
2690  *
2691  * Must be called after the last call to snapshot_write_next() in case the
2692  * last page in the image happens to be a highmem page and its contents
2693  * should be stored in highmem.  Additionally, it recycles bitmap memory
2694  * that's not necessary any more.
2695  */
2696 void snapshot_write_finalize(struct snapshot_handle *handle)
2697 {
2698 copy_last_highmem_page();
2699 hibernate_restore_protect_page(handle->buffer);
2700
2701 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2702 memory_bm_recycle(&orig_bm);
2703 free_highmem_data();
2704 }
2705 }
2706
2707 int snapshot_image_loaded(struct snapshot_handle *handle)
2708 {
2709 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2710 handle->cur <= nr_meta_pages + nr_copy_pages);
2711 }
2712
2713 #ifdef CONFIG_HIGHMEM
2714
2715 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2716 void *buf)
2717 {
2718 void *kaddr1, *kaddr2;
2719
2720 kaddr1 = kmap_atomic(p1);
2721 kaddr2 = kmap_atomic(p2);
2722 copy_page(buf, kaddr1);
2723 copy_page(kaddr1, kaddr2);
2724 copy_page(kaddr2, buf);
2725 kunmap_atomic(kaddr2);
2726 kunmap_atomic(kaddr1);
2727 }
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739 int restore_highmem(void)
2740 {
2741 struct highmem_pbe *pbe = highmem_pblist;
2742 void *buf;
2743
2744 if (!pbe)
2745 return 0;
2746
2747 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2748 if (!buf)
2749 return -ENOMEM;
2750
2751 while (pbe) {
2752 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2753 pbe = pbe->next;
2754 }
2755 free_image_page(buf, PG_UNSAFE_CLEAR);
2756 return 0;
2757 }
2758 #endif