#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/*
 * Create a temporary mapping for the kernel text page that contains the
 * image kernel's entry point (restore_jump_address), inserted into @pgd.
 * Returns 0 on success, -ENOMEM if a page-table page cannot be allocated.
 */
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over
	 * to it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping,
	 * so the temporary kernel text mapping is only needed for the final
	 * jump.  Moreover, in that mapping the virtual address of the image
	 * kernel's entry point must be the same as its virtual address in the
	 * image kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	/* With 5-level paging an extra p4d level sits between pgd and pud. */
	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	/* Map the 2M large page holding the entry point as executable text. */
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
0086
0087 static void *alloc_pgt_page(void *context)
0088 {
0089 return (void *)get_safe_page(GFP_ATOMIC);
0090 }
0091
0092 static int set_up_temporary_mappings(void)
0093 {
0094 struct x86_mapping_info info = {
0095 .alloc_pgt_page = alloc_pgt_page,
0096 .page_flag = __PAGE_KERNEL_LARGE_EXEC,
0097 .offset = __PAGE_OFFSET,
0098 };
0099 unsigned long mstart, mend;
0100 pgd_t *pgd;
0101 int result;
0102 int i;
0103
0104 pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
0105 if (!pgd)
0106 return -ENOMEM;
0107
0108
0109 result = set_up_temporary_text_mapping(pgd);
0110 if (result)
0111 return result;
0112
0113
0114 for (i = 0; i < nr_pfn_mapped; i++) {
0115 mstart = pfn_mapped[i].start << PAGE_SHIFT;
0116 mend = pfn_mapped[i].end << PAGE_SHIFT;
0117
0118 result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
0119 if (result)
0120 return result;
0121 }
0122
0123 temp_pgt = __pa(pgd);
0124 return 0;
0125 }
0126
0127 asmlinkage int swsusp_arch_resume(void)
0128 {
0129 int error;
0130
0131
0132 error = set_up_temporary_mappings();
0133 if (error)
0134 return error;
0135
0136 error = relocate_restore_code();
0137 if (error)
0138 return error;
0139
0140 restore_image();
0141 return 0;
0142 }