0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/gfp.h>
0009 #include <linux/suspend.h>
0010 #include <linux/memblock.h>
0011 #include <linux/pgtable.h>
0012
0013 #include <asm/page.h>
0014 #include <asm/mmzone.h>
0015 #include <asm/sections.h>
0016 #include <asm/suspend.h>
0017
0018
/* Top-level page directory for the temporary page tables used during resume. */
pgd_t *resume_pg_dir;
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
/*
 * Return the pmd table covering the start of the range mapped by @pgd,
 * creating it if necessary.
 *
 * With PAE a real pmd page exists: allocate one from hibernation-safe
 * memory (get_safe_page()) and hook it into @pgd.  Without PAE the
 * p4d/pud/pmd levels are folded, so simply walk down to the folded pmd.
 *
 * Returns the pmd table, or NULL if the safe-page allocation failed.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	/* Allocate the pmd page from memory safe to use during resume. */
	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd_table)
		return NULL;

	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	/* Walking back down from @pgd must land on the table just installed. */
	BUG_ON(pmd_table != pmd_offset(pud, 0));
#else
	/* !PAE: intermediate levels are folded; the pmd lives in the pgd. */
	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);
	pmd_table = pmd_offset(pud, 0);
#endif

	return pmd_table;
}
0054
0055
0056
0057
0058
0059 static pte_t *resume_one_page_table_init(pmd_t *pmd)
0060 {
0061 if (pmd_none(*pmd)) {
0062 pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
0063 if (!page_table)
0064 return NULL;
0065
0066 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
0067
0068 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
0069
0070 return page_table;
0071 }
0072
0073 return pte_offset_kernel(pmd, 0);
0074 }
0075
0076
0077
0078
0079
0080
/*
 * Recreate the kernel's direct (lowmem) mapping in the temporary page
 * tables rooted at @pgd_base: virtual addresses from PAGE_OFFSET upward
 * are mapped to physical pages starting at pfn 0, up to max_low_pfn.
 *
 * Uses 4M/2M large pages when the CPU supports PSE, otherwise builds
 * 4k pte tables.  Returns 0 on success or -ENOMEM if a safe page
 * could not be allocated.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	/* Start at the pgd slot covering the beginning of lowmem. */
	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		/*
		 * All of lowmem is mapped, but keep iterating so every
		 * remaining pgd entry still gets a pmd table installed
		 * (relevant for PAE, where the entry is created above).
		 */
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			if (boot_cpu_has(X86_FEATURE_PSE)) {
				/* One large page covers a whole pmd entry. */
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				/* Fill the pte table one 4k page at a time. */
				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	return 0;
}
0132
0133 static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
0134 {
0135 #ifdef CONFIG_X86_PAE
0136 int i;
0137
0138
0139 for (i = 0; i < PTRS_PER_PGD; i++)
0140 set_pgd(pg_dir + i,
0141 __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
0142 #endif
0143 }
0144
/*
 * Map the trampoline: make the virtual address restore_jump_address
 * translate to the physical address jump_address_phys in the temporary
 * page tables rooted at @pgd_base, so execution can continue there
 * after the switch to the temporary tables.
 *
 * Uses a single large page when PSE is available, otherwise a 4k pte.
 * Returns 0 on success or -ENOMEM on safe-page allocation failure.
 */
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		/* Large page: align the physical target down to a pmd boundary. */
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		/* 4k page: align the physical target down to a page boundary. */
		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
0170
0171 asmlinkage int swsusp_arch_resume(void)
0172 {
0173 int error;
0174
0175 resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
0176 if (!resume_pg_dir)
0177 return -ENOMEM;
0178
0179 resume_init_first_level_page_table(resume_pg_dir);
0180
0181 error = set_up_temporary_text_mapping(resume_pg_dir);
0182 if (error)
0183 return error;
0184
0185 error = resume_physical_mapping_init(resume_pg_dir);
0186 if (error)
0187 return error;
0188
0189 temp_pgt = __pa(resume_pg_dir);
0190
0191 error = relocate_restore_code();
0192 if (error)
0193 return error;
0194
0195
0196 restore_image();
0197 return 0;
0198 }