// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for x86-64
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

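/*
 * Map the page that contains the image kernel's entry point into the
 * temporary page tables, at the virtual address the image kernel expects
 * (restore_jump_address), so that the final jump into the image lands in
 * a valid mapping.
 */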
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
    pmd_t *pmd;
    pud_t *pud;
    p4d_t *p4d = NULL;
    pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
    pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

    /* Filter out unsupported __PAGE_KERNEL* bits: */
    pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
    pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

    /*
     * The new mapping only has to cover the page containing the image
     * kernel's entry point (jump_address_phys), because the switch over to
     * it is carried out by relocated code running from a page allocated
     * specifically for this purpose and covered by the identity mapping, so
     * the temporary kernel text mapping is only needed for the final jump.
     * Moreover, in that mapping the virtual address of the image kernel's
     * entry point must be the same as its virtual address in the image
     * kernel (restore_jump_address), so the image kernel's
     * restore_registers() code doesn't find itself in a different area of
     * the virtual address space after switching over to the original page
     * tables used by the image kernel.
     */

    if (pgtable_l5_enabled()) {
        p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
        if (!p4d)
            return -ENOMEM;
    }

    pud = (pud_t *)get_safe_page(GFP_ATOMIC);
    if (!pud)
        return -ENOMEM;

    pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
    if (!pmd)
        return -ENOMEM;

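    /*
     * Wire up the hierarchy bottom-up: a 2M executable PMD entry covers
     * the region containing jump_address_phys, and the PUD entry points
     * at the newly allocated PMD page table.
     */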
    set_pmd(pmd + pmd_index(restore_jump_address),
        __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
    set_pud(pud + pud_index(restore_jump_address),
        __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
    if (p4d) {
        p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
        pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

        set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
        set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
    } else {
        /* No p4d for 4-level paging: point the pgd to the pud page table */
        pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
        set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
    }

    return 0;
}

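/*
 * Page-table allocation callback for kernel_ident_mapping_init(): hand out
 * "safe" pages, i.e. pages that will not be overwritten while the
 * hibernation image is being restored.
 */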
static void *alloc_pgt_page(void *context)
{
    return (void *)get_safe_page(GFP_ATOMIC);
}

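/*
 * Build the temporary page tables used during resume: a text mapping for
 * the final jump into the image kernel plus a fresh direct mapping of every
 * range in pfn_mapped. The physical address of the top-level table is
 * published in temp_pgt for the low-level resume code.
 */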
static int set_up_temporary_mappings(void)
{
    struct x86_mapping_info info = {
        .alloc_pgt_page = alloc_pgt_page,
        .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
        .offset     = __PAGE_OFFSET,
    };
    unsigned long mstart, mend;
    pgd_t *pgd;
    int result;
    int i;

    pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
    if (!pgd)
        return -ENOMEM;

    /* Prepare a temporary mapping for the kernel text */
    result = set_up_temporary_text_mapping(pgd);
    if (result)
        return result;

    /* Set up the direct mapping from scratch */
    for (i = 0; i < nr_pfn_mapped; i++) {
        mstart = pfn_mapped[i].start << PAGE_SHIFT;
        mend   = pfn_mapped[i].end << PAGE_SHIFT;

        result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
        if (result)
            return result;
    }

    temp_pgt = __pa(pgd);
    return 0;
}

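/*
 * Architecture-specific entry point for resume: build the temporary page
 * tables, copy the relocatable restore code to a safe page, and jump to
 * the low-level code that restores the image.
 */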
asmlinkage int swsusp_arch_resume(void)
{
    int error;

    /* We have enough memory and from now on we cannot recover */
    error = set_up_temporary_mappings();
    if (error)
        return error;

    error = relocate_restore_code();
    if (error)
        return error;

    restore_image();
    return 0;
}
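
For readers tracing the pmd_index()/pud_index()/pgd_index() arithmetic above, here is a minimal, self-contained user-space sketch (not part of the kernel file) of how an x86-64 virtual address decomposes into the 9-bit table indexes used with 4-level paging. The shift constants mirror the standard x86-64 values (PGDIR_SHIFT 39, PUD_SHIFT 30, PMD_SHIFT 21); the example address is hypothetical, chosen to resemble a kernel-text address such as restore_jump_address.

#include <stdio.h>
#include <stdint.h>

/* Standard x86-64 4-level paging: each table consumes 9 address bits. */
#define PMD_SHIFT      21   /* 2M regions, as mapped by set_pmd() above */
#define PUD_SHIFT      30
#define PGDIR_SHIFT    39
#define PTRS_PER_TABLE 512  /* entries per page-table page */

static unsigned pgd_index(uint64_t addr) { return (addr >> PGDIR_SHIFT) & (PTRS_PER_TABLE - 1); }
static unsigned pud_index(uint64_t addr) { return (addr >> PUD_SHIFT)   & (PTRS_PER_TABLE - 1); }
static unsigned pmd_index(uint64_t addr) { return (addr >> PMD_SHIFT)   & (PTRS_PER_TABLE - 1); }

int main(void)
{
    /* Hypothetical virtual address in the kernel text range. */
    uint64_t addr = 0xffffffff81000000ULL;

    printf("pgd_index = %u\n", pgd_index(addr)); /* 511 */
    printf("pud_index = %u\n", pud_index(addr)); /* 510 */
    printf("pmd_index = %u\n", pmd_index(addr)); /* 8 */
    return 0;
}

Masking jump_address_phys with PMD_MASK in set_pmd() serves a related purpose: a 2M PMD entry can only map a 2M-aligned physical region, so the low 21 bits are cleared and the entry covers the whole 2M block containing the entry point.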