// SPDX-License-Identifier: GPL-2.0
/*
 * Hibernation support for x86
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/types.h>
#include <linux/crc32.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
unsigned long temp_pgt __visible;
unsigned long relocated_restore_code __visible;

/**
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 * @pfn: the page frame number to check
 *
 * Return: 1 if @pfn lies in the 'nosave' section, 0 otherwise.
 */
int pfn_is_nosave(unsigned long pfn)
{
    unsigned long nosave_begin_pfn;
    unsigned long nosave_end_pfn;

    nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
    nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

    return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}
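
/*
 * Illustrative example (hypothetical addresses, not taken from a real build):
 * with 4 KiB pages (PAGE_SHIFT == 12), a __nosave_begin physical address of
 * 0x01a42000 gives nosave_begin_pfn == 0x01a42, and a __nosave_end of
 * 0x01a44800 rounds up to nosave_end_pfn == 0x01a45, so pfns
 * 0x01a42..0x01a44 are reported as 'nosave' and skipped by the snapshot code.
 */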

struct restore_data_record {
    unsigned long jump_address;
    unsigned long jump_address_phys;
    unsigned long cr3;
    unsigned long magic;
    unsigned long e820_checksum;
};
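
/*
 * Note: on x86-64 (8-byte longs) this record is 5 * 8 == 40 bytes.  The
 * kernel that wrote the image and the kernel restoring it must agree on this
 * exact layout, which is what the RESTORE_MAGIC check below enforces.
 */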

/**
 * compute_e820_crc32 - calculate crc32 of a given e820 table
 *
 * @table: the e820 table to be calculated
 *
 * Return: the resulting checksum
 */
static inline u32 compute_e820_crc32(struct e820_table *table)
{
    int size = offsetof(struct e820_table, entries) +
        sizeof(struct e820_entry) * table->nr_entries;

    return ~crc32_le(~0, (unsigned char const *)table, size);
}
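
/*
 * Only the populated part of the table is hashed: the header fields up to
 * 'entries' plus nr_entries actual entries, rather than the full statically
 * sized array, so unused trailing slots cannot influence the checksum.
 */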

#ifdef CONFIG_X86_64
#define RESTORE_MAGIC   0x23456789ABCDEF02UL
#else
#define RESTORE_MAGIC   0x12345679UL
#endif
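
/*
 * RESTORE_MAGIC effectively doubles as a format version for struct
 * restore_data_record: any change to the record's layout requires bumping
 * the magic so that an older image is rejected instead of being
 * misinterpreted on restore.
 */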

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size available for the architecture specific data, in bytes
 *
 * Return: 0 on success, -EOVERFLOW if the data does not fit in @max_size.
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
    struct restore_data_record *rdr = addr;

    if (max_size < sizeof(struct restore_data_record))
        return -EOVERFLOW;
    rdr->magic = RESTORE_MAGIC;
    rdr->jump_address = (unsigned long)restore_registers;
    rdr->jump_address_phys = __pa_symbol(restore_registers);

    /*
     * The restore code fixes up CR3 and CR4 in the following sequence:
     *
     * [in hibernation asm]
     * 1. CR3 <= temporary page tables
     * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
     * 3. CR3 <= rdr->cr3
     * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
     * [in restore_processor_state()]
     * 5. CR4 <= saved CR4
     * 6. CR3 <= saved CR3
     *
     * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
     * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
     * rdr->cr3 needs to point to valid page tables but must not
     * have any of the PCID bits set.
     */
    rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

    rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
    return 0;
}
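
/*
 * Illustrative example (hypothetical value): CR3_PCID_MASK covers the low 12
 * bits of CR3, so a restore_cr3 of 0x12a4b3005 (page-table base 0x12a4b3000
 * with PCID 5) is stored as rdr->cr3 == 0x12a4b3000, keeping the same page
 * tables but with the PCID field cleared.
 */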

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 *
 * Return: 0 on success, -EINVAL if the header magic is unrecognized, or
 *	-ENODEV if the current e820 memory map does not match the saved one.
 */
int arch_hibernation_header_restore(void *addr)
{
    struct restore_data_record *rdr = addr;

    if (rdr->magic != RESTORE_MAGIC) {
        pr_crit("Unrecognized hibernate image header format!\n");
        return -EINVAL;
    }

    restore_jump_address = rdr->jump_address;
    jump_address_phys = rdr->jump_address_phys;
    restore_cr3 = rdr->cr3;

    if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
        pr_crit("Hibernate inconsistent memory map detected!\n");
        return -ENODEV;
    }

    return 0;
}
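
/*
 * A checksum mismatch means the firmware-provided memory map changed between
 * hibernation and resume (for example, RAM was added or removed, or firmware
 * settings changed); restoring the image onto a different physical layout
 * would be unsafe, so the resume is refused.
 */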

int relocate_restore_code(void)
{
    pgd_t *pgd;
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    /*
     * Allocate a "safe" page (one that will not be overwritten while the
     * image is being restored) and copy the restore code into it.
     */
    relocated_restore_code = get_safe_page(GFP_ATOMIC);
    if (!relocated_restore_code)
        return -ENOMEM;

    memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

    /*
     * Make the page containing the relocated code executable: walk the
     * current page tables and clear _PAGE_NX at whichever level actually
     * maps the page (a large mapping ends the walk early).
     */
    pgd = (pgd_t *)__va(read_cr3_pa()) +
        pgd_index(relocated_restore_code);
    p4d = p4d_offset(pgd, relocated_restore_code);
    if (p4d_large(*p4d)) {
        set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
        goto out;
    }
    pud = pud_offset(p4d, relocated_restore_code);
    if (pud_large(*pud)) {
        set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
        goto out;
    }
    pmd = pmd_offset(pud, relocated_restore_code);
    if (pmd_large(*pmd)) {
        set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
        goto out;
    }
    pte = pte_offset_kernel(pmd, relocated_restore_code);
    set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
    /* Flush the TLB so the permission change takes effect immediately. */
    __flush_tlb_all();
    return 0;
}
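
/*
 * The code is relocated so that the routine performing the final page copies
 * cannot itself be overwritten by one of those copies; the assembly restore
 * path jumps to relocated_restore_code rather than the original
 * core_restore_code.
 */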

int arch_resume_nosmt(void)
{
    int ret = 0;
    /*
     * We reached this while coming out of hibernation. This means
     * that SMT siblings are sleeping in hlt, as mwait is not safe
     * against control transition during resume (see comment in
     * hibernate_resume_nonboot_cpu_disable()).
     *
     * If the resumed kernel has SMT disabled, we have to take all the
     * SMT siblings out of hlt, and offline them again so that they
     * end up in mwait proper.
     *
     * Called with hotplug disabled.
     */
    cpu_hotplug_enable();
    if (cpu_smt_control == CPU_SMT_DISABLED ||
            cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
        enum cpuhp_smt_control old = cpu_smt_control;

        ret = cpuhp_smt_enable();
        if (ret)
            goto out;
        ret = cpuhp_smt_disable(old);
        if (ret)
            goto out;
    }
out:
    cpu_hotplug_disable();
    return ret;
}