#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory so that
 * kmap is not required.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE 4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT	(MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT	(MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT	(MAXMEM-1)

/* Allocate one page for the pdp and a second one for the code */
# define KEXEC_CONTROL_PAGE_SIZE	(4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/*
 * Capture the register state to be saved in the crash dump: copy @oldregs
 * when the caller already has a register snapshot (e.g. an exception frame),
 * otherwise record the current CPU's registers.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
		newregs->ip = _THIS_IP_;
	}
}
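
/*
 * Illustrative usage only (a sketch of the generic crash path, not part of
 * this header): the crash code passes the exception-time pt_regs when it has
 * one, or NULL so that the current CPU state is captured, roughly:
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);	// regs may be NULL
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 */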

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int host_mem_enc_active);
#endif
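
/*
 * Illustrative only (a sketch of the 64-bit call site, not defined in this
 * header): machine_kexec() invokes the relocated copy of this routine with
 * the indirection list head, a page_list array assembled from the PAGES_NR
 * entries above, and the new kernel's entry point, roughly:
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context,
 *				       host_mem_enc_active);
 *
 * where host_mem_enc_active stands for whatever memory-encryption query the
 * caller uses.
 */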

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
};
#endif

#ifdef CONFIG_X86_64
/*
 * This structure mirrors the register block consumed by the purgatory entry
 * code (arch/x86/purgatory/entry64.S); the number and order of the fields
 * must stay in sync with it.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
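
/*
 * Illustrative only (a sketch, assuming the kexec_file bzImage loader): the
 * loader fills this block and patches it into the purgatory image so that
 * the entry stub can load the registers before jumping to the new kernel.
 * The load addresses below are hypothetical loader-local variables:
 *
 *	struct kexec_entry64_regs regs64 = { 0 };
 *
 *	regs64.rsi = bootparam_load_addr;	// pointer to boot_params
 *	regs64.rip = kernel_load_addr + 0x200;	// 64-bit kernel entry point
 *	kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
 *				       sizeof(regs64), false);
 */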

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add

void *arch_kexec_kernel_image_load(struct kimage *image);
#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load

int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif /* CONFIG_KEXEC_FILE */
#endif /* CONFIG_X86_64 */

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);
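
/*
 * Illustrative only (a sketch of the expected user, not part of this header):
 * a hypervisor module publishes its VMCLEAR callback through this hook so
 * that loaded VMCSs are cleared before the crash kernel starts, e.g.
 * kvm-intel does roughly:
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   crash_vmclear_local_loaded_vmcss);
 */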

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */