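/*
 * Architecture-specific (x86) functions for kexec-based crash dumps.
 */
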
#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>

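/* Used while preparing memory map entries for second kernel */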
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};
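
/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */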
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);
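
	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */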
	cpu_crash_vmclear_loaded_vmcss();
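
	/*
	 * Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */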
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();
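
	/*
	 * Disable Intel PT to stop its logging
	 */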
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}
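
/* Override the weak function in kernel/panic.c */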
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no other CPUs to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
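	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */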
	local_irq_disable();

	crash_smp_send_stop();
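
	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */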
	cpu_crash_vmclear_loaded_vmcss();
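
	/*
	 * Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */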
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();
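
	/*
	 * Disable Intel PT to stop its logging
	 */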
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
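	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */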
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

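/* Gather all the required information to prepare elf headers for ram regions */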
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

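	/*
	 * Exclusion of crash region and/or crashk_low_res may cause
	 * another range split. So add extra two slots here.
	 */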
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

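/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to range splits; the split ranges are put in cmem->ranges[].
 */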
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

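	/* Exclude the low 1M because it is always reserved */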
	ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1);
	if (ret)
		return ret;

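	/* Exclude crashkernel region */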
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end)
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

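/* Prepare elf headers. Return addr and size */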
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

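	/* Exclude unwanted mem ranges */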
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

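	/* By default prepare 64bit headers */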
	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
	vfree(cmem);
	return ret;
}

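/* Add the given entry to the e820 table in boot_params, if there is room */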
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry, sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

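/* Turn a walked resource into an e820 entry of the requested type */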
static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

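/* Seed cmem with [mstart, mend] and carve out the ELF header region */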
static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

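	/* Exclude elf header region */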
	start = image->elf_load_addr;
	end = start + image->elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

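/* Prepare memory map for crash dump kernel */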
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(struct_size(cmem, ranges, 1));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;
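
	/* Add the low 1M */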
	cmd.type = E820_TYPE_RAM;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
			    memmap_entry_callback);
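
	/* Add ACPI tables */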
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);
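
	/* Add ACPI Non-volatile Storage */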
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);
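
	/* Add e820 reserved ranges */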
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			    memmap_entry_callback);
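
	/* Add crashk_low_res region */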
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = resource_size(&crashk_low_res);
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

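	/* Exclude some ranges from crashk_res and add rest to memmap */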
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

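		/* If entry is less than a page, skip it */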
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

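	/* Prepare elf headers and add a segment */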
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->elf_headers = kbuf.buffer;
	image->elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->elf_headers);
		return ret;
	}
	image->elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif