// SPDX-License-Identifier: GPL-2.0
/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <asm/system_misc.h>
#include <asm/set_memory.h>

extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;

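/* Number of non-crashing CPUs that have yet to park themselves after a crash IPI. */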
static atomic_t waiting_for_crash_ipi;

/*
 * Provide a dummy crash_notes definition while crash dump arrives to arm.
 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
 */

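/*
 * Validate an image before it is loaded: require CPU hotplug support on SMP
 * hardware (otherwise secondary cores cannot be shut down reliably), check
 * that every segment lies in real memory, and record either the default
 * ATAGs address or a DTB segment in arch.kernel_r2 for the relocation code.
 */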
int machine_kexec_prepare(struct kimage *image)
{
    struct kexec_segment *current_segment;
    __be32 header;
    int i, err;

    image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET
                     + KEXEC_ARM_ATAGS_OFFSET;

    /*
     * Validate that if the current HW supports SMP, then the SW supports
     * and implements CPU hotplug for the current HW. If not, we won't be
     * able to kexec reliably, so fail the prepare operation.
     */
    if (num_possible_cpus() > 1 && platform_can_secondary_boot() &&
        !platform_can_cpu_hotplug())
        return -EINVAL;

    /*
     * There may be no segment at the default ATAGs address; try to
     * locate a DTB segment by its magic number instead.
     */
    for (i = 0; i < image->nr_segments; i++) {
        current_segment = &image->segment[i];

        if (!memblock_is_region_memory(idmap_to_phys(current_segment->mem),
                           current_segment->memsz))
            return -EINVAL;

        err = get_user(header, (__be32*)current_segment->buf);
        if (err)
            return err;

        if (header == cpu_to_be32(OF_DT_HEADER))
            image->arch.kernel_r2 = current_segment->mem;
    }
    return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

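/*
 * Runs on every non-panicking CPU via IPI when another CPU has crashed:
 * save this CPU's registers for the crash dump, mark it offline, and
 * park it in a wfe() loop until the crash kernel takes over.
 */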
void machine_crash_nonpanic_core(void *unused)
{
    struct pt_regs regs;

    crash_setup_regs(&regs, get_irq_regs());
    printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
           smp_processor_id());
    crash_save_cpu(&regs, smp_processor_id());
    flush_cache_all();

    set_cpu_online(smp_processor_id(), false);
    atomic_dec(&waiting_for_crash_ipi);

    while (1) {
        cpu_relax();
        wfe();
    }
}

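/*
 * Ask all other online CPUs to stop via IPI and wait (up to one second)
 * for them to acknowledge. Safe to call more than once; later calls are
 * no-ops.
 */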
void crash_smp_send_stop(void)
{
    static int cpus_stopped;
    unsigned long msecs;

    if (cpus_stopped)
        return;

    atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
    smp_call_function(machine_crash_nonpanic_core, NULL, false);
    msecs = 1000; /* Wait at most a second for the other cpus to stop */
    while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
        mdelay(1);
        msecs--;
    }
    if (atomic_read(&waiting_for_crash_ipi) > 0)
        pr_warn("Non-crashing CPUs did not react to IPI\n");

    cpus_stopped = 1;
}

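/*
 * Quiesce the interrupt controller before jumping into the new kernel:
 * EOI any in-progress interrupts, then mask and disable every IRQ line.
 */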
static void machine_kexec_mask_interrupts(void)
{
    unsigned int i;
    struct irq_desc *desc;

    for_each_irq_desc(i, desc) {
        struct irq_chip *chip;

        chip = irq_desc_get_chip(desc);
        if (!chip)
            continue;

        if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
            chip->irq_eoi(&desc->irq_data);

        if (chip->irq_mask)
            chip->irq_mask(&desc->irq_data);

        if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
            chip->irq_disable(&desc->irq_data);
    }
}

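/*
 * Called on the panicking CPU: stop the other CPUs, save this CPU's
 * registers, and silence interrupts before loading the crashdump kernel.
 */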
void machine_crash_shutdown(struct pt_regs *regs)
{
    local_irq_disable();
    crash_smp_send_stop();

    crash_save_cpu(regs, smp_processor_id());
    machine_kexec_mask_interrupts();

    pr_info("Loading crashdump kernel...\n");
}

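/*
 * Final stage of kexec: copy the relocation stub into the control code
 * page, fill in the parameters it needs (entry point, indirection page,
 * machine type and r2 value), then soft-restart through its identity-
 * mapped physical address.
 */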
void machine_kexec(struct kimage *image)
{
    unsigned long page_list, reboot_entry_phys;
    struct kexec_relocate_data *data;
    void (*reboot_entry)(void);
    void *reboot_code_buffer;

    /*
     * This can only happen if machine_shutdown() failed to disable some
     * CPU, and that can only happen if the checks in
     * machine_kexec_prepare() were not correct. If this fails, we can't
     * reliably kexec anyway, so BUG_ON is appropriate.
     */
    BUG_ON(num_online_cpus() > 1);

    page_list = image->head & PAGE_MASK;

    reboot_code_buffer = page_address(image->control_code_page);

    /* copy our kernel relocation code to the control code page */
    reboot_entry = fncpy(reboot_code_buffer,
                 &relocate_new_kernel,
                 relocate_new_kernel_size);

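    /* The relocation parameters sit directly after the copied stub. */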
    data = reboot_code_buffer + relocate_new_kernel_size;
    data->kexec_start_address = image->start;
    data->kexec_indirection_page = page_list;
    data->kexec_mach_type = machine_arch_type;
    data->kexec_r2 = image->arch.kernel_r2;

    /* get the identity mapping physical address for the reboot code */
    reboot_entry_phys = virt_to_idmap(reboot_entry);

    pr_info("Bye!\n");

    soft_restart(reboot_entry_phys);
}

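/*
 * Record in vmcoreinfo whether this kernel was built with LPAE, so that
 * userspace dump tools can interpret the page tables correctly.
 */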
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_ARM_LPAE
    VMCOREINFO_CONFIG(ARM_LPAE);
#endif
}