Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Common prep/pmac/chrp boot and setup code.
0004  */
0005 
0006 #include <linux/module.h>
0007 #include <linux/string.h>
0008 #include <linux/sched.h>
0009 #include <linux/init.h>
0010 #include <linux/kernel.h>
0011 #include <linux/reboot.h>
0012 #include <linux/delay.h>
0013 #include <linux/initrd.h>
0014 #include <linux/tty.h>
0015 #include <linux/seq_file.h>
0016 #include <linux/root_dev.h>
0017 #include <linux/cpu.h>
0018 #include <linux/console.h>
0019 #include <linux/memblock.h>
0020 #include <linux/export.h>
0021 #include <linux/nvram.h>
0022 #include <linux/pgtable.h>
0023 #include <linux/of_fdt.h>
0024 #include <linux/irq.h>
0025 
0026 #include <asm/io.h>
0027 #include <asm/processor.h>
0028 #include <asm/setup.h>
0029 #include <asm/smp.h>
0030 #include <asm/elf.h>
0031 #include <asm/cputable.h>
0032 #include <asm/bootx.h>
0033 #include <asm/btext.h>
0034 #include <asm/machdep.h>
0035 #include <linux/uaccess.h>
0036 #include <asm/pmac_feature.h>
0037 #include <asm/sections.h>
0038 #include <asm/nvram.h>
0039 #include <asm/xmon.h>
0040 #include <asm/time.h>
0041 #include <asm/serial.h>
0042 #include <asm/udbg.h>
0043 #include <asm/code-patching.h>
0044 #include <asm/cpu_has_feature.h>
0045 #include <asm/asm-prototypes.h>
0046 #include <asm/kdump.h>
0047 #include <asm/feature-fixups.h>
0048 #include <asm/early_ioremap.h>
0049 
0050 #include "setup.h"
0051 
/* Boot-time debug printout hook: expands to nothing, i.e. compiled out. */
#define DBG(fmt...)

/* Early entry used when the kernel is loaded via BootX; defined elsewhere. */
extern void bootx_init(unsigned long r4, unsigned long phys);

/* Physical id of the boot CPU (set elsewhere; only defined/exported here). */
int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);

/* Per-CPU hardware id table — presumably logical->hardware cpu mapping;
 * filled in by SMP bringup code outside this file. */
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);

/* DMA mode register values; initialized by platform code, not here. */
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
0068 /*
0069  * This is run before start_kernel(), the kernel has been relocated
0070  * and we are running with enough of the MMU enabled to have our
0071  * proper kernel virtual addresses
0072  *
0073  * We do the initial parsing of the flat device-tree and prepares
0074  * for the MMU to be fully initialized.
0075  */
notrace void __init machine_init(u64 dt_ptr)
{
	/* Patch site inside the non-cacheable memset path. */
	u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
	ppc_inst_t insn;

	/* Configure static keys first, now that we're relocated. */
	setup_feature_keys();

	early_ioremap_init();

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	/* NOP out the patch__memcpy_nocache site. */
	patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP()));

	/*
	 * Build a conditional branch to the same target as the existing
	 * branch at 'addr' (flags 0x820000) and patch it in — per the
	 * comment below, this turns an unconditional b into bne cr0.
	 */
	create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
	patch_instruction(addr, insn);	/* replace b by bne cr0 */

	/* Do some early initialization based on the flat device tree;
	 * dt_ptr is a physical address (converted with __va below). */
	early_init_devtree(__va(dt_ptr));

	early_init_mmu();

	setup_kdump_trampoline();
}
0101 
0102 /* Checks "l2cr=xxxx" command-line option */
0103 static int __init ppc_setup_l2cr(char *str)
0104 {
0105     if (cpu_has_feature(CPU_FTR_L2CR)) {
0106         unsigned long val = simple_strtoul(str, NULL, 0);
0107         printk(KERN_INFO "l2cr set to %lx\n", val);
0108         _set_L2CR(0);       /* force invalidate by disable cache */
0109         _set_L2CR(val);     /* and enable it */
0110     }
0111     return 1;
0112 }
0113 __setup("l2cr=", ppc_setup_l2cr);
0114 
0115 /* Checks "l3cr=xxxx" command-line option */
0116 static int __init ppc_setup_l3cr(char *str)
0117 {
0118     if (cpu_has_feature(CPU_FTR_L3CR)) {
0119         unsigned long val = simple_strtoul(str, NULL, 0);
0120         printk(KERN_INFO "l3cr set to %lx\n", val);
0121         _set_L3CR(val);     /* and enable it */
0122     }
0123     return 1;
0124 }
0125 __setup("l3cr=", ppc_setup_l3cr);
0126 
0127 static int __init ppc_init(void)
0128 {
0129     /* clear the progress line */
0130     if (ppc_md.progress)
0131         ppc_md.progress("             ", 0xffff);
0132 
0133     /* call platform init */
0134     if (ppc_md.init != NULL) {
0135         ppc_md.init();
0136     }
0137     return 0;
0138 }
0139 arch_initcall(ppc_init);
0140 
0141 static void *__init alloc_stack(void)
0142 {
0143     void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN);
0144 
0145     if (!ptr)
0146         panic("cannot allocate %d bytes for stack at %pS\n",
0147               THREAD_SIZE, (void *)_RET_IP_);
0148 
0149     return ptr;
0150 }
0151 
0152 void __init irqstack_early_init(void)
0153 {
0154     unsigned int i;
0155 
0156     if (IS_ENABLED(CONFIG_VMAP_STACK))
0157         return;
0158 
0159     /* interrupt stacks must be in lowmem, we get that for free on ppc32
0160      * as the memblock is limited to lowmem by default */
0161     for_each_possible_cpu(i) {
0162         softirq_ctx[i] = alloc_stack();
0163         hardirq_ctx[i] = alloc_stack();
0164     }
0165 }
0166 
#ifdef CONFIG_VMAP_STACK
/* Per-CPU emergency stacks; slot 0 defaults to init_stack until the
 * allocations below replace it. */
void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};

/* Give every possible CPU its own freshly allocated emergency stack. */
void __init emergency_stack_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		emergency_ctx[cpu] = alloc_stack();
}
#endif
0178 
#ifdef CONFIG_BOOKE_OR_40x
/*
 * Allocate the extra exception-level stacks used by Book E / 40x CPUs:
 * critical-interrupt stacks always, plus debug and machine-check stacks
 * on BOOKE.  The tables are indexed by hardware CPU id, not logical id.
 */
void __init exc_lvl_early_init(void)
{
	unsigned int i, hw_cpu;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
	for_each_possible_cpu(i) {
#ifdef CONFIG_SMP
		hw_cpu = get_hard_smp_processor_id(i);
#else
		/* UP build: only the single hardware cpu 0 exists. */
		hw_cpu = 0;
#endif

		critirq_ctx[hw_cpu] = alloc_stack();
#ifdef CONFIG_BOOKE
		dbgirq_ctx[hw_cpu] = alloc_stack();
		mcheckirq_ctx[hw_cpu] = alloc_stack();
#endif
	}
}
#endif
0201 
/*
 * Install the appropriate CPU idle (power_save) routine into ppc_md
 * when the CPU advertises doze or nap capability.  Which routine is
 * chosen depends on the platform config (Book3S 32-bit vs E500).
 */
void __init setup_power_save(void)
{
#ifdef CONFIG_PPC_BOOK3S_32
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_E500
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = e500_idle;
#endif
}
0216 
0217 __init void initialize_cache_info(void)
0218 {
0219     /*
0220      * Set cache line size based on type of cpu as a default.
0221      * Systems with OF can look in the properties on the cpu node(s)
0222      * for a possibly more accurate value.
0223      */
0224     dcache_bsize = cur_cpu_spec->dcache_bsize;
0225     icache_bsize = cur_cpu_spec->icache_bsize;
0226 }