// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_irq.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
#include <asm/mce.h>

#include "setup.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
    .orig_x = 0,
    .orig_y = 25,
    .orig_video_cols = 80,
    .orig_video_lines = 25,
    .orig_video_isVGA = 1,
    .orig_video_points = 16
};
#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

#ifdef CONFIG_CRASH_CORE
/* This keeps track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif

/* also used by kexec */
void machine_shutdown(void)
{
    /*
     * If fadump is active, clean up the fadump registration before we
     * shut down.
     */
    fadump_cleanup();

    if (ppc_md.machine_shutdown)
        ppc_md.machine_shutdown();
}

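/* Last-resort fallback: hard-disable interrupts and spin forever. */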
static void machine_hang(void)
{
    pr_emerg("System Halted, OK to turn off power\n");
    local_irq_disable();
    while (1)
        ;
}

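/*
 * Try the platform's restart hook first, then any registered restart
 * handlers; if the machine still has not reset after a second, hang.
 */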
void machine_restart(char *cmd)
{
    machine_shutdown();
    if (ppc_md.restart)
        ppc_md.restart(cmd);

    smp_send_stop();

    do_kernel_restart(cmd);
    mdelay(1000);

    machine_hang();
}

void machine_power_off(void)
{
    machine_shutdown();
    do_kernel_power_off();
    smp_send_stop();
    machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

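/*
 * Back the arch random-seed interface with the platform's entropy
 * source, if one is provided; at most one long is filled per call.
 */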
size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
    if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
        return 1;
    return 0;
}
EXPORT_SYMBOL(arch_get_random_seed_longs);

void machine_halt(void)
{
    machine_shutdown();
    if (ppc_md.halt)
        ppc_md.halt();

    smp_send_stop();
    machine_hang();
}

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif

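/*
 * Print the summary block that follows the per-CPU entries in
 * /proc/cpuinfo: total bogomips (32-bit SMP), timebase, platform,
 * model, any platform-specific lines, and total memory (32-bit).
 */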
static void show_cpuinfo_summary(struct seq_file *m)
{
    struct device_node *root;
    const char *model = NULL;
    unsigned long bogosum = 0;
    int i;

    if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
        for_each_online_cpu(i)
            bogosum += loops_per_jiffy;
        seq_printf(m, "total bogomips\t: %lu.%02lu\n",
               bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
    }
    seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
    if (ppc_md.name)
        seq_printf(m, "platform\t: %s\n", ppc_md.name);
    root = of_find_node_by_path("/");
    if (root)
        model = of_get_property(root, "model", NULL);
    if (model)
        seq_printf(m, "model\t\t: %s\n", model);
    of_node_put(root);

    if (ppc_md.show_cpuinfo != NULL)
        ppc_md.show_cpuinfo(m);

    /* Display the amount of memory */
    if (IS_ENABLED(CONFIG_PPC32))
        seq_printf(m, "Memory\t\t: %d MB\n",
               (unsigned int)(total_memory / (1024 * 1024)));
}

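/* Emit one /proc/cpuinfo entry for the CPU encoded in @v by c_start() below. */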
static int show_cpuinfo(struct seq_file *m, void *v)
{
    unsigned long cpu_id = (unsigned long)v - 1;
    unsigned int pvr;
    unsigned long proc_freq;
    unsigned short maj;
    unsigned short min;

#ifdef CONFIG_SMP
    pvr = per_cpu(cpu_pvr, cpu_id);
#else
    pvr = mfspr(SPRN_PVR);
#endif
    maj = (pvr >> 8) & 0xFF;
    min = pvr & 0xFF;

    seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);

    if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
        seq_puts(m, cur_cpu_spec->cpu_name);
    else
        seq_printf(m, "unknown (%08x)", pvr);

    if (cpu_has_feature(CPU_FTR_ALTIVEC))
        seq_puts(m, ", altivec supported");

    seq_putc(m, '\n');

#ifdef CONFIG_TAU
    if (cpu_has_feature(CPU_FTR_TAU)) {
        if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
            /* more straightforward, but potentially misleading */
            seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
                   cpu_temp(cpu_id));
        } else {
            /* show the actual temp sensor range */
            u32 temp;
            temp = cpu_temp_both(cpu_id);
            seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
                   temp & 0xff, temp >> 16);
        }
    }
#endif /* CONFIG_TAU */

    /*
     * Platforms that have variable clock rates should implement
     * the method ppc_md.get_proc_freq() that reports the clock
     * rate of a given cpu. The rest can use ppc_proc_freq to
     * report the clock rate that is the same across all cpus.
     */
    if (ppc_md.get_proc_freq)
        proc_freq = ppc_md.get_proc_freq(cpu_id);
    else
        proc_freq = ppc_proc_freq;

    if (proc_freq)
        seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
               proc_freq / 1000000, proc_freq % 1000000);

    /* If we are a Freescale core, do a simple check so
     * we don't have to keep adding cases in the future */
    if (PVR_VER(pvr) & 0x8000) {
        switch (PVR_VER(pvr)) {
        case 0x8000:    /* 7441/7450/7451, Voyager */
        case 0x8001:    /* 7445/7455, Apollo 6 */
        case 0x8002:    /* 7447/7457, Apollo 7 */
        case 0x8003:    /* 7447A, Apollo 7 PM */
        case 0x8004:    /* 7448, Apollo 8 */
        case 0x800c:    /* 7410, Nitro */
            maj = ((pvr >> 8) & 0xF);
            min = PVR_MIN(pvr);
            break;
        default:    /* e500/book-e */
            maj = PVR_MAJ(pvr);
            min = PVR_MIN(pvr);
            break;
        }
    } else {
        switch (PVR_VER(pvr)) {
        case 0x1008:    /* 740P/750P ?? */
            maj = ((pvr >> 8) & 0xFF) - 1;
            min = pvr & 0xFF;
            break;
        case 0x004e: /* POWER9 bits 12-15 give chip type */
        case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
            maj = (pvr >> 8) & 0x0F;
            min = pvr & 0xFF;
            break;
        default:
            maj = (pvr >> 8) & 0xFF;
            min = pvr & 0xFF;
            break;
        }
    }

    seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
           maj, min, PVR_VER(pvr), PVR_REV(pvr));

    if (IS_ENABLED(CONFIG_PPC32))
        seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
               (loops_per_jiffy / (5000 / HZ)) % 100);

    seq_putc(m, '\n');

    /* If this is the last cpu, print the summary */
    if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
        show_cpuinfo_summary(m);

    return 0;
}

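/*
 * seq_file iterator over the online CPUs. Positions are biased by one
 * so that CPU 0 does not look like the NULL end-of-sequence marker.
 */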
static void *c_start(struct seq_file *m, loff_t *pos)
{
    if (*pos == 0)  /* just in case, cpu 0 is not the first */
        *pos = cpumask_first(cpu_online_mask);
    else
        *pos = cpumask_next(*pos - 1, cpu_online_mask);
    if ((*pos) < nr_cpu_ids)
        return (void *)(unsigned long)(*pos + 1);
    return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
    (*pos)++;
    return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
    .start  = c_start,
    .next   = c_next,
    .stop   = c_stop,
    .show   = show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
    DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
        initrd_start, initrd_end);

    /* If we were passed an initrd, set the ROOT_DEV properly if the values
     * look sensible. If not, clear initrd reference.
     */
    if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
        initrd_end > initrd_start)
        ROOT_DEV = Root_RAM0;
    else
        initrd_start = initrd_end = 0;

    if (initrd_start)
        pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

    DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);

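/*
 * Record the number of hardware threads per core and build the
 * matching per-core thread mask.
 */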
static void __init cpu_init_thread_core_maps(int tpc)
{
    int i;

    threads_per_core = tpc;
    threads_per_subcore = tpc;
    cpumask_clear(&threads_core_mask);

    /* This implementation only supports a power-of-2 number of threads
     * for simplicity and performance
     */
    threads_shift = ilog2(tpc);
    BUG_ON(tpc != (1 << threads_shift));

    for (i = 0; i < tpc; i++)
        cpumask_set_cpu(i, &threads_core_mask);

    printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
           tpc, tpc > 1 ? "s" : "");
    printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}


u32 *cpu_to_phys_id = NULL;

/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *                      cpu_possible_mask
 *                      cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
    struct device_node *dn;
    int cpu = 0;
    int nthreads = 1;

    DBG("smp_setup_cpu_maps()\n");

    cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
                    __alignof__(u32));
    if (!cpu_to_phys_id)
        panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
              __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));

    for_each_node_by_type(dn, "cpu") {
        const __be32 *intserv;
        __be32 cpu_be;
        int j, len;

        DBG("  * %pOF...\n", dn);

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
                &len);
        if (intserv) {
            DBG("    ibm,ppc-interrupt-server#s -> %lu threads\n",
                (len / sizeof(int)));
        } else {
            DBG("    no ibm,ppc-interrupt-server#s -> 1 thread\n");
            intserv = of_get_property(dn, "reg", &len);
            if (!intserv) {
                cpu_be = cpu_to_be32(cpu);
                /* XXX: what is this? uninitialized?? */
                intserv = &cpu_be;  /* assume logical == phys */
                len = 4;
            }
        }

        nthreads = len / sizeof(int);

        for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
            bool avail;

            DBG("    thread %d -> cpu %d (hard id %d)\n",
                j, cpu, be32_to_cpu(intserv[j]));

            avail = of_device_is_available(dn);
            if (!avail)
                avail = !of_property_match_string(dn,
                        "enable-method", "spin-table");

            set_cpu_present(cpu, avail);
            set_cpu_possible(cpu, true);
            cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
            cpu++;
        }

        if (cpu >= nr_cpu_ids) {
            of_node_put(dn);
            break;
        }
    }

    /* If no SMT supported, nthreads is forced to 1 */
    if (!cpu_has_feature(CPU_FTR_SMT)) {
        DBG("  SMT disabled! nthreads forced to 1\n");
        nthreads = 1;
    }

#ifdef CONFIG_PPC64
    /*
     * On pSeries LPAR, we need to know how many cpus
     * could possibly be added to this partition.
     */
    if (firmware_has_feature(FW_FEATURE_LPAR) &&
        (dn = of_find_node_by_path("/rtas"))) {
        int num_addr_cell, num_size_cell, maxcpus;
        const __be32 *ireg;

        num_addr_cell = of_n_addr_cells(dn);
        num_size_cell = of_n_size_cells(dn);

        ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

        if (!ireg)
            goto out;

        maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);

        /* Scale maxcpus by the thread count on SMT-capable processors */
        if (cpu_has_feature(CPU_FTR_SMT))
            maxcpus *= nthreads;

        if (maxcpus > nr_cpu_ids) {
            printk(KERN_WARNING
                   "Partition configured for %d cpus, "
                   "operating system maximum is %u.\n",
                   maxcpus, nr_cpu_ids);
            maxcpus = nr_cpu_ids;
        } else
            printk(KERN_INFO "Partition configured for %d cpus.\n",
                   maxcpus);

        for (cpu = 0; cpu < maxcpus; cpu++)
            set_cpu_possible(cpu, true);
    out:
        of_node_put(dn);
    }
    vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

    /* Initialize CPU <=> thread mapping.
     *
     * WARNING: We assume that the number of threads is the same for
     * every CPU in the system. If that is not the case, then some code
     * here will have to be reworked.
     */
    cpu_init_thread_core_maps(nthreads);

    /* Now that possible cpus are set, set nr_cpu_ids for later use */
    setup_nr_cpu_ids();

    free_unused_pacas();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PCSPKR_PLATFORM
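/* Register a platform device for the PC speaker when the device tree advertises one. */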
static __init int add_pcspkr(void)
{
    struct device_node *np;
    struct platform_device *pd;
    int ret;

    np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
    of_node_put(np);
    if (!np)
        return -ENODEV;

    pd = platform_device_alloc("pcspkr", -1);
    if (!pd)
        return -ENOMEM;

    ret = platform_device_add(pd);
    if (ret)
        platform_device_put(pd);

    return ret;
}
device_initcall(add_pcspkr);
#endif  /* CONFIG_PCSPKR_PLATFORM */

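/*
 * Select the machine description: copy each linked-in candidate into
 * ppc_md in turn and keep the first one whose probe() hook accepts
 * the machine we are booting on.
 */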
static __init void probe_machine(void)
{
    extern struct machdep_calls __machine_desc_start;
    extern struct machdep_calls __machine_desc_end;
    unsigned int i;

    /*
     * Iterate all ppc_md structures until we find the proper
     * one for the current machine type
     */
    DBG("Probing machine type ...\n");

    /*
     * Check that ppc_md is empty; if not we have a bug, i.e., we set up
     * an entry before probe_machine(), which will be overwritten.
     */
    for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
        if (((void **)&ppc_md)[i]) {
            printk(KERN_ERR "Entry %d in ppc_md non-empty before"
                   " machine probe!\n", i);
        }
    }

    for (machine_id = &__machine_desc_start;
         machine_id < &__machine_desc_end;
         machine_id++) {
        DBG("  %s ...", machine_id->name);
        memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
        if (ppc_md.probe()) {
            DBG(" match!\n");
            break;
        }
        DBG("\n");
    }
    /* What can we do if we didn't find anything? */
    if (machine_id >= &__machine_desc_end) {
        pr_err("No suitable machine description found!\n");
        for (;;);
    }

    printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
    struct device_node *parent, *np = NULL;
    int ret = -ENODEV;

    switch (base_port) {
    case I8042_DATA_REG:
        if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
            np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
        if (np) {
            parent = of_get_parent(np);

            of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
            if (!of_i8042_kbd_irq)
                of_i8042_kbd_irq = 1;

            of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
            if (!of_i8042_aux_irq)
                of_i8042_aux_irq = 12;

            of_node_put(np);
            np = parent;
            break;
        }
        np = of_find_node_by_type(NULL, "8042");
        /* Pegasos has no device_type on its 8042 node, look for the
         * name instead */
        if (!np)
            np = of_find_node_by_name(NULL, "8042");
        if (np) {
            of_i8042_kbd_irq = 1;
            of_i8042_aux_irq = 12;
        }
        break;
    case FDC_BASE: /* FDC1 */
        np = of_find_node_by_type(NULL, "fdc");
        break;
    default:
        /* ipmi is supposed to fail here */
        break;
    }
    if (!np)
        return ret;
    parent = of_get_parent(np);
    if (parent) {
        if (of_node_is_type(parent, "isa"))
            ret = 0;
        of_node_put(parent);
    }
    of_node_put(np);
    return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

/*
 * Panic notifiers setup
 *
 * We have 3 notifiers for powerpc, each one from a different "nature":
 *
 * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
 *   IRQs and deals with the Firmware-Assisted dump, when it is configured;
 *   should run early in the panic path.
 *
 * - dump_kernel_offset() is an informative notifier, just showing the KASLR
 *   offset if we have RANDOMIZE_BASE set.
 *
 * - ppc_panic_platform_handler() is a low-level handler that's registered
 *   only if the platform wishes to perform final actions in the panic path,
 *   hence it should run late and might not even return. Currently, only
 *   pseries and ps3 platforms register callbacks.
 */
static int ppc_panic_fadump_handler(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
    /*
     * panic does a local_irq_disable, but we really
     * want interrupts to be hard disabled.
     */
    hard_irq_disable();

    /*
     * If firmware-assisted dump has been registered then trigger
     * its callback and let the firmware handle everything else.
     */
    crash_fadump(NULL, ptr);

    return NOTIFY_DONE;
}

static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
                  void *p)
{
    pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
         kaslr_offset(), KERNELBASE);

    return NOTIFY_DONE;
}

static int ppc_panic_platform_handler(struct notifier_block *this,
                      unsigned long event, void *ptr)
{
    /*
     * This handler is only registered if we have a panic callback
     * on ppc_md, hence NULL check is not needed.
     * Also, it may not return, so it runs really late on panic path.
     */
    ppc_md.panic(ptr);

    return NOTIFY_DONE;
}

static struct notifier_block ppc_fadump_block = {
    .notifier_call = ppc_panic_fadump_handler,
    .priority = INT_MAX, /* run early, to notify the firmware ASAP */
};

static struct notifier_block kernel_offset_notifier = {
    .notifier_call = dump_kernel_offset,
};

static struct notifier_block ppc_panic_block = {
    .notifier_call = ppc_panic_platform_handler,
    .priority = INT_MIN, /* may not return; must be done last */
};

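/*
 * Register the panic notifiers described above; the KASLR and platform
 * ones only when applicable.
 */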
void __init setup_panic(void)
{
    /* Hard-disables IRQs + deals with FW-assisted dump (fadump) */
    atomic_notifier_chain_register(&panic_notifier_list,
                       &ppc_fadump_block);

    if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
        atomic_notifier_chain_register(&panic_notifier_list,
                           &kernel_offset_notifier);

    /* Low-level platform-specific routines that should run on panic */
    if (ppc_md.panic)
        atomic_notifier_chain_register(&panic_notifier_list,
                           &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#define KERNEL_COHERENCY    (!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))

static int __init check_cache_coherency(void)
{
    struct device_node *np;
    const void *prop;
    bool devtree_coherency;

    np = of_find_node_by_path("/");
    prop = of_get_property(np, "coherency-off", NULL);
    of_node_put(np);

    devtree_coherency = prop ? false : true;

    if (devtree_coherency != KERNEL_COHERENCY) {
        printk(KERN_ERR
            "kernel coherency:%s != device tree coherency:%s\n",
            KERNEL_COHERENCY ? "on" : "off",
            devtree_coherency ? "on" : "off");
        BUG();
    }

    return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

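/* Minimal progress callback: log the string; the hex checkpoint code is ignored. */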
void ppc_printk_progress(char *s, unsigned short hex)
{
    pr_info("%s\n", s);
}

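/*
 * Dump a one-time boot banner of memory size, cache block sizes, and
 * CPU/MMU/firmware feature bits gathered so far.
 */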
static __init void print_system_info(void)
{
    pr_info("-----------------------------------------------------\n");
    pr_info("phys_mem_size     = 0x%llx\n",
        (unsigned long long)memblock_phys_mem_size());

    pr_info("dcache_bsize      = 0x%x\n", dcache_bsize);
    pr_info("icache_bsize      = 0x%x\n", icache_bsize);

    pr_info("cpu_features      = 0x%016lx\n", cur_cpu_spec->cpu_features);
    pr_info("  possible        = 0x%016lx\n",
        (unsigned long)CPU_FTRS_POSSIBLE);
    pr_info("  always          = 0x%016lx\n",
        (unsigned long)CPU_FTRS_ALWAYS);
    pr_info("cpu_user_features = 0x%08x 0x%08x\n",
        cur_cpu_spec->cpu_user_features,
        cur_cpu_spec->cpu_user_features2);
    pr_info("mmu_features      = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
    pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#ifdef CONFIG_PPC_BOOK3S
    pr_info("vmalloc start     = 0x%lx\n", KERN_VIRT_START);
    pr_info("IO start          = 0x%lx\n", KERN_IO_START);
    pr_info("vmemmap start     = 0x%lx\n", (unsigned long)vmemmap);
#endif
#endif

    if (!early_radix_enabled())
        print_system_hash_info();

    if (PHYSICAL_START > 0)
        pr_info("physical_start    = 0x%llx\n",
               (unsigned long long)PHYSICAL_START);
    pr_info("-----------------------------------------------------\n");
}

#ifdef CONFIG_SMP
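/*
 * Allocate a paca for every non-boot possible CPU and record its
 * hardware CPU id, then free the boot-time cpu_to_phys_id array.
 */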
static void __init smp_setup_pacas(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        if (cpu == smp_processor_id())
            continue;
        allocate_paca(cpu);
        set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
    }

    memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));
    cpu_to_phys_id = NULL;
}
#endif

/*
 * Called from start_kernel; this initializes memblock, which is used
 * to manage page allocation until mem_init() is called.
 */
void __init setup_arch(char **cmdline_p)
{
    kasan_init();

    *cmdline_p = boot_command_line;

    /* Set a half-reasonable default so udelay does something sensible */
    loops_per_jiffy = 500000000 / HZ;

    /* Unflatten the device-tree passed by prom_init or kexec */
    unflatten_device_tree();

    /*
     * Initialize cache line/block info from device-tree (on ppc64) or
     * just cputable (on ppc32).
     */
    initialize_cache_info();

    /* Initialize RTAS if available. */
    rtas_initialize();

    /* Check if we have an initrd provided via the device-tree. */
    check_for_initrd();

    /* Probe the machine type, establish ppc_md. */
    probe_machine();

    /* Setup panic notifier if requested by the platform. */
    setup_panic();

    /*
     * Configure ppc_md.power_save (ppc32 only; 64-bit machines do it
     * from their respective probe() functions).
     */
    setup_power_save();

    /* Discover standard serial ports. */
    find_legacy_serial_ports();

    /* Register early console with the printk subsystem. */
    register_early_udbg_console();

    /* Setup the various CPU maps based on the device-tree. */
    smp_setup_cpu_maps();

    /* Initialize xmon. */
    xmon_setup();

    /* Check the SMT related command line arguments (ppc64). */
    check_smt_enabled();

    /* Parse memory topology */
    mem_topology_setup();

    /*
     * Release secondary cpus out of their spinloops at 0x60 now that
     * we can map physical -> logical CPU ids.
     *
     * Freescale Book3e parts spin in a loop provided by firmware,
     * so smp_release_cpus() does nothing for them.
     */
#ifdef CONFIG_SMP
    smp_setup_pacas();

    /* On BookE, setup per-core TLB data structures. */
    setup_tlb_core_data();
#endif

    /* Print various info about the machine that has been gathered so far. */
    print_system_info();

    klp_init_thread_info(&init_task);

    setup_initial_init_mm(_stext, _etext, _edata, _end);

    mm_iommu_init(&init_mm);
    irqstack_early_init();
    exc_lvl_early_init();
    emergency_stack_init();

    mce_init();
    smp_release_cpus();

    initmem_init();

    /*
     * Reserve large chunks of memory for use by CMA for KVM and hugetlb.
     * These must be called after initmem_init(), so that pageblock_order
     * is initialised.
     */
    kvm_cma_reserve();
    gigantic_hugetlb_cma_reserve();

    early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

    if (ppc_md.setup_arch)
        ppc_md.setup_arch();

    setup_barrier_nospec();
    setup_spectre_v2();

    paging_init();

    /* Initialize the MMU context management stuff. */
    mmu_context_init();

    /* Interrupt code needs to be 64K-aligned. */
    if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
        panic("Kernelbase not 64K-aligned (0x%lx)!\n",
              (unsigned long)_stext);
}