// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras   August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>

#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
#include <asm/prom.h>

#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

int *chip_id_lookup_table;

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
#endif
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;

static int __init early_parse_mem(char *p)
{
    if (!p)
        return 1;

    memory_limit = PAGE_ALIGN(memparse(p, &p));
    DBG("memory limit = 0x%llx\n", memory_limit);

    return 0;
}
early_param("mem", early_parse_mem);
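/*
 * Illustrative usage (not from the source): booting with "mem=512M" makes
 * memparse() return 0x20000000, capping usable RAM at 512 MiB; PAGE_ALIGN()
 * rounds an odd value such as "mem=0x1f001234" up to the next page boundary.
 */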

/*
 * overlaps_initrd - check for overlap with page aligned extension of
 * initrd.
 */
static inline int overlaps_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
    if (!initrd_start)
        return 0;

    return  (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
            start <= ALIGN(initrd_end, PAGE_SIZE);
#else
    return 0;
#endif
}
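/*
 * Worked example (illustrative values, not from the source): with
 * PAGE_SIZE = 0x1000, initrd_start = 0x1800 and initrd_end = 0x2200,
 * the page-aligned initrd window is 0x1000..0x3000.  A region with
 * start = 0xf00 and size = 0x200 ends at 0x1100 > 0x1000 and starts
 * below 0x3000, so overlaps_initrd() reports an overlap.
 */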

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump, or within the page aligned range of initrd.
 * If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
    unsigned long start, size;
    void *p;

    DBG("-> move_device_tree\n");

    start = __pa(initial_boot_params);
    size = fdt_totalsize(initial_boot_params);

    if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
        !memblock_is_memory(start + size - 1) ||
        overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
        p = memblock_alloc_raw(size, PAGE_SIZE);
        if (!p)
            panic("Failed to allocate %lu bytes to move device tree\n",
                  size);
        memcpy(p, initial_boot_params, size);
        initial_boot_params = p;
        DBG("Moved device tree to 0x%px\n", p);
    }

    DBG("<- move_device_tree\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
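/*
 * Illustrative encoding (hypothetical bytes, not from the source): a
 * property starting
 *
 *  0x42 0x00 0x80 0x20 ...
 *
 * is a type-0 descriptor (second header byte 0x00) with 0x42 attribute
 * bytes following the two header bytes.  Attribute byte 0 = 0x80 has
 * big-endian bit 0 set (PPC_FEATURE_HAS_MMU in the table below), and
 * byte 1 = 0x20 has bit 2 set (MMU_FTR_CI_LARGE_PAGE).
 */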
static struct ibm_pa_feature {
    unsigned long   cpu_features;   /* CPU_FTR_xxx bit */
    unsigned long   mmu_features;   /* MMU_FTR_xxx bit */
    unsigned int    cpu_user_ftrs;  /* PPC_FEATURE_xxx bit */
    unsigned int    cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
    unsigned char   pabyte;     /* byte number in ibm,pa-features */
    unsigned char   pabit;      /* bit number (big-endian) */
    unsigned char   invert;     /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
    { .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
    { .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
    { .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
    { .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
    { .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
#ifdef CONFIG_PPC_RADIX_MMU
    { .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
#endif
    { .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
                    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
    /*
     * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
     * we don't want to turn on TM here, so we use the *_COMP versions
     * which are 0 if the kernel doesn't support TM.
     */
    { .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
      .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },

    { .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
};

static void __init scan_features(unsigned long node, const unsigned char *ftrs,
                 unsigned long tablelen,
                 struct ibm_pa_feature *fp,
                 unsigned long ft_size)
{
    unsigned long i, len, bit;

    /* find descriptor with type == 0 */
    for (;;) {
        if (tablelen < 3)
            return;
        len = 2 + ftrs[0];
        if (tablelen < len)
            return;     /* descriptor 0 not found */
        if (ftrs[1] == 0)
            break;
        tablelen -= len;
        ftrs += len;
    }

    /* loop over bits we know about */
    for (i = 0; i < ft_size; ++i, ++fp) {
        if (fp->pabyte >= ftrs[0])
            continue;
        bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
        if (bit ^ fp->invert) {
            cur_cpu_spec->cpu_features |= fp->cpu_features;
            cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
            cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
            cur_cpu_spec->mmu_features |= fp->mmu_features;
        } else {
            cur_cpu_spec->cpu_features &= ~fp->cpu_features;
            cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
            cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
            cur_cpu_spec->mmu_features &= ~fp->mmu_features;
        }
    }
}

static void __init check_cpu_pa_features(unsigned long node)
{
    const unsigned char *pa_ftrs;
    int tablelen;

    pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
    if (pa_ftrs == NULL)
        return;

    scan_features(node, pa_ftrs, tablelen,
              ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
    const __be32 *slb_size_ptr;

    slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
            of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

    if (slb_size_ptr)
        mmu_slb_size = be32_to_cpup(slb_size_ptr);
}
#else
#define init_mmu_slb_size(node) do { } while(0)
#endif

static struct feature_property {
    const char *name;
    u32 min_value;
    unsigned long cpu_feature;
    unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
    {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
    {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
    /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
    {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
    {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
    {"ibm,purr", 1, CPU_FTR_PURR, 0},
    {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static __init void identical_pvr_fixup(unsigned long node)
{
    unsigned int pvr;
    const char *model = of_get_flat_dt_prop(node, "model", NULL);

    /*
     * Since 440GR(x)/440EP(x) processors have the same pvr,
     * we check the node path and set bit 28 in the cur_cpu_spec
     * pvr for EP(x) processor version. This bit is always 0 in
     * the "real" pvr. Then we call identify_cpu again with
     * the new logical pvr to enable FPU support.
     */
    if (model && strstr(model, "440EP")) {
        pvr = cur_cpu_spec->pvr_value | 0x8;
        identify_cpu(0, pvr);
        DBG("Using logical pvr %x for %s\n", pvr, model);
    }
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
    int i;
    struct feature_property *fp = feature_properties;
    const __be32 *prop;

    for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
        prop = of_get_flat_dt_prop(node, fp->name, NULL);
        if (prop && be32_to_cpup(prop) >= fp->min_value) {
            cur_cpu_spec->cpu_features |= fp->cpu_feature;
            cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
        }
    }
}

static int __init early_init_dt_scan_cpus(unsigned long node,
                      const char *uname, int depth,
                      void *data)
{
    const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
    const __be32 *prop;
    const __be32 *intserv;
    int i, nthreads;
    int len;
    int found = -1;
    int found_thread = 0;

    /* We are scanning "cpu" nodes only */
    if (type == NULL || strcmp(type, "cpu") != 0)
        return 0;

    /* Get physical cpuid */
    intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
    if (!intserv)
        intserv = of_get_flat_dt_prop(node, "reg", &len);

    nthreads = len / sizeof(int);

    /*
     * Now see if any of these threads match our boot cpu.
     * NOTE: This must match the parsing done in smp_setup_cpu_maps.
     */
    for (i = 0; i < nthreads; i++) {
        if (be32_to_cpu(intserv[i]) ==
            fdt_boot_cpuid_phys(initial_boot_params)) {
            found = boot_cpu_count;
            found_thread = i;
        }
#ifdef CONFIG_SMP
        /* logical cpu id is always 0 on UP kernels */
        boot_cpu_count++;
#endif
    }

    /* Not the boot CPU */
    if (found < 0)
        return 0;

    DBG("boot cpu: logical %d physical %d\n", found,
        be32_to_cpu(intserv[found_thread]));
    boot_cpuid = found;

    // Pass the boot CPU's hard CPU id back to our caller
    *((u32 *)data) = be32_to_cpu(intserv[found_thread]);

    /*
     * PAPR defines "logical" PVR values for cpus that
     * meet various levels of the architecture:
     * 0x0f000001   Architecture version 2.04
     * 0x0f000002   Architecture version 2.05
     * If the cpu-version property in the cpu node contains
     * such a value, we call identify_cpu again with the
     * logical PVR value in order to use the cpu feature
     * bits appropriate for the architecture level.
     *
     * A POWER6 partition in "POWER6 architected" mode
     * uses the 0x0f000002 PVR value; in POWER5+ mode
     * it uses 0x0f000001.
     *
     * If we're using device tree CPU feature discovery then we don't
     * support the cpu-version property, and it's the responsibility of the
     * firmware/hypervisor to provide the correct feature set for the
     * architecture level via the ibm,powerpc-cpu-features binding.
     */
    if (!dt_cpu_ftrs_in_use()) {
        prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
        if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
            identify_cpu(0, be32_to_cpup(prop));

        check_cpu_feature_properties(node);
        check_cpu_pa_features(node);
    }

    identical_pvr_fixup(node);
    init_mmu_slb_size(node);

#ifdef CONFIG_PPC64
    if (nthreads == 1)
        cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
    else if (!dt_cpu_ftrs_in_use())
        cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

    return 0;
}
static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
                        const char *uname,
                        int depth, void *data)
{
    const unsigned long *lprop; /* All these are set by the kernel, so no endian conversion is needed */

    /* Use common scan routine to determine if this is the chosen node */
    if (early_init_dt_scan_chosen(data) < 0)
        return 0;

#ifdef CONFIG_PPC64
    /* check if iommu is forced on or off */
    if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
        iommu_is_off = 1;
    if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
        iommu_force_on = 1;
#endif

    /* mem=x on the command line is the preferred mechanism */
    lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
    if (lprop)
        memory_limit = *lprop;

#ifdef CONFIG_PPC64
    lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
    if (lprop)
        tce_alloc_start = *lprop;
    lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
    if (lprop)
        tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC_CORE
    lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
    if (lprop)
        crashk_res.start = *lprop;

    lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
    if (lprop)
        crashk_res.end = crashk_res.start + *lprop - 1;
#endif

    /* break now */
    return 1;
}

/*
 * Compare the range against the max mem limit and update
 * the size if it crosses the limit.
 */

#ifdef CONFIG_SPARSEMEM
static bool __init validate_mem_limit(u64 base, u64 *size)
{
    u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);

    if (base >= max_mem)
        return false;
    if ((base + *size) > max_mem)
        *size = max_mem - base;
    return true;
}
#else
static bool __init validate_mem_limit(u64 base, u64 *size)
{
    return true;
}
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm dynamic reconfiguration memory LMBs.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
                    const __be32 **usm,
                    void *data)
{
    u64 base, size;
    int is_kexec_kdump = 0, rngs;

    base = lmb->base_addr;
    size = drmem_lmb_size();
    rngs = 1;

    /*
     * Skip this block if the reserved bit is set in flags
     * or if the block is not assigned to this partition.
     */
    if ((lmb->flags & DRCONF_MEM_RESERVED) ||
        !(lmb->flags & DRCONF_MEM_ASSIGNED))
        return 0;

    if (*usm)
        is_kexec_kdump = 1;

    if (is_kexec_kdump) {
        /*
         * For each memblock in ibm,dynamic-memory, a
         * corresponding entry in the linux,drconf-usable-memory
         * property contains a counter 'p' followed by 'p'
         * (base, size) pairs. Now read the counter from the
         * linux,drconf-usable-memory property.
         */
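        /*
         * Illustrative layout (hypothetical cells, not from the
         * source): an entry of <2  base0 size0  base1 size1>
         * describes two usable ranges for this LMB; the counter and
         * each (base, size) are pulled off the stream by
         * dt_mem_next_cell() below.
         */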
        rngs = dt_mem_next_cell(dt_root_size_cells, usm);
        if (!rngs) /* there are no (base, size) pairs */
            return 0;
    }

    do {
        if (is_kexec_kdump) {
            base = dt_mem_next_cell(dt_root_addr_cells, usm);
            size = dt_mem_next_cell(dt_root_size_cells, usm);
        }

        if (iommu_is_off) {
            if (base >= 0x80000000ul)
                continue;
            if ((base + size) > 0x80000000ul)
                size = 0x80000000ul - base;
        }

        if (!validate_mem_limit(base, &size))
            continue;

        DBG("Adding: %llx -> %llx\n", base, size);
        memblock_add(base, size);

        if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
            memblock_mark_hotplug(base, size);
    } while (--rngs);

    return 0;
}
#endif /* CONFIG_PPC_PSERIES */

static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
    const void *fdt = initial_boot_params;
    int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");

    if (node > 0)
        walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);

#endif

    return early_init_dt_scan_memory();
}

/*
 * For a relocatable kernel, we need to get memstart_addr first,
 * then use it to calculate the virtual kernel start address. This has
 * to happen at a very early stage (before machine_init). In this case,
 * we just want to get memstart_addr and would rather not disturb the
 * memblock at this stage, so a variable is introduced to skip the
 * memblock_add() here.
 */
#ifdef CONFIG_RELOCATABLE
static int add_mem_to_memblock = 1;
#else
#define add_mem_to_memblock 1
#endif

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
    if (iommu_is_off) {
        if (base >= 0x80000000ul)
            return;
        if ((base + size) > 0x80000000ul)
            size = 0x80000000ul - base;
    }
#endif
    /* Keep track of the beginning of memory -and- the size of
     * the very first block in the device-tree as it represents
     * the RMA on ppc64 server
     */
    if (base < memstart_addr) {
        memstart_addr = base;
        first_memblock_size = size;
    }

    /* Add the chunk to the MEMBLOCK list */
    if (add_mem_to_memblock) {
        if (validate_mem_limit(base, &size))
            memblock_add(base, size);
    }
}

static void __init early_reserve_mem_dt(void)
{
    unsigned long i, dt_root;
    int len;
    const __be32 *prop;

    early_init_fdt_reserve_self();
    early_init_fdt_scan_reserved_mem();

    dt_root = of_get_flat_dt_root();

    prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);

    if (!prop)
        return;

    DBG("Found new-style reserved-ranges\n");

    /* Each reserved range is an (address,size) pair, 2 cells each,
     * totalling 4 cells per range. */
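    /*
     * Illustrative property (hypothetical values, not from the source),
     * with 2 address cells and 2 size cells per range:
     *
     *  reserved-ranges = <0x0 0x30000000  0x0 0x00400000>;
     *
     * which reserves 4 MiB starting at 768 MiB.
     */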
    for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
        u64 base, size;

        base = of_read_number(prop + (i * 4) + 0, 2);
        size = of_read_number(prop + (i * 4) + 2, 2);

        if (size) {
            DBG("reserving: %llx -> %llx\n", base, size);
            memblock_reserve(base, size);
        }
    }
}

static void __init early_reserve_mem(void)
{
    __be64 *reserve_map;

    reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
            fdt_off_mem_rsvmap(initial_boot_params));

    /* Look for the new "reserved-ranges" property in the DT */
    early_reserve_mem_dt();

#ifdef CONFIG_BLK_DEV_INITRD
    /* Then reserve the initrd, if any */
    if (initrd_start && (initrd_end > initrd_start)) {
        memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
            ALIGN(initrd_end, PAGE_SIZE) -
            ALIGN_DOWN(initrd_start, PAGE_SIZE));
    }
#endif /* CONFIG_BLK_DEV_INITRD */

    if (!IS_ENABLED(CONFIG_PPC32))
        return;

    /*
     * Handle the case where we might be booting from an old kexec
     * image that set up the mem_rsvmap as pairs of 32-bit values.
     */
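    /*
     * Illustrative old-style map (hypothetical values, not from the
     * source): a __be32 stream <base0 size0  base1 size1  0 0> is
     * terminated by the (0, 0) pair, which the size_32 == 0 test
     * below detects.
     */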
    if (be64_to_cpup(reserve_map) > 0xffffffffull) {
        u32 base_32, size_32;
        __be32 *reserve_map_32 = (__be32 *)reserve_map;

        DBG("Found old 32-bit reserve map\n");

        while (1) {
            base_32 = be32_to_cpup(reserve_map_32++);
            size_32 = be32_to_cpup(reserve_map_32++);
            if (size_32 == 0)
                break;
            DBG("reserving: %x -> %x\n", base_32, size_32);
            memblock_reserve(base_32, size_32);
        }
        return;
    }
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static bool tm_disabled __initdata;

static int __init parse_ppc_tm(char *str)
{
    bool res;

    if (kstrtobool(str, &res))
        return -EINVAL;

    tm_disabled = !res;

    return 0;
}
early_param("ppc_tm", parse_ppc_tm);
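/*
 * Illustrative usage (not from the source): booting with "ppc_tm=off"
 * makes kstrtobool() yield false, so tm_disabled is set and tm_init()
 * below strips the HTM feature bits instead of calling pnv_tm_init().
 */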

static void __init tm_init(void)
{
    if (tm_disabled) {
        pr_info("Disabling hardware transactional memory (HTM)\n");
        cur_cpu_spec->cpu_user_features2 &=
            ~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
        cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
        return;
    }

    pnv_tm_init();
}
#else
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
    /*
     * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
     * have configured via the device tree features or via __init_FSCR().
     * That value will then be propagated to pid 1 (init) and all future
     * processes.
     */
    if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
        init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {}
#endif


void __init early_init_devtree(void *params)
{
    u32 boot_cpu_hwid;
    phys_addr_t limit;

    DBG(" -> early_init_devtree(%px)\n", params);

    /* Too early to BUG_ON(), do it by hand */
    if (!early_init_dt_verify(params))
        panic("BUG: Failed verifying flat device tree, bad version?");

#ifdef CONFIG_PPC_RTAS
    /* Some machines might need RTAS info for debugging, grab it now. */
    of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PPC_POWERNV
    /* Some machines might need OPAL info for debugging, grab it now. */
    of_scan_flat_dt(early_init_dt_scan_opal, NULL);

    /* Scan tree for ultravisor feature */
    of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
#endif

#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
    /* scan tree to see if a dump was active during the last boot */
    of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif

    /* Retrieve various pieces of information from the /chosen node of
     * the device-tree, including the platform type, initrd location and
     * size, TCE reserve, and more ...
     */
    of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);

    /* Scan memory nodes and rebuild MEMBLOCKs */
    early_init_dt_scan_root();
    early_init_dt_scan_memory_ppc();

    /*
     * As generic code authors expect to be able to use static keys
     * in early_param() handlers, we initialize the static keys just
     * before parsing early params (it's fine to call jump_label_init()
     * more than once).
     */
    jump_label_init();
    parse_early_param();

    /* make sure we've parsed cmdline for mem= before this */
    if (memory_limit)
        first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
    setup_initial_memory_limit(memstart_addr, first_memblock_size);
    /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
    memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
    /* If relocatable, reserve first 32k for interrupt vectors etc. */
    if (PHYSICAL_START > MEMORY_START)
        memblock_reserve(MEMORY_START, 0x8000);
    reserve_kdump_trampoline();
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
    /*
     * If we fail to reserve memory for a firmware-assisted dump, then
     * fall back to kexec-based kdump.
     */
    if (fadump_reserve_mem() == 0)
#endif
        reserve_crashkernel();
    early_reserve_mem();

    /* Ensure that total memory size is page-aligned. */
    limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
    memblock_enforce_memory_limit(limit);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
    if (!early_radix_enabled())
        memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
#endif

    memblock_allow_resize();
    memblock_dump_all();

    DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());

    /* We may need to relocate the flat tree, do it now.
     * FIXME .. and the initrd too? */
    move_device_tree();

    DBG("Scanning CPUs ...\n");

    dt_cpu_ftrs_scan();

    /* Retrieve CPU-related information from the flat tree
     * (altivec support, boot CPU ID, ...)
     */
    of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
    if (boot_cpuid < 0) {
        printk("Failed to identify boot CPU !\n");
        BUG();
    }

    save_fscr_to_task();

#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
    /* We'll later wait for secondaries to check in; there are
     * NCPUS-1 non-boot CPUs  :-)
     */
    spinning_secondaries = boot_cpu_count - 1;
#endif

    mmu_early_init_devtree();

    // NB. paca is not installed until later in early_setup()
    allocate_paca_ptrs();
    allocate_paca(boot_cpuid);
    set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);

#ifdef CONFIG_PPC_POWERNV
    /* Scan and build the list of machine check recoverable ranges */
    of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
    epapr_paravirt_early_init();

    /* Now try to figure out if we are running on LPAR and so on */
    pseries_probe_fw_features();

    /*
     * Initialize pkey features and default AMR/IAMR values
     */
    pkey_early_init_devtree();

#ifdef CONFIG_PPC_PS3
    /* Identify PS3 firmware */
    if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
        powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
#endif

    tm_init();

    DBG(" <- early_init_devtree()\n");
}

#ifdef CONFIG_RELOCATABLE
/*
 * This function runs before early_init_devtree, so we have to init
 * initial_boot_params.
 */
void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
{
    /* Set up the flat device-tree pointer */
    initial_boot_params = params;

    /*
     * Scan the memory nodes, and set add_mem_to_memblock to 0 to avoid
     * messing with the memblock.
     */
    add_mem_to_memblock = 0;
    early_init_dt_scan_root();
    early_init_dt_scan_memory_ppc();
    add_mem_to_memblock = 1;

    if (size)
        *size = first_memblock_size;
}
#endif

/*******
 *
 * New implementation of the OF "find" APIs; these return a refcounted
 * object, so call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well;
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 * @np: device node of the device
 *
 * This looks for a property "ibm,chip-id" in the node or any
 * of its parents and returns its content, or -1 if it cannot
 * be found.
 */
int of_get_ibm_chip_id(struct device_node *np)
{
    of_node_get(np);
    while (np) {
        u32 chip_id;

        /*
         * Skiboot may produce memory nodes that contain more than one
         * cell in chip-id; we only read the first one here.
         */
        if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
            of_node_put(np);
            return chip_id;
        }

        np = of_get_next_parent(np);
    }
    return -1;
}
EXPORT_SYMBOL(of_get_ibm_chip_id);

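/*
 * Illustrative caller (hypothetical node path, not from the source);
 * the helper walks up the tree itself and drops the parent references
 * it takes, so the caller only puts its own reference:
 *
 *  struct device_node *np = of_find_node_by_path("/cpus/cpu@0");
 *  int chip = np ? of_get_ibm_chip_id(np) : -1;
 *  of_node_put(np);
 */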
/**
 * cpu_to_chip_id - Return the cpu's chip-id
 * @cpu: The logical cpu number.
 *
 * Return the value of the ibm,chip-id property corresponding to the given
 * logical cpu number. If the chip-id cannot be found, returns -1.
 */
int cpu_to_chip_id(int cpu)
{
    struct device_node *np;
    int ret = -1, idx;

    idx = cpu / threads_per_core;
    if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
        return chip_id_lookup_table[idx];

    np = of_get_cpu_node(cpu, NULL);
    if (np) {
        ret = of_get_ibm_chip_id(np);
        of_node_put(np);

        if (chip_id_lookup_table)
            chip_id_lookup_table[idx] = ret;
    }

    return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
#ifdef CONFIG_SMP
    /*
     * Early firmware scanning must use this rather than
     * get_hard_smp_processor_id because we don't have pacas allocated
     * until memory topology is discovered.
     */
    if (cpu_to_phys_id != NULL)
        return (int)phys_id == cpu_to_phys_id[cpu];
#endif

    return (int)phys_id == get_hard_smp_processor_id(cpu);
}