Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is subject to the terms and conditions of the GNU General Public
0003  * License.  See the file "COPYING" in the main directory of this archive
0004  * for more details.
0005  *
0006  * Copyright (C) 1995 Linus Torvalds
0007  * Copyright (C) 1995 Waldorf Electronics
0008  * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
0009  * Copyright (C) 1996 Stoned Elipot
0010  * Copyright (C) 1999 Silicon Graphics, Inc.
0011  * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
0012  */
0013 #include <linux/init.h>
0014 #include <linux/ioport.h>
0015 #include <linux/export.h>
0016 #include <linux/screen_info.h>
0017 #include <linux/memblock.h>
0018 #include <linux/initrd.h>
0019 #include <linux/root_dev.h>
0020 #include <linux/highmem.h>
0021 #include <linux/console.h>
0022 #include <linux/pfn.h>
0023 #include <linux/debugfs.h>
0024 #include <linux/kexec.h>
0025 #include <linux/sizes.h>
0026 #include <linux/device.h>
0027 #include <linux/dma-map-ops.h>
0028 #include <linux/decompress/generic.h>
0029 #include <linux/of_fdt.h>
0030 #include <linux/dmi.h>
0031 #include <linux/crash_dump.h>
0032 
0033 #include <asm/addrspace.h>
0034 #include <asm/bootinfo.h>
0035 #include <asm/bugs.h>
0036 #include <asm/cache.h>
0037 #include <asm/cdmm.h>
0038 #include <asm/cpu.h>
0039 #include <asm/debug.h>
0040 #include <asm/mmzone.h>
0041 #include <asm/sections.h>
0042 #include <asm/setup.h>
0043 #include <asm/smp-ops.h>
0044 #include <asm/prom.h>
0045 
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
/* 1 MiB of image space reserved for a device tree blob appended to vmlinux. */
char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

/* Per-CPU feature/identification data; exported for modules. */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
0057 
/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

/* Working copy of the assembled command line, handed back via setup_arch(). */
static char __initdata command_line[COMMAND_LINE_SIZE];
/* Arguments from the bootloader/firmware; filled in by platform code. */
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

/* Compile-time command line; empty unless CONFIG_CMDLINE_BOOL is set. */
#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif
0075 
/*
 * mips_io_port_base is the begin of the address space to which x86 style
 * I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

/* Resources describing the kernel image; registered in resource_init(). */
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

/* KASLR relocation offset of this kernel image. */
unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);

/* Marker value whose aliasing is probed by detect_memory_region(). */
static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif
0096 
0097 void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
0098 {
0099     void *dm = &detect_magic;
0100     phys_addr_t size;
0101 
0102     for (size = sz_min; size < sz_max; size <<= 1) {
0103         if (!memcmp(dm, dm + size, sizeof(detect_magic)))
0104             break;
0105     }
0106 
0107     pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
0108         ((unsigned long long) size) / SZ_1M,
0109         (unsigned long long) start,
0110         ((unsigned long long) sz_min) / SZ_1M,
0111         ((unsigned long long) sz_max) / SZ_1M);
0112 
0113     memblock_add(start, size);
0114 }
0115 
0116 /*
0117  * Manage initrd
0118  */
0119 #ifdef CONFIG_BLK_DEV_INITRD
0120 
/* Parse the "rd_start=" early parameter: physical/virtual start of the initrd. */
static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	/*
	 * "rd_size=" may already have been parsed, in which case initrd_end
	 * holds only the size; adding the start yields the real end address.
	 */
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);
0135 
0136 static int __init rd_size_early(char *p)
0137 {
0138     initrd_end += memparse(p, &p);
0139     return 0;
0140 }
0141 early_param("rd_size", rd_size_early);
0142 
0143 /* it returns the next free pfn after initrd */
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	/* A usable initrd is present: default the root device to it. */
	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	/* Invalid or absent initrd: clear both bounds so it is ignored. */
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
0183 
/* In some conditions (e.g. big endian bootloader with a little endian
   kernel), the initrd might appear byte swapped.  Try to detect this and
   byte swap it if needed.  */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		/* Swap the entire image in place, 8 bytes at a time. */
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}
0212 
/*
 * Validate the initrd range against low memory and reserve it in
 * memblock so the page allocator will never hand it out.
 */
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		/* No trailing \n: the disable path continues this line. */
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
0239 
#else  /* !CONFIG_BLK_DEV_INITRD */

/* No initrd support: nothing to reserve, the next free pfn is simply 0. */
static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif
0250 
0251 /*
0252  * Initialize the bootmem allocator. It also setup initrd related data
0253  * if needed.
0254  */
0255 #if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
0256 
/*
 * Memory setup is handled elsewhere for these NUMA configurations (see
 * the #if guard above); only the initrd needs processing here.
 */
static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}
0262 
0263 #else  /* !CONFIG_SGI_IP27 */
0264 
/*
 * Set up the memblock-based boot memory allocator: reserve the kernel
 * image, establish the PFN bounds (min_low_pfn/max_low_pfn/max_pfn),
 * account for highmem and reserve any initrd.
 */
static void __init bootmem_init(void)
{
	phys_addr_t ramstart, ramend;
	unsigned long start, end;
	int i;

	ramstart = memblock_start_of_DRAM();
	ramend = memblock_end_of_DRAM();

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();

	/* Reserve memory occupied by kernel. */
	memblock_reserve(__pa_symbol(&_text),
			__pa_symbol(&_end) - __pa_symbol(&_text));

	/* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

	/* A fixed PFN offset below the start of RAM wastes struct pages. */
	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
	}
#endif

	min_low_pfn = ARCH_PFN_OFFSET;
	max_pfn = PFN_DOWN(ramend);
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_low_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_pfn;
#else
		/* Without CONFIG_HIGHMEM, memory above HIGHMEM_START is dropped. */
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
		max_pfn = max_low_pfn;
#endif
	}

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}
0339 
0340 #endif  /* CONFIG_SGI_IP27 */
0341 
/* Non-zero once the user overrode the firmware memory map via mem=/memmap=. */
static int usermem __initdata;
0343 
0344 static int __init early_parse_mem(char *p)
0345 {
0346     phys_addr_t start, size;
0347 
0348     if (!p) {
0349         pr_err("mem parameter is empty, do nothing\n");
0350         return -EINVAL;
0351     }
0352 
0353     /*
0354      * If a user specifies memory size, we
0355      * blow away any automatically generated
0356      * size.
0357      */
0358     if (usermem == 0) {
0359         usermem = 1;
0360         memblock_remove(memblock_start_of_DRAM(),
0361             memblock_end_of_DRAM() - memblock_start_of_DRAM());
0362     }
0363     start = 0;
0364     size = memparse(p, &p);
0365     if (*p == '@')
0366         start = memparse(p + 1, &p);
0367 
0368     if (IS_ENABLED(CONFIG_NUMA))
0369         memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
0370     else
0371         memblock_add(start, size);
0372 
0373     return 0;
0374 }
0375 early_param("mem", early_parse_mem);
0376 
0377 static int __init early_parse_memmap(char *p)
0378 {
0379     char *oldp;
0380     u64 start_at, mem_size;
0381 
0382     if (!p)
0383         return -EINVAL;
0384 
0385     if (!strncmp(p, "exactmap", 8)) {
0386         pr_err("\"memmap=exactmap\" invalid on MIPS\n");
0387         return 0;
0388     }
0389 
0390     oldp = p;
0391     mem_size = memparse(p, &p);
0392     if (p == oldp)
0393         return -EINVAL;
0394 
0395     if (*p == '@') {
0396         start_at = memparse(p+1, &p);
0397         memblock_add(start_at, mem_size);
0398     } else if (*p == '#') {
0399         pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
0400         return -EINVAL;
0401     } else if (*p == '$') {
0402         start_at = memparse(p+1, &p);
0403         memblock_add(start_at, mem_size);
0404         memblock_reserve(start_at, mem_size);
0405     } else {
0406         pr_err("\"memmap\" invalid format!\n");
0407         return -EINVAL;
0408     }
0409 
0410     if (*p == '\0') {
0411         usermem = 1;
0412         return 0;
0413     } else
0414         return -EINVAL;
0415 }
0416 early_param("memmap", early_parse_memmap);
0417 
/*
 * Reserve the memory holding the kdump ELF core header so the capture
 * kernel's /proc/vmcore support can expose the crashed kernel's memory.
 */
static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	phys_addr_t start, end;
	u64 i;

	if (!elfcorehdr_size) {
		/* Size not given on the command line: derive it below. */
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the end of
				 * the memory segment, that should all be kdump
				 * reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}
0444 
#ifdef CONFIG_KEXEC

/* 64M alignment for crash kernel regions */
#define CRASH_ALIGN	SZ_64M
/* Highest physical address considered when auto-placing the crash kernel. */
#define CRASH_ADDR_MAX	SZ_512M
0450 
0451 static void __init mips_parse_crashkernel(void)
0452 {
0453     unsigned long long total_mem;
0454     unsigned long long crash_size, crash_base;
0455     int ret;
0456 
0457     total_mem = memblock_phys_mem_size();
0458     ret = parse_crashkernel(boot_command_line, total_mem,
0459                 &crash_size, &crash_base);
0460     if (ret != 0 || crash_size <= 0)
0461         return;
0462 
0463     if (crash_base <= 0) {
0464         crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
0465                                CRASH_ALIGN,
0466                                CRASH_ADDR_MAX);
0467         if (!crash_base) {
0468             pr_warn("crashkernel reservation failed - No suitable area found.\n");
0469             return;
0470         }
0471     } else {
0472         unsigned long long start;
0473 
0474         start = memblock_phys_alloc_range(crash_size, 1,
0475                           crash_base,
0476                           crash_base + crash_size);
0477         if (start != crash_base) {
0478             pr_warn("Invalid memory region reserved for crash kernel\n");
0479             return;
0480         }
0481     }
0482 
0483     crashk_res.start = crash_base;
0484     crashk_res.end   = crash_base + crash_size - 1;
0485 }
0486 
/*
 * Insert the crashkernel reservation into the resource tree under @res.
 * Does nothing when no region was reserved (start == end).
 */
static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
/* Without kexec support there is nothing to parse or reserve. */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
0509 
0510 static void __init check_kernel_sections_mem(void)
0511 {
0512     phys_addr_t start = __pa_symbol(&_text);
0513     phys_addr_t size = __pa_symbol(&_end) - start;
0514 
0515     if (!memblock_is_region_memory(start, size)) {
0516         pr_info("Kernel sections are not in the memory maps\n");
0517         memblock_add(start, size);
0518     }
0519 }
0520 
0521 static void __init bootcmdline_append(const char *s, size_t max)
0522 {
0523     if (!s[0] || !max)
0524         return;
0525 
0526     if (boot_command_line[0])
0527         strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
0528 
0529     strlcat(boot_command_line, s, max);
0530 }
0531 
0532 #ifdef CONFIG_OF_EARLY_FLATTREE
0533 
/*
 * of_scan_flat_dt() callback: append the /chosen "bootargs" property to
 * boot_command_line.  *data (a bool) is set when bootargs were found.
 */
static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
					  int depth, void *data)
{
	bool *dt_bootargs = data;
	const char *p;
	int l;

	/* Only the top-level /chosen (or /chosen@0) node is of interest. */
	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0) {
		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
		*dt_bootargs = true;
	}

	/* Non-zero return value stops the flat-DT scan. */
	return 1;
}
0553 
0554 #endif /* CONFIG_OF_EARLY_FLATTREE */
0555 
/*
 * Assemble boot_command_line from, depending on configuration, the
 * built-in command line, the DT /chosen bootargs and the
 * bootloader-provided arcs_cmdline.
 */
static void __init bootcmdline_init(void)
{
	bool dt_bootargs = false;

	/*
	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
	 * trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
		return;
	}

	/*
	 * If the user specified a built-in command line &
	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
	 * prepended to arguments from the bootloader or DT so we'll copy them
	 * to the start of boot_command_line here. Otherwise, empty
	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	else
		boot_command_line[0] = 0;

#ifdef CONFIG_OF_EARLY_FLATTREE
	/*
	 * If we're configured to take boot arguments from DT, look for those
	 * now.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

	/*
	 * If we didn't get any arguments from DT (regardless of whether that's
	 * because we weren't configured to look for them, or because we looked
	 * & found none) then we'll take arguments from the bootloader.
	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
	 * the bootloader.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);

	/*
	 * If the user specified a built-in command line & we didn't already
	 * prepend it, we append it to boot_command_line here.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}
0610 
0611 /*
0612  * arch_mem_init - initialize memory management subsystem
0613  *
0614  *  o plat_mem_setup() detects the memory configuration and will record detected
0615  *    memory areas using memblock_add.
0616  *
0617  * At this stage the memory configuration of the system is known to the
0618  * kernel but generic memory management system is still entirely uninitialized.
0619  *
0620  *  o bootmem_init()
0621  *  o sparse_init()
0622  *  o paging_init()
0623  *  o dma_contiguous_reserve()
0624  *
0625  * At this stage the bootmem allocator is ready to use.
0626  *
0627  * NOTE: historically plat_mem_setup did the entire platform initialization.
0628  *   This was rather impractical because it meant plat_mem_setup had to
0629  * get away without any kind of memory allocator.  To keep old code from
0630  * breaking plat_setup was just renamed to plat_mem_setup and a second platform
0631  * initialization hook for anything else was introduced.
0632  */
static void __init arch_mem_init(char **cmdline_p)
{
	/* call board setup routine */
	plat_mem_setup();
	/* Prefer low addresses for early allocations until limits are known. */
	memblock_set_bottom_up(true);

	bootcmdline_init();
	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/* Run early_param() handlers ("mem=", "memmap=", "rd_start=", ...). */
	parse_early_param();

	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	/* The kernel image itself must be part of the memory map. */
	check_kernel_sections_mem();

	/* Honour the FDT's own footprint and its reserved-memory nodes. */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up
	 * to this point is possible to only reserve physical memory
	 * with memblock_reserve; memblock_alloc* can be used
	 * only after this point
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

	mips_reserve_vmcore();

	mips_parse_crashkernel();
	device_tree_init();

	/*
	 * In order to reduce the possibility of kernel panic when failed to
	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
	 * low memory as small as possible before plat_swiotlb_setup(), so
	 * make sparse_init() using top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	memblock_reserve(__pa_symbol(&__nosave_begin),
		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
0692 
/*
 * Register a "System RAM" resource for every memblock range and nest the
 * kernel text/data/bss resources (and any crashkernel region) inside.
 */
static void __init resource_init(void)
{
	phys_addr_t start, end;
	u64 i;

	/* NOTE(review): skipped when UNCAC_BASE != IO_BASE — confirm rationale. */
	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		res->name = "System RAM";

		request_resource(&iomem_resource, res);

		/*
		 *  We don't know which RAM region contains kernel data,
		 *  so we try it repeatedly and let the resource manager
		 *  test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}
0739 
0740 #ifdef CONFIG_SMP
0741 static void __init prefill_possible_map(void)
0742 {
0743     int i, possible = num_possible_cpus();
0744 
0745     if (possible > nr_cpu_ids)
0746         possible = nr_cpu_ids;
0747 
0748     for (i = 0; i < possible; i++)
0749         set_cpu_possible(i, true);
0750     for (; i < NR_CPUS; i++)
0751         set_cpu_possible(i, false);
0752 
0753     nr_cpu_ids = possible;
0754 }
0755 #else
0756 static inline void prefill_possible_map(void) {}
0757 #endif
0758 
/*
 * Architecture-specific setup entry point; the assembled command line is
 * handed back through *cmdline_p.
 */
void __init setup_arch(char **cmdline_p)
{
	/* Identify the CPU and coherence manager before platform code runs. */
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	/* Memory detection, command line assembly, memblock/initrd setup. */
	arch_mem_init(cmdline_p);
	dmi_setup();

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();

	memblock_dump_all();
}
0790 
/* Per-CPU kernel stack pointers — presumably consumed by low-level entry
 * code elsewhere; not referenced in this file. */
unsigned long kernelsp[NR_CPUS];
/* Raw bootloader/firmware argument registers, preserved for platform code. */
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
0793 
#ifdef CONFIG_DEBUG_FS
/* Root of the MIPS-specific debugfs hierarchy. */
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
	return 0;
}
arch_initcall(debugfs_mips);
#endif
0803 
#ifdef CONFIG_DMA_NONCOHERENT
/* "coherentio" parameter: assume hardware-maintained DMA cache coherency. */
static int __init setcoherentio(char *str)
{
	dma_default_coherent = true;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

/* "nocoherentio" parameter: fall back to software-maintained coherency. */
static int __init setnocoherentio(char *str)
{
	dma_default_coherent = false;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif