// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
    memcpy(fpe_type, line, 8);
    return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
    [0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
    u32 irq[4];
    u32 abt[4];
    u32 und[4];
    u32 fiq[4];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
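/*
 * How the probe above works: the union overlays a four-character string
 * on an unsigned long, so ENDIANNESS reads back whichever character the
 * CPU stores in the least-significant byte.  For example, on a
 * little-endian core c[0] = 'l' occupies the low byte and the macro
 * yields 'l'; on a big-endian core it yields 'b'.  setup_processor()
 * appends this character to the utsname machine string and to
 * elf_platform, producing names such as "armv7l".
 */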

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
    {
        .name = "Video RAM",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_MEM
    },
    {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_SYSTEM_RAM
    },
    {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_SYSTEM_RAM
    }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
    {
        .name = "reserved",
        .start = 0x3bc,
        .end = 0x3be,
        .flags = IORESOURCE_IO | IORESOURCE_BUSY
    },
    {
        .name = "reserved",
        .start = 0x378,
        .end = 0x37f,
        .flags = IORESOURCE_IO | IORESOURCE_BUSY
    },
    {
        .name = "reserved",
        .start = 0x278,
        .end = 0x27f,
        .flags = IORESOURCE_IO | IORESOURCE_BUSY
    }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
    "undefined/unknown",
    "3",
    "4",
    "4T",
    "5",
    "5T",
    "5TE",
    "5TEJ",
    "6TEJ",
    "7",
    "7M",
    "?(12)",
    "?(13)",
    "?(14)",
    "?(15)",
    "?(16)",
    "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
    return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
    int cpu_arch;

    if ((read_cpuid_id() & 0x0008f000) == 0) {
        cpu_arch = CPU_ARCH_UNKNOWN;
    } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
        cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
    } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
        cpu_arch = (read_cpuid_id() >> 16) & 7;
        if (cpu_arch)
            cpu_arch += CPU_ARCH_ARMv3;
    } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
        /* Revised CPUID format. Read the Memory Model Feature
         * Register 0 and check for VMSAv7 or PMSAv7 */
        unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
        if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
            (mmfr0 & 0x000000f0) >= 0x00000030)
            cpu_arch = CPU_ARCH_ARMv7;
        else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
             (mmfr0 & 0x000000f0) == 0x00000020)
            cpu_arch = CPU_ARCH_ARMv6;
        else
            cpu_arch = CPU_ARCH_UNKNOWN;
    } else
        cpu_arch = CPU_ARCH_UNKNOWN;

    return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
    BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

    return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
    int aliasing_icache;
    unsigned int id_reg, num_sets, line_size;

    /* PIPT caches never alias. */
    if (icache_is_pipt())
        return 0;

    /* arch specifies the register format */
    switch (arch) {
    case CPU_ARCH_ARMv7:
        set_csselr(CSSELR_ICACHE | CSSELR_L1);
        isb();
        id_reg = read_ccsidr();
        line_size = 4 << ((id_reg & 0x7) + 2);
        num_sets = ((id_reg >> 13) & 0x7fff) + 1;
        aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
        break;
    case CPU_ARCH_ARMv6:
        aliasing_icache = read_cpuid_cachetype() & (1 << 11);
        break;
    default:
        /* I-cache aliases will be handled by D-cache aliasing code */
        aliasing_icache = 0;
    }

    return aliasing_icache;
}
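/*
 * Worked example for the ARMv7 branch above, assuming 4 KiB pages: a
 * CCSIDR with LineSize = 1 encodes 4 << (1 + 2) = 32-byte lines, and a
 * NumSets field of 127 means 128 sets, so one cache way spans
 * 32 * 128 = 4096 bytes.  That exactly equals PAGE_SIZE, so the
 * I-cache does not alias; doubling either field would make a way
 * larger than a page and the function would return true.
 */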

static void __init cacheid_init(void)
{
    unsigned int arch = cpu_architecture();

    if (arch >= CPU_ARCH_ARMv6) {
        unsigned int cachetype = read_cpuid_cachetype();

        if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
            cacheid = 0;
        } else if ((cachetype & (7 << 29)) == 4 << 29) {
            /* ARMv7 register format */
            arch = CPU_ARCH_ARMv7;
            cacheid = CACHEID_VIPT_NONALIASING;
            switch (cachetype & (3 << 14)) {
            case (1 << 14):
                cacheid |= CACHEID_ASID_TAGGED;
                break;
            case (3 << 14):
                cacheid |= CACHEID_PIPT;
                break;
            }
        } else {
            arch = CPU_ARCH_ARMv6;
            if (cachetype & (1 << 23))
                cacheid = CACHEID_VIPT_ALIASING;
            else
                cacheid = CACHEID_VIPT_NONALIASING;
        }
        if (cpu_has_aliasing_icache(arch))
            cacheid |= CACHEID_VIPT_I_ALIASING;
    } else {
        cacheid = CACHEID_VIVT;
    }

    pr_info("CPU: %s data cache, %s instruction cache\n",
        cache_is_vivt() ? "VIVT" :
        cache_is_vipt_aliasing() ? "VIPT aliasing" :
        cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
        cache_is_vivt() ? "VIVT" :
        icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
        icache_is_vipt_aliasing() ? "VIPT aliasing" :
        icache_is_pipt() ? "PIPT" :
        cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
    extern void printascii(const char *);
    char buf[256];
    va_list ap;

    va_start(ap, str);
    vsnprintf(buf, sizeof(buf), str, ap);
    va_end(ap);

#ifdef CONFIG_DEBUG_LL
    printascii(buf);
#endif
    printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
    if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
        /* "sdiv r0, r0, r1" */
        u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
        return __opcode_to_mem_thumb32(insn);
    }

    /* "sdiv r0, r0, r1" */
    return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
    if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
        /* "udiv r0, r0, r1" */
        u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
        return __opcode_to_mem_thumb32(insn);
    }

    /* "udiv r0, r0, r1" */
    return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
    if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
        /* "bx lr; nop" */
        u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
        return __opcode_to_mem_thumb32(insn);
    }

    /* "bx lr" */
    return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
    extern void __aeabi_uidiv(void);
    extern void __aeabi_idiv(void);
    uintptr_t fn_addr;
    unsigned int mask;

    mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
    if (!(elf_hwcap & mask))
        return;

    pr_info("CPU: div instructions available: patching division code\n");

    fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
    asm ("" : "+g" (fn_addr));
    ((u32 *)fn_addr)[0] = udiv_instruction();
    ((u32 *)fn_addr)[1] = bx_lr_instruction();
    flush_icache_range(fn_addr, fn_addr + 8);

    fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
    asm ("" : "+g" (fn_addr));
    ((u32 *)fn_addr)[0] = sdiv_instruction();
    ((u32 *)fn_addr)[1] = bx_lr_instruction();
    flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif
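/*
 * For illustration: in ARM mode the patching above rewrites the first
 * two words of each helper, so __aeabi_uidiv ends up as
 *
 *     udiv    r0, r0, r1      @ 0xe730f110
 *     bx      lr              @ 0xe12fff1e
 *
 * replacing the software division loop with the hardware instruction;
 * flush_icache_range() then makes the rewritten text visible to the
 * instruction stream.
 */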

static void __init cpuid_init_hwcaps(void)
{
    int block;
    u32 isar5;

    if (cpu_architecture() < CPU_ARCH_ARMv7)
        return;

    block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
    if (block >= 2)
        elf_hwcap |= HWCAP_IDIVA;
    if (block >= 1)
        elf_hwcap |= HWCAP_IDIVT;

    /* LPAE implies atomic ldrd/strd instructions */
    block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
    if (block >= 5)
        elf_hwcap |= HWCAP_LPAE;

    /* check for supported v8 Crypto instructions */
    isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

    block = cpuid_feature_extract_field(isar5, 4);
    if (block >= 2)
        elf_hwcap2 |= HWCAP2_PMULL;
    if (block >= 1)
        elf_hwcap2 |= HWCAP2_AES;

    block = cpuid_feature_extract_field(isar5, 8);
    if (block >= 1)
        elf_hwcap2 |= HWCAP2_SHA1;

    block = cpuid_feature_extract_field(isar5, 12);
    if (block >= 1)
        elf_hwcap2 |= HWCAP2_SHA2;

    block = cpuid_feature_extract_field(isar5, 16);
    if (block >= 1)
        elf_hwcap2 |= HWCAP2_CRC32;
}
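/*
 * Example: on a core that implements SDIV/UDIV in both ARM and Thumb
 * state (e.g. Cortex-A7), ID_ISAR0 bits [27:24] read 2, so both
 * HWCAP_IDIVA and HWCAP_IDIVT are set above and "idiva idivt" later
 * show up in the Features line of /proc/cpuinfo.
 */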

static void __init elf_hwcap_fixup(void)
{
    unsigned id = read_cpuid_id();

    /*
     * HWCAP_TLS is available only on 1136 r1p0 and later,
     * see also kuser_get_tls_init.
     */
    if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
        ((id >> 20) & 3) == 0) {
        elf_hwcap &= ~HWCAP_TLS;
        return;
    }

    /* Verify if CPUID scheme is implemented */
    if ((id & 0x000f0000) != 0x000f0000)
        return;

    /*
     * If the CPU supports LDREX/STREX and LDREXB/STREXB,
     * avoid advertising SWP; it may not be atomic with
     * multiprocessing cores.
     */
    if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
        (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
         cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
        elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
    unsigned int cpu = smp_processor_id();
    struct stack *stk = &stacks[cpu];

    if (cpu >= NR_CPUS) {
        pr_crit("CPU%u: bad primary CPU number\n", cpu);
        BUG();
    }

    /*
     * This only works on resume and secondary cores. For booting on the
     * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
     */
    set_my_cpu_offset(per_cpu_offset(cpu));

    cpu_proc_init();

    /*
     * Define the placement constraint for the inline asm directive below.
     * In Thumb-2, msr with an immediate value is not allowed.
     */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l   "l"
#define PLC_r   "r"
#else
#define PLC_l   "I"
#define PLC_r   "I"
#endif

    /*
     * setup stacks for re-entrant exception handlers
     */
    __asm__ (
    "msr    cpsr_c, %1\n\t"
    "add    r14, %0, %2\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %3\n\t"
    "add    r14, %0, %4\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %5\n\t"
    "add    r14, %0, %6\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %7\n\t"
    "add    r14, %0, %8\n\t"
    "mov    sp, r14\n\t"
    "msr    cpsr_c, %9"
        :
        : "r" (stk),
          PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
          "I" (offsetof(struct stack, irq[0])),
          PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
          "I" (offsetof(struct stack, abt[0])),
          PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
          "I" (offsetof(struct stack, und[0])),
          PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
          "I" (offsetof(struct stack, fiq[0])),
          PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
        : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
    int i;
    u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
    u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

    cpu_logical_map(0) = cpu;
    for (i = 1; i < nr_cpu_ids; ++i)
        cpu_logical_map(i) = i == cpu ? 0 : i;

    /*
     * clear __my_cpu_offset on boot CPU to avoid hang caused by
     * using percpu variable early, for example, lockdep will
     * access percpu variable inside lock_release
     */
    set_my_cpu_offset(0);

    pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
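/*
 * Example: if the booting core's affinity-0 field reads 2 on a
 * four-CPU system, the map above becomes { 2, 1, 0, 3 }: logical CPU 0
 * is the booting core, physical CPU 0 takes the slot it vacated, and
 * every physical ID still appears exactly once.
 */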

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *            level in order to build a linear index from an
 *            MPIDR value. Resulting algorithm is a collision
 *            free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
    u32 i, affinity;
    u32 fs[3], bits[3], ls, mask = 0;
    /*
     * Pre-scan the list of MPIDRS and filter out bits that do
     * not contribute to affinity levels, ie they never toggle.
     */
    for_each_possible_cpu(i)
        mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
    pr_debug("mask of set bits 0x%x\n", mask);
    /*
     * Find and stash the last and first bit set at all affinity levels to
     * check how many bits are required to represent them.
     */
    for (i = 0; i < 3; i++) {
        affinity = MPIDR_AFFINITY_LEVEL(mask, i);
        /*
         * Find the MSB bit and LSB bits position
         * to determine how many bits are required
         * to express the affinity level.
         */
        ls = fls(affinity);
        fs[i] = affinity ? ffs(affinity) - 1 : 0;
        bits[i] = ls - fs[i];
    }
    /*
     * An index can be created from the MPIDR by isolating the
     * significant bits at each affinity level and by shifting
     * them in order to compress the 24 bits values space to a
     * compressed set of values. This is equivalent to hashing
     * the MPIDR through shifting and ORing. It is a collision free
     * hash though not minimal since some levels might contain a number
     * of CPUs that is not an exact power of 2 and their bit
     * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
     */
    mpidr_hash.shift_aff[0] = fs[0];
    mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
    mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                        (bits[1] + bits[0]);
    mpidr_hash.mask = mask;
    mpidr_hash.bits = bits[2] + bits[1] + bits[0];
    pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                mpidr_hash.shift_aff[0],
                mpidr_hash.shift_aff[1],
                mpidr_hash.shift_aff[2],
                mpidr_hash.mask,
                mpidr_hash.bits);
    /*
     * 4x is an arbitrary value used to warn on a hash table much bigger
     * than expected on most systems.
     */
    if (mpidr_hash_size() > 4 * num_possible_cpus())
        pr_warn("Large number of MPIDR hash buckets detected\n");
    sync_cache_w(&mpidr_hash);
}
#endif
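/*
 * Worked example: two clusters of two CPUs with MPIDRs 0x000, 0x001,
 * 0x100 and 0x101 give mask = 0x101, one significant bit at affinity
 * levels 0 and 1, shift_aff = { 0, 7, 14 } and bits = 2, so the four
 * MPIDRs hash to the contiguous indices 0, 1, 2 and 3 with no
 * collisions.
 */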

/*
 * locate processor in the list of supported processor types.  The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
    struct proc_info_list *list = lookup_processor_type(midr);

    if (!list) {
        pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
               smp_processor_id(), midr);
        while (1)
        /* can't use cpu_relax() here as it may require MMU setup */;
    }

    return list;
}

static void __init setup_processor(void)
{
    unsigned int midr = read_cpuid_id();
    struct proc_info_list *list = lookup_processor(midr);

    cpu_name = list->cpu_name;
    __cpu_architecture = __get_cpu_architecture();

    init_proc_vtable(list->proc);
#ifdef MULTI_TLB
    cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
    cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
    cpu_cache = *list->cache;
#endif

    pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
        list->cpu_name, midr, midr & 15,
        proc_arch[cpu_architecture()], get_cr());

    snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
         list->arch_name, ENDIANNESS);
    snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
         list->elf_name, ENDIANNESS);
    elf_hwcap = list->elf_hwcap;

    cpuid_init_hwcaps();
    patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
    elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
    init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
    erratum_a15_798181_init();

    elf_hwcap_fixup();

    cacheid_init();
    cpu_init();
}

void __init dump_machine_table(void)
{
    const struct machine_desc *p;

    early_print("Available machine support:\n\nID (hex)\tNAME\n");
    for_each_machine_desc(p)
        early_print("%08x\t%s\n", p->nr, p->name);

    early_print("\nPlease check your kernel config and/or bootloader.\n");

    while (true)
        /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
    u64 aligned_start;

    /*
     * Ensure that start/size are aligned to a page boundary.
     * Size is rounded down, start is rounded up.
     */
    aligned_start = PAGE_ALIGN(start);
    if (aligned_start > start + size)
        size = 0;
    else
        size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
    if (aligned_start > ULONG_MAX) {
        pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
            start);
        return -EINVAL;
    }

    if (aligned_start + size > ULONG_MAX) {
        pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
            (long long)start);
        /*
         * To ensure bank->start + bank->size is representable in
         * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
         * This means we lose a page after masking.
         */
        size = ULONG_MAX - aligned_start;
    }
#endif

    if (aligned_start < PHYS_OFFSET) {
        if (aligned_start + size <= PHYS_OFFSET) {
            pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                aligned_start, aligned_start + size);
            return -EINVAL;
        }

        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
            aligned_start, (u64)PHYS_OFFSET);

        size -= PHYS_OFFSET - aligned_start;
        aligned_start = PHYS_OFFSET;
    }

    start = aligned_start;
    size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

    /*
     * Check whether this memory region has non-zero size or
     * invalid node number.
     */
    if (size == 0)
        return -EINVAL;

    memblock_add(start, size);
    return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
    static int usermem __initdata = 0;
    u64 size;
    u64 start;
    char *endp;

    /*
     * If the user specifies memory size, we
     * blow away any automatically generated
     * size.
     */
    if (usermem == 0) {
        usermem = 1;
        memblock_remove(memblock_start_of_DRAM(),
            memblock_end_of_DRAM() - memblock_start_of_DRAM());
    }

    start = PHYS_OFFSET;
    size  = memparse(p, &endp);
    if (*endp == '@')
        start = memparse(endp + 1, NULL);

    arm_add_memory(start, size);

    return 0;
}
early_param("mem", early_mem);
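/*
 * Example command lines: "mem=512M" registers 512 MiB starting at
 * PHYS_OFFSET, while "mem=256M@0x20000000" places the bank at an
 * explicit physical address.  The first "mem=" drops the
 * firmware-reported memory; any further "mem=" options add extra
 * banks on top of it.
 */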

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
    phys_addr_t start, end, res_end;
    struct resource *res;
    u64 i;

    kernel_code.start   = virt_to_phys(_text);
    kernel_code.end     = virt_to_phys(__init_begin - 1);
    kernel_data.start   = virt_to_phys(_sdata);
    kernel_data.end     = virt_to_phys(_end - 1);

    for_each_mem_range(i, &start, &end) {
        unsigned long boot_alias_start;

        /*
         * In memblock, end points to the first byte after the
         * range while in resources, end points to the last byte in
         * the range.
         */
        res_end = end - 1;

        /*
         * Some systems have a special memory alias which is only
         * used for booting.  We need to advertise this region to
         * kexec-tools so they know where bootable RAM is located.
         */
        boot_alias_start = phys_to_idmap(start);
        if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
            res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
            if (!res)
                panic("%s: Failed to allocate %zu bytes\n",
                      __func__, sizeof(*res));
            res->name = "System RAM (boot alias)";
            res->start = boot_alias_start;
            res->end = phys_to_idmap(res_end);
            res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
            request_resource(&iomem_resource, res);
        }

        res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
        if (!res)
            panic("%s: Failed to allocate %zu bytes\n", __func__,
                  sizeof(*res));
        res->name  = "System RAM";
        res->start = start;
        res->end = res_end;
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        request_resource(&iomem_resource, res);

        if (kernel_code.start >= res->start &&
            kernel_code.end <= res->end)
            request_resource(res, &kernel_code);
        if (kernel_data.start >= res->start &&
            kernel_data.end <= res->end)
            request_resource(res, &kernel_data);
    }

    if (mdesc->video_start) {
        video_ram.start = mdesc->video_start;
        video_ram.end   = mdesc->video_end;
        request_resource(&iomem_resource, &video_ram);
    }

    /*
     * Some machines don't have the possibility of ever
     * possessing lp0, lp1 or lp2
     */
    if (mdesc->reserve_lp0)
        request_resource(&ioport_resource, &lp0);
    if (mdesc->reserve_lp1)
        request_resource(&ioport_resource, &lp1);
    if (mdesc->reserve_lp2)
        request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
    .orig_video_lines  = 30,
    .orig_video_cols   = 80,
    .orig_video_mode   = 0,
    .orig_video_ega_bx = 0,
    .orig_video_isVGA  = 1,
    .orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
    /*
     * customizes platform devices, or adds new ones
     * On DT based machines, we fall back to populating the
     * machine from the device tree, if no callback is provided,
     * otherwise we would always need an init_machine callback.
     */
    if (machine_desc->init_machine)
        machine_desc->init_machine();

    return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
    struct device_node *root;
    int ret;

    if (machine_desc->init_late)
        machine_desc->init_late();

    root = of_find_node_by_path("/");
    if (root) {
        ret = of_property_read_string(root, "serial-number",
                          &system_serial);
        if (ret)
            system_serial = NULL;
    }

    if (!system_serial)
        system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
                      system_serial_high,
                      system_serial_low);

    return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN (128 << 20)

static inline unsigned long long get_total_mem(void)
{
    unsigned long total;

    total = max_low_pfn - min_low_pfn;
    return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
    unsigned long long crash_size, crash_base;
    unsigned long long total_mem;
    int ret;

    total_mem = get_total_mem();
    ret = parse_crashkernel(boot_command_line, total_mem,
                &crash_size, &crash_base);
    /* invalid value specified or crashkernel=0 */
    if (ret || !crash_size)
        return;

    if (crash_base <= 0) {
        unsigned long long crash_max = idmap_to_phys((u32)~0);
        unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
        if (crash_max > lowmem_max)
            crash_max = lowmem_max;

        crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
                               CRASH_ALIGN, crash_max);
        if (!crash_base) {
            pr_err("crashkernel reservation failed - No suitable area found.\n");
            return;
        }
    } else {
        unsigned long long crash_max = crash_base + crash_size;
        unsigned long long start;

        start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
                          crash_base, crash_max);
        if (!start) {
            pr_err("crashkernel reservation failed - memory is in use.\n");
            return;
        }
    }

    pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
        (unsigned long)(crash_size >> 20),
        (unsigned long)(crash_base >> 20),
        (unsigned long)(total_mem >> 20));

    /* The crashk resource must always be located in normal mem */
    crashk_res.start = crash_base;
    crashk_res.end = crash_base + crash_size - 1;
    insert_resource(&iomem_resource, &crashk_res);

    if (arm_has_idmap_alias()) {
        /*
         * If we have a special RAM alias for use at boot, we
         * need to advertise to kexec tools where the alias is.
         */
        static struct resource crashk_boot_res = {
            .name = "Crash kernel (boot alias)",
            .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
        };

        crashk_boot_res.start = phys_to_idmap(crash_base);
        crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_boot_res);
    }
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
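/*
 * Example: "crashkernel=64M" lets the kernel pick a 128 MiB-aligned
 * base below lowmem, whereas "crashkernel=64M@0x30000000" requests
 * the fixed physical range 0x30000000-0x33ffffff for the capture
 * kernel.
 */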

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
    sync_boot_mode();

    if (is_hyp_mode_available()) {
        pr_info("CPU: All CPU(s) started in HYP mode.\n");
        pr_info("CPU: Virtualization extensions available.\n");
    } else if (is_hyp_mode_mismatched()) {
        pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
            __boot_cpu_mode & MODE_MASK);
        pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
    } else
        pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
               void *data)
{
    __arm_pm_restart(action, data);
    return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
    .notifier_call = arm_restart,
    .priority = 128,
};

void __init setup_arch(char **cmdline_p)
{
    const struct machine_desc *mdesc = NULL;
    void *atags_vaddr = NULL;

    if (__atags_pointer)
        atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

    setup_processor();
    if (atags_vaddr) {
        mdesc = setup_machine_fdt(atags_vaddr);
        if (mdesc)
            memblock_reserve(__atags_pointer,
                     fdt_totalsize(atags_vaddr));
    }
    if (!mdesc)
        mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
    if (!mdesc) {
        early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
        early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
                __atags_pointer);
        if (__atags_pointer)
            early_print("  r2[]=%*ph\n", 16, atags_vaddr);
        dump_machine_table();
    }

    machine_desc = mdesc;
    machine_name = mdesc->name;
    dump_stack_set_arch_desc("%s", mdesc->name);

    if (mdesc->reboot_mode != REBOOT_HARD)
        reboot_mode = mdesc->reboot_mode;

    setup_initial_init_mm(_text, _etext, _edata, _end);

    /* populate cmd_line too for later use, preserving boot_command_line */
    strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
    *cmdline_p = cmd_line;

    early_fixmap_init();
    early_ioremap_init();

    parse_early_param();

#ifdef CONFIG_MMU
    early_mm_init(mdesc);
#endif
    setup_dma_zone(mdesc);
    xen_early_init();
    efi_init();
    /*
     * Make sure the calculation for lowmem/highmem is set appropriately
     * before reserving/allocating any memory
     */
    adjust_lowmem_bounds();
    arm_memblock_init(mdesc);
    /* Memory may have been removed so recalculate the bounds. */
    adjust_lowmem_bounds();

    early_ioremap_reset();

    paging_init(mdesc);
    kasan_init();
    request_standard_resources(mdesc);

    if (mdesc->restart) {
        __arm_pm_restart = mdesc->restart;
        register_restart_handler(&arm_restart_nb);
    }

    unflatten_device_tree();

    arm_dt_init_cpu_maps();
    psci_dt_init();
#ifdef CONFIG_SMP
    if (is_smp()) {
        if (!mdesc->smp_init || !mdesc->smp_init()) {
            if (psci_smp_available())
                smp_set_ops(&psci_smp_ops);
            else if (mdesc->smp)
                smp_set_ops(mdesc->smp);
        }
        smp_init_cpus();
        smp_build_mpidr_hash();
    }
#endif

    if (!is_smp())
        hyp_mode_check();

    reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
    handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
    conswitchp = &vga_con;
#endif
#endif

    if (mdesc->init_early)
        mdesc->init_early();
}

static int __init topology_init(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
        cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
        register_cpu(&cpuinfo->cpu, cpu);
    }

    return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
    struct proc_dir_entry *res;

    res = proc_mkdir("cpu", NULL);
    if (!res)
        return -ENOMEM;
    return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
    "swp",
    "half",
    "thumb",
    "26bit",
    "fastmult",
    "fpa",
    "vfp",
    "edsp",
    "java",
    "iwmmxt",
    "crunch",
    "thumbee",
    "neon",
    "vfpv3",
    "vfpv3d16",
    "tls",
    "vfpv4",
    "idiva",
    "idivt",
    "vfpd32",
    "lpae",
    "evtstrm",
    NULL
};

static const char *hwcap2_str[] = {
    "aes",
    "pmull",
    "sha1",
    "sha2",
    "crc32",
    NULL
};

static int c_show(struct seq_file *m, void *v)
{
    int i, j;
    u32 cpuid;

    for_each_online_cpu(i) {
        /*
         * glibc reads /proc/cpuinfo to determine the number of
         * online processors, looking for lines beginning with
         * "processor".  Give glibc what it expects.
         */
        seq_printf(m, "processor\t: %d\n", i);
        cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
        seq_printf(m, "model name\t: %s rev %d (%s)\n",
               cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
               per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
               (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
               loops_per_jiffy / (500000/HZ),
               (loops_per_jiffy / (5000/HZ)) % 100);
#endif
        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (j = 0; hwcap_str[j]; j++)
            if (elf_hwcap & (1 << j))
                seq_printf(m, "%s ", hwcap_str[j]);

        for (j = 0; hwcap2_str[j]; j++)
            if (elf_hwcap2 & (1 << j))
                seq_printf(m, "%s ", hwcap2_str[j]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
        seq_printf(m, "CPU architecture: %s\n",
               proc_arch[cpu_architecture()]);

        if ((cpuid & 0x0008f000) == 0x00000000) {
            /* pre-ARM7 */
            seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
        } else {
            if ((cpuid & 0x0008f000) == 0x00007000) {
                /* ARM7 */
                seq_printf(m, "CPU variant\t: 0x%02x\n",
                       (cpuid >> 16) & 127);
            } else {
                /* post-ARM7 */
                seq_printf(m, "CPU variant\t: 0x%x\n",
                       (cpuid >> 20) & 15);
            }
            seq_printf(m, "CPU part\t: 0x%03x\n",
                   (cpuid >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
    }

    seq_printf(m, "Hardware\t: %s\n", machine_name);
    seq_printf(m, "Revision\t: %04x\n", system_rev);
    seq_printf(m, "Serial\t\t: %s\n", system_serial);

    return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
    return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
    ++*pos;
    return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
    .start  = c_start,
    .next   = c_next,
    .stop   = c_stop,
    .show   = c_show
};
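/*
 * For reference, c_show() emits /proc/cpuinfo entries of the following
 * shape (the values here are illustrative, not from a specific board):
 *
 *     processor       : 0
 *     model name      : ARMv7 Processor rev 5 (v7l)
 *     BogoMIPS        : 38.40
 *     Features        : half thumb fastmult vfp edsp neon vfpv3 tls ...
 *     CPU implementer : 0x41
 *     CPU architecture: 7
 *     CPU variant     : 0x0
 *     CPU part        : 0xc07
 *     CPU revision    : 5
 *
 *     Hardware        : <machine name>
 *     Revision        : 0000
 *     Serial          : 0000000000000000
 */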