/*
 * NOTE(review): this file was captured from an LXR source-browser page;
 * the site navigation chrome that preceded the source has been removed.
 */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors: Catalin Marinas <catalin.marinas@arm.com>
 *      Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

/* Physical base of the kernel image (KERNEL_START is the link-time VA). */
#define __PHYS_OFFSET   (KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
    __HEAD
_head:
    /*
     * DO NOT MODIFY. Image header expected by Linux boot-loaders.
     */
#ifdef CONFIG_EFI
    /*
     * This add instruction has no meaningful effect except that
     * its opcode forms the magic "MZ" signature required by UEFI.
     */
    add x13, x18, #0x16
    b   stext
#else
    b   stext               // branch to kernel start, magic
    .long   0               // reserved
#endif
    le64sym _kernel_offset_le       // Image load offset from start of RAM, little-endian
    le64sym _kernel_size_le         // Effective size of kernel image, little-endian
    le64sym _kernel_flags_le        // Informative flags, little-endian
    .quad   0               // reserved
    .quad   0               // reserved
    .quad   0               // reserved
    .byte   0x41                // Magic number, "ARM\x64"
    .byte   0x52
    .byte   0x4d
    .byte   0x64
#ifdef CONFIG_EFI
    .long   pe_header - _head       // Offset to the PE header.
#else
    .word   0               // reserved
#endif

#ifdef CONFIG_EFI
    .align 3
pe_header:
    .ascii  "PE"
    .short  0
coff_header:
    .short  0xaa64              // AArch64
    .short  2               // nr_sections
    .long   0               // TimeDateStamp
    .long   0               // PointerToSymbolTable
    .long   1               // NumberOfSymbols
    .short  section_table - optional_header // SizeOfOptionalHeader
    .short  0x206               // Characteristics.
                        // IMAGE_FILE_DEBUG_STRIPPED |
                        // IMAGE_FILE_EXECUTABLE_IMAGE |
                        // IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
    .short  0x20b               // PE32+ format
    .byte   0x02                // MajorLinkerVersion
    .byte   0x14                // MinorLinkerVersion
    .long   _end - efi_header_end       // SizeOfCode
    .long   0               // SizeOfInitializedData
    .long   0               // SizeOfUninitializedData
    .long   __efistub_entry - _head     // AddressOfEntryPoint
    .long   efi_header_end - _head      // BaseOfCode

extra_header_fields:
    .quad   0               // ImageBase
    .long   0x1000              // SectionAlignment
    .long   PECOFF_FILE_ALIGNMENT       // FileAlignment
    .short  0               // MajorOperatingSystemVersion
    .short  0               // MinorOperatingSystemVersion
    .short  0               // MajorImageVersion
    .short  0               // MinorImageVersion
    .short  0               // MajorSubsystemVersion
    .short  0               // MinorSubsystemVersion
    .long   0               // Win32VersionValue

    .long   _end - _head            // SizeOfImage

    // Everything before the kernel image is considered part of the header
    .long   efi_header_end - _head      // SizeOfHeaders
    .long   0               // CheckSum
    .short  0xa             // Subsystem (EFI application)
    .short  0               // DllCharacteristics
    .quad   0               // SizeOfStackReserve
    .quad   0               // SizeOfStackCommit
    .quad   0               // SizeOfHeapReserve
    .quad   0               // SizeOfHeapCommit
    .long   0               // LoaderFlags
    .long   0x6             // NumberOfRvaAndSizes

    .quad   0               // ExportTable
    .quad   0               // ImportTable
    .quad   0               // ResourceTable
    .quad   0               // ExceptionTable
    .quad   0               // CertificationTable
    .quad   0               // BaseRelocationTable

    // Section table
section_table:

    /*
     * The EFI application loader requires a relocation section
     * because EFI applications must be relocatable.  This is a
     * dummy section as far as we are concerned.
     */
    .ascii  ".reloc"
    .byte   0
    .byte   0           // end of 0 padding of section name
    .long   0
    .long   0
    .long   0           // SizeOfRawData
    .long   0           // PointerToRawData
    .long   0           // PointerToRelocations
    .long   0           // PointerToLineNumbers
    .short  0           // NumberOfRelocations
    .short  0           // NumberOfLineNumbers
    .long   0x42100040      // Characteristics (section flags)


    .ascii  ".text"
    .byte   0
    .byte   0
    .byte   0               // end of 0 padding of section name
    .long   _end - efi_header_end   // VirtualSize
    .long   efi_header_end - _head  // VirtualAddress
    .long   _edata - efi_header_end // SizeOfRawData
    .long   efi_header_end - _head  // PointerToRawData

    .long   0       // PointerToRelocations (0 for executables)
    .long   0       // PointerToLineNumbers (0 for executables)
    .short  0       // NumberOfRelocations  (0 for executables)
    .short  0       // NumberOfLineNumbers  (0 for executables)
    .long   0xe0500020  // Characteristics (section flags)

    /*
     * EFI will load .text onwards at the 4k section alignment
     * described in the PE/COFF header. To ensure that instruction
     * sequences using an adrp and a :lo12: immediate will function
     * correctly at this alignment, we must ensure that .text is
     * placed at a 4k boundary in the Image to begin with.
     */
    .align 12
efi_header_end:
#endif

0209     __INIT
0210 
0211     /*
0212      * The following callee saved general purpose registers are used on the
0213      * primary lowlevel boot path:
0214      *
0215      *  Register   Scope                      Purpose
0216      *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
0217      *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
0218      *  x28        __create_page_tables()     callee preserved temp register
0219      *  x19/x20    __primary_switch()         callee preserved temp registers
0220      */
0221 ENTRY(stext)
0222     bl  preserve_boot_args
0223     bl  el2_setup           // Drop to EL1, w0=cpu_boot_mode
0224     adrp    x23, __PHYS_OFFSET
0225     and x23, x23, MIN_KIMG_ALIGN - 1    // KASLR offset, defaults to 0
0226     bl  set_cpu_boot_mode_flag
0227     bl  __create_page_tables
0228     /*
0229      * The following calls CPU setup code, see arch/arm64/mm/proc.S for
0230      * details.
0231      * On return, the CPU will be ready for the MMU to be turned on and
0232      * the TCR will have been set.
0233      */
0234     bl  __cpu_setup         // initialise processor
0235     b   __primary_switch
0236 ENDPROC(stext)
0237 
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 * (recorded into the boot_args array for later inspection by C code).
 */
preserve_boot_args:
    mov x21, x0             // x21=FDT

    adr_l   x0, boot_args           // record the contents of
    stp x21, x1, [x0]           // x0 .. x3 at kernel entry
    stp x2, x3, [x0, #16]

    dmb sy              // needed before dc ivac with
                        // MMU off

    add x1, x0, #0x20           // 4 x 8 bytes
    b   __inval_cache_range     // tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *  tbl:    page table address
 *  virt:   virtual address
 *  shift:  #imm page table shift
 *  ptrs:   #imm pointers per table page
 *
 * Preserves:   virt
 * Corrupts:    tmp1, tmp2
 * Returns: tbl -> next level table page address
 */
    .macro  create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
    lsr \tmp1, \virt, #\shift
    and \tmp1, \tmp1, #\ptrs - 1    // table index
    add \tmp2, \tbl, #PAGE_SIZE
    orr \tmp2, \tmp2, #PMD_TYPE_TABLE   // address of next table and entry type
    str \tmp2, [\tbl, \tmp1, lsl #3]
    add \tbl, \tbl, #PAGE_SIZE      // next level table page
    .endm

/*
 * Macro to populate the PGD (and possibily PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:   tbl, next, virt
 * Corrupts:    tmp1, tmp2
 */
    .macro  create_pgd_entry, tbl, virt, tmp1, tmp2
    create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
    create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
    create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
    .endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:   tbl, flags
 * Corrupts:    phys, start, end, pstate
 */
    .macro  create_block_map, tbl, flags, phys, start, end
    lsr \phys, \phys, #SWAPPER_BLOCK_SHIFT
    lsr \start, \start, #SWAPPER_BLOCK_SHIFT
    and \start, \start, #PTRS_PER_PTE - 1   // table index
    orr \phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT  // table entry
    lsr \end, \end, #SWAPPER_BLOCK_SHIFT
    and \end, \end, #PTRS_PER_PTE - 1       // table end index
9999:   str \phys, [\tbl, \start, lsl #3]       // store the entry
    add \start, \start, #1          // next entry
    add \phys, \phys, #SWAPPER_BLOCK_SIZE       // next block
    cmp \start, \end
    b.ls    9999b               // loop while start <= end (unsigned)
    .endm

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
    mov x28, lr             // preserve return address across bl calls

    /*
     * Invalidate the idmap and swapper page tables to avoid potential
     * dirty cache lines being evicted.
     */
    adrp    x0, idmap_pg_dir
    adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
    bl  __inval_cache_range

    /*
     * Clear the idmap and swapper page tables.
     */
    adrp    x0, idmap_pg_dir
    adrp    x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
1:  stp xzr, xzr, [x0], #16
    stp xzr, xzr, [x0], #16
    stp xzr, xzr, [x0], #16
    stp xzr, xzr, [x0], #16
    cmp x0, x6
    b.lo    1b

    mov x7, SWAPPER_MM_MMUFLAGS

    /*
     * Create the identity mapping.
     */
    adrp    x0, idmap_pg_dir
    adrp    x3, __idmap_text_start      // __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS  (1 << (48 - EXTRA_SHIFT))

    /*
     * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
     * created that covers system RAM if that is located sufficiently high
     * in the physical address space. So for the ID map, use an extended
     * virtual range in that case, by configuring an additional translation
     * level.
     * First, we have to verify our assumption that the current value of
     * VA_BITS was chosen such that all translation levels are fully
     * utilised, and that lowering T0SZ will always result in an additional
     * translation level to be configured.
     */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

    /*
     * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
     * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
     * this number conveniently equals the number of leading zeroes in
     * the physical address of __idmap_text_end.
     */
    adrp    x5, __idmap_text_end
    clz x5, x5
    cmp x5, TCR_T0SZ(VA_BITS)   // default T0SZ small enough?
    b.ge    1f          // .. then skip additional level

    adr_l   x6, idmap_t0sz
    str x5, [x6]
    dmb sy
    dc  ivac, x6        // Invalidate potentially stale cache line

    create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

    create_pgd_entry x0, x3, x5, x6
    mov x5, x3              // __pa(__idmap_text_start)
    adr_l   x6, __idmap_text_end        // __pa(__idmap_text_end)
    create_block_map x0, x7, x3, x5, x6

    /*
     * Map the kernel image (starting with PHYS_OFFSET).
     */
    adrp    x0, swapper_pg_dir
    mov_q   x5, KIMAGE_VADDR + TEXT_OFFSET  // compile time __va(_text)
    add x5, x5, x23         // add KASLR displacement
    create_pgd_entry x0, x5, x3, x6
    adrp    x6, _end            // runtime __pa(_end)
    adrp    x3, _text           // runtime __pa(_text)
    sub x6, x6, x3          // _end - _text
    add x6, x6, x5          // runtime __va(_end)
    create_block_map x0, x7, x3, x5, x6

    /*
     * Since the page tables have been populated with non-cacheable
     * accesses (MMU disabled), invalidate the idmap and swapper page
     * tables again to remove any speculatively loaded cache lines.
     */
    adrp    x0, idmap_pg_dir
    adrp    x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
    dmb sy
    bl  __inval_cache_range

    ret x28
ENDPROC(__create_page_tables)
    .ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
    adrp    x4, init_thread_union
    add sp, x4, #THREAD_SIZE
    adr_l   x5, init_task
    msr sp_el0, x5          // Save thread_info

    adr_l   x8, vectors         // load VBAR_EL1 with virtual
    msr vbar_el1, x8            // vector table address
    isb

    stp xzr, x30, [sp, #-16]!       // terminal frame record (fp = 0)
    mov x29, sp

    str_l   x21, __fdt_pointer, x5      // Save FDT pointer

    ldr_l   x4, kimage_vaddr        // Save the offset between
    sub x4, x4, x0          // the kernel virtual and
    str_l   x4, kimage_voffset, x5      // physical mappings

    // Clear BSS
    adr_l   x0, __bss_start
    mov x1, xzr
    adr_l   x2, __bss_stop
    sub x2, x2, x0
    bl  __pi_memset
    dsb ishst               // Make zero page visible to PTW

#ifdef CONFIG_KASAN
    bl  kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
    tst x23, ~(MIN_KIMG_ALIGN - 1)  // already running randomized?
    b.ne    0f
    mov x0, x21             // pass FDT address in x0
    mov x1, x23             // pass modulo offset in x1
    bl  kaslr_early_init        // parse FDT for KASLR options
    cbz x0, 0f              // KASLR disabled? just proceed
    orr x23, x23, x0            // record KASLR offset
    ldp x29, x30, [sp], #16     // we must enable KASLR, return
    ret                 // to __primary_switch()
0:
#endif
    b   start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
    .section ".idmap.text","ax"

ENTRY(kimage_vaddr)
    .quad       _text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
    mrs x0, CurrentEL
    cmp x0, #CurrentEL_EL2
    b.ne    1f
    mrs x0, sctlr_el2
CPU_BE( orr x0, x0, #(1 << 25)  )   // Set the EE bit for EL2
CPU_LE( bic x0, x0, #(1 << 25)  )   // Clear the EE bit for EL2
    msr sctlr_el2, x0
    b   2f
1:  mrs x0, sctlr_el1
CPU_BE( orr x0, x0, #(3 << 24)  )   // Set the EE and E0E bits for EL1
CPU_LE( bic x0, x0, #(3 << 24)  )   // Clear the EE and E0E bits for EL1
    msr sctlr_el1, x0
    mov w0, #BOOT_CPU_MODE_EL1      // This cpu booted in EL1
    isb
    ret

2:
#ifdef CONFIG_ARM64_VHE
    /*
     * Check for VHE being present. For the rest of the EL2 setup,
     * x2 being non-zero indicates that we do have VHE, and that the
     * kernel is intended to run at EL2.
     */
    mrs x2, id_aa64mmfr1_el1
    ubfx    x2, x2, #8, #4
#else
    mov x2, xzr
#endif

    /* Hyp configuration. */
    mov x0, #HCR_RW         // 64-bit EL1
    cbz x2, set_hcr
    orr x0, x0, #HCR_TGE        // Enable Host Extensions
    orr x0, x0, #HCR_E2H
set_hcr:
    msr hcr_el2, x0
    isb

    /*
     * Allow Non-secure EL1 and EL0 to access physical timer and counter.
     * This is not necessary for VHE, since the host kernel runs in EL2,
     * and EL0 accesses are configured in the later stage of boot process.
     * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
     * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
     * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
     * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
     * EL2.
     */
    cbnz    x2, 1f
    mrs x0, cnthctl_el2
    orr x0, x0, #3          // Enable EL1 physical timers
    msr cnthctl_el2, x0
1:
    msr cntvoff_el2, xzr        // Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
    /* GICv3 system register access */
    mrs x0, id_aa64pfr0_el1
    ubfx    x0, x0, #24, #4
    cmp x0, #1
    b.ne    3f

    mrs_s   x0, ICC_SRE_EL2
    orr x0, x0, #ICC_SRE_EL2_SRE    // Set ICC_SRE_EL2.SRE==1
    orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
    msr_s   ICC_SRE_EL2, x0
    isb                 // Make sure SRE is now set
    mrs_s   x0, ICC_SRE_EL2         // Read SRE back,
    tbz x0, #0, 3f          // and check that it sticks
    msr_s   ICH_HCR_EL2, xzr        // Reset ICC_HCR_EL2 to defaults

3:
#endif

    /* Populate ID registers. */
    mrs x0, midr_el1
    mrs x1, mpidr_el1
    msr vpidr_el2, x0
    msr vmpidr_el2, x1

    /*
     * When VHE is not in use, early init of EL2 and EL1 needs to be
     * done here.
     * When VHE _is_ in use, EL1 will not be used in the host and
     * requires no configuration, and all non-hyp-specific EL2 setup
     * will be done via the _EL1 system register aliases in __cpu_setup.
     */
    cbnz    x2, 1f

    /* sctlr_el1 */
    mov x0, #0x0800         // Set/clear RES{1,0} bits
CPU_BE( movk    x0, #0x33d0, lsl #16    )   // Set EE and E0E on BE systems
CPU_LE( movk    x0, #0x30d0, lsl #16    )   // Clear EE and E0E on LE systems
    msr sctlr_el1, x0

    /* Coprocessor traps. */
    mov x0, #0x33ff
    msr cptr_el2, x0            // Disable copro. traps to EL2
1:

#ifdef CONFIG_COMPAT
    msr hstr_el2, xzr           // Disable CP15 traps to EL2
#endif

    /* EL2 debug */
    mrs x0, id_aa64dfr0_el1     // Check ID_AA64DFR0_EL1 PMUVer
    sbfx    x0, x0, #8, #4
    cmp x0, #1
    b.lt    4f              // Skip if no PMU present
    mrs x0, pmcr_el0            // Disable debug access traps
    ubfx    x0, x0, #11, #5         // to EL2 and allow access to
4:
    csel    x0, xzr, x0, lt         // all PMU counters from EL1
    msr mdcr_el2, x0            // (if they exist)

    /* Stage-2 translation */
    msr vttbr_el2, xzr

    cbz x2, install_el2_stub

    mov w0, #BOOT_CPU_MODE_EL2      // This CPU booted in EL2
    isb
    ret

install_el2_stub:
    /* Hypervisor stub */
    adrp    x0, __hyp_stub_vectors
    add x0, x0, #:lo12:__hyp_stub_vectors
    msr vbar_el2, x0

    /* spsr */
    mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
              PSR_MODE_EL1h)
    msr spsr_el2, x0
    msr elr_el2, lr
    mov w0, #BOOT_CPU_MODE_EL2      // This CPU booted in EL2
    eret                    // drop to EL1, "return" to caller
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
    adr_l   x1, __boot_cpu_mode
    cmp w0, #BOOT_CPU_MODE_EL2
    b.ne    1f
    add x1, x1, #4          // EL2 mode goes in the second slot
1:  str w0, [x1]            // Record the CPU boot mode
    dmb sy
    dc  ivac, x1            // Invalidate potentially stale cache line
    ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
    .pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
    .long   BOOT_CPU_MODE_EL2
    .long   BOOT_CPU_MODE_EL1
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
    .long   0

    .popsection

0670     /*
0671      * This provides a "holding pen" for platforms to hold all secondary
0672      * cores are held until we're ready for them to initialise.
0673      */
0674 ENTRY(secondary_holding_pen)
0675     bl  el2_setup           // Drop to EL1, w0=cpu_boot_mode
0676     bl  set_cpu_boot_mode_flag
0677     mrs x0, mpidr_el1
0678     mov_q   x1, MPIDR_HWID_BITMASK
0679     and x0, x0, x1
0680     adr_l   x3, secondary_holding_pen_release
0681 pen:    ldr x4, [x3]
0682     cmp x4, x0
0683     b.eq    secondary_startup
0684     wfe
0685     b   pen
0686 ENDPROC(secondary_holding_pen)
0687 
0688     /*
0689      * Secondary entry point that jumps straight into the kernel. Only to
0690      * be used where CPUs are brought online dynamically by the kernel.
0691      */
0692 ENTRY(secondary_entry)
0693     bl  el2_setup           // Drop to EL1
0694     bl  set_cpu_boot_mode_flag
0695     b   secondary_startup
0696 ENDPROC(secondary_entry)
0697 
secondary_startup:
    /*
     * Common entry point for secondary CPUs.
     */
    bl  __cpu_setup         // initialise processor
    bl  __enable_mmu
    ldr x8, =__secondary_switched   // absolute (virtual) address
    br  x8
ENDPROC(secondary_startup)

__secondary_switched:
    adr_l   x5, vectors
    msr vbar_el1, x5
    isb

    adr_l   x0, secondary_data
    ldr x1, [x0, #CPU_BOOT_STACK]   // get secondary_data.stack
    mov sp, x1
    ldr x2, [x0, #CPU_BOOT_TASK]
    msr sp_el0, x2
    mov x29, #0             // terminate the frame-pointer chain
    b   secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

    .macro  update_early_cpu_boot_status status, tmp1, tmp2
    mov \tmp2, #\status
    adr_l   \tmp1, __early_cpu_boot_status
    str \tmp2, [\tmp1]
    dmb sy
    dc  ivac, \tmp1         // Invalidate potentially stale cache line
    .endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
ENTRY(__enable_mmu)
    mrs x1, ID_AA64MMFR0_EL1
    ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
    cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
    b.ne    __no_granule_support
    update_early_cpu_boot_status 0, x1, x2
    adrp    x1, idmap_pg_dir
    adrp    x2, swapper_pg_dir
    msr ttbr0_el1, x1           // load TTBR0
    msr ttbr1_el1, x2           // load TTBR1
    isb
    msr sctlr_el1, x0
    isb
    /*
     * Invalidate the local I-cache so that any instructions fetched
     * speculatively from the PoC are discarded, since they may have
     * been dynamically patched at the PoU.
     */
    ic  iallu
    dsb nsh
    isb
    ret
ENDPROC(__enable_mmu)

__no_granule_support:
    /* Indicate that this CPU can't boot and is stuck in the kernel */
    update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
    wfe                 // park the CPU: low-power wait loop
    wfi
    b   1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
    /*
     * Iterate over each entry in the relocation table, and apply the
     * relocations in place.
     */
    ldr w9, =__rela_offset      // offset to reloc table
    ldr w10, =__rela_size       // size of reloc table

    mov_q   x11, KIMAGE_VADDR       // default virtual offset
    add x11, x11, x23           // actual virtual offset
    add x9, x9, x11         // __va(.rela)
    add x10, x9, x10            // __va(.rela) + sizeof(.rela)

0:  cmp x9, x10
    b.hs    1f
    ldp x11, x12, [x9], #24     // x11 = r_offset, x12 = r_info
    ldr x13, [x9, #-8]          // x13 = r_addend
    cmp w12, #R_AARCH64_RELATIVE
    b.ne    0b              // skip anything that isn't RELATIVE
    add x13, x13, x23           // relocate
    str x13, [x11, x23]
    b   0b
1:  ret
ENDPROC(__relocate_kernel)
#endif

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
    mov x19, x0             // preserve new SCTLR_EL1 value
    mrs x20, sctlr_el1          // preserve old SCTLR_EL1 value
#endif

    bl  __enable_mmu
#ifdef CONFIG_RELOCATABLE
    bl  __relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
    ldr x8, =__primary_switched
    adrp    x0, __PHYS_OFFSET
    blr x8

    /*
     * If we return here, we have a KASLR displacement in x23 which we need
     * to take into account by discarding the current kernel mapping and
     * creating a new one.
     */
    msr sctlr_el1, x20          // disable the MMU
    isb
    bl  __create_page_tables        // recreate kernel mapping

    tlbi    vmalle1             // Remove any stale TLB entries
    dsb nsh

    msr sctlr_el1, x19          // re-enable the MMU
    isb
    ic  iallu               // flush instructions fetched
    dsb nsh             // via old mapping
    isb

    bl  __relocate_kernel
#endif
#endif
    ldr x8, =__primary_switched
    adrp    x0, __PHYS_OFFSET
    br  x8
ENDPROC(__primary_switch)