0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/linkage.h>
0013 #include <linux/init.h>
0014 #include <linux/pgtable.h>
0015
0016 #include <asm/asm_pointer_auth.h>
0017 #include <asm/assembler.h>
0018 #include <asm/boot.h>
0019 #include <asm/bug.h>
0020 #include <asm/ptrace.h>
0021 #include <asm/asm-offsets.h>
0022 #include <asm/cache.h>
0023 #include <asm/cputype.h>
0024 #include <asm/el2_setup.h>
0025 #include <asm/elf.h>
0026 #include <asm/image.h>
0027 #include <asm/kernel-pgtable.h>
0028 #include <asm/kvm_arm.h>
0029 #include <asm/memory.h>
0030 #include <asm/pgtable-hwdef.h>
0031 #include <asm/page.h>
0032 #include <asm/scs.h>
0033 #include <asm/smp.h>
0034 #include <asm/sysreg.h>
0035 #include <asm/thread_info.h>
0036 #include <asm/virt.h>
0037
0038 #include "efi-header.S"
0039
0040 #if (PAGE_OFFSET & 0x1fffff) != 0
0041 #error PAGE_OFFSET must be at least 2MB aligned
0042 #endif
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
	__HEAD

	/*
	 * DO NOT MODIFY. Image header expected by Linux boot loaders.
	 * The layout (NOP, branch, load offset, size, flags, reserved words,
	 * magic, PE header offset) is ABI per Documentation/arm64/booting.rst.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.

	__EFI_PE_HEADER				// PE/COFF header for EFI loaders (efi-header.S)

	__INIT
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
/*
 * Kernel startup entry point for the boot CPU, branched to from the image
 * header. Runs with the MMU off, in an unknown cache state.
 *
 * Register conventions established here (live across the early boot calls):
 *   x20 = boot mode / boot status returned by init_kernel_el
 *   x21 = FDT pointer (set by preserve_boot_args)
 *   x25 = chosen vabits_actual (only when VA_BITS > 48)
 */
SYM_CODE_START(primary_entry)
	bl	preserve_boot_args
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0				// preserve boot mode for __primary_switched
	bl	create_idmap

	/*
	 * If VA_BITS > 48, select the effective VA size: use VA_BITS only if
	 * the CPU advertises LVA support in ID_AA64MMFR2_EL1, otherwise fall
	 * back to VA_BITS_MIN. The result is kept in x25 and passed to
	 * __cpu_setup in x0.
	 */
#if VA_BITS > 48
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	tst	x0, #0xf << ID_AA64MMFR2_LVA_SHIFT	// LVA field == 0 -> no 52-bit VA
	mov	x0, #VA_BITS
	mov	x25, #VA_BITS_MIN
	csel	x25, x25, x0, eq		// x25 = LVA absent ? VA_BITS_MIN : VA_BITS
	mov	x0, x25
#endif
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)
0111
0112
0113
0114
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3 into the
 * boot_args[] array, and clean+invalidate that cache line so the values
 * survive the later cache/MMU state changes (we are running with the MMU
 * off here).
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
SYM_CODE_END(preserve_boot_args)
0128
/*
 * Zero the init_pg_dir .. init_pg_end page table region before it is
 * populated by create_kernel_mapping. Tail-calls memset(init_pg_dir, 0,
 * init_pg_end - init_pg_dir).
 */
SYM_FUNC_START_LOCAL(clear_page_tables)
	adrp	x0, init_pg_dir			// x0 = dest
	adrp	x1, init_pg_end
	sub	x2, x1, x0			// x2 = size of region
	mov	x1, xzr				// x1 = fill byte (0)
	b	__pi_memset			// tail call
SYM_FUNC_END(clear_page_tables)
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
/*
 * Macro to populate page table entries; the entries can be pointers to the
 * next level table, or last-level block/page entries pointing to physical
 * memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to next-level page table or to physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags to OR into each page table entry
 *	inc:	increment applied to rtbl per entry
 *	tmp1:	temporary register
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl (advanced past the entries just written)
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags		// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc		// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex			// loop until eindex written (inclusive)
	b.ls	.Lpe\@
	.endm
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
/*
 * Compute the start/end table indices for a virtual address range at one
 * translation level. If multiple entries were needed at the previous level,
 * the tables at this level are assumed to be physically contiguous pages,
 * so the end index is scaled accordingly.
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend]
 *	shift:	shift used to transform a virtual address into an index
 *	order:	#imm 2log(number of entries in the table)
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	on entry: how many extra entries were required at the
 *		previous level (scales our end index);
 *		on exit: how many extra entries are required for the next
 *		page table level
 */
	.macro compute_indices, vstart, vend, shift, order, istart, iend, count
	ubfx	\istart, \vstart, \shift, \order
	ubfx	\iend, \vend, \shift, \order
	add	\iend, \iend, \count, lsl \order	// scale end index by prior count
	sub	\count, \iend, \istart
	.endm
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
/*
 * Map a memory region for the specified virtual address range, populating
 * as many page table levels as SWAPPER_PGTABLE_LEVELS requires. Each level
 * may use multiple physically contiguous table pages.
 *
 *	tbl:	location of the root page table
 *	rtbl:	scratch; address used for the first next-level table
 *		(tbl + PAGE_SIZE)
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range (exclusive; decremented below)
 *	flags:	flags to use for the last-level entries
 *	phys:	physical address corresponding to vstart - memory is assumed
 *		physically contiguous
 *	order:	#imm 2log(number of entries in the root table)
 *	extra_shift: if present, shift of one additional level above
 *		PGDIR_SHIFT; that level is only populated when vend does
 *		not fit in extra_shift bits
 *
 * Temporaries: istart, iend, tmp, count, sv - must all be distinct registers
 * Preserves:	vstart, flags
 * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
	sub \vend, \vend, #1			// make vend inclusive
	add \rtbl, \tbl, #PAGE_SIZE		// next-level tables follow the root
	mov \count, #0

	.ifnb	\extra_shift
	tst	\vend, #~((1 << (\extra_shift)) - 1)	// range fits without extra level?
	b.eq	.L_\@
	compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv				// descend into the table just created
	.endif
.L_\@:
	compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv				// descend one level

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
	mov \sv, \rtbl
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	/* Last level: block/page entries mapping [phys, ...) with \flags. */
	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
	bic \rtbl, \phys, #SWAPPER_BLOCK_SIZE - 1	// align output address to block size
	populate_entries \tbl, \rtbl, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
/*
 * Remap a subregion created with the map_memory macro with modified
 * attributes or a different output address. The entire region being
 * remapped must have been covered by the original map_memory invocation.
 *
 *	x0: last-level table address (as set up by map_memory)
 *	x1: start VA of the existing mapping
 *	x2: start VA of the region to update
 *	x3: end VA of the region to update (exclusive)
 *	x4: start PA to associate with the region
 *	x5: attributes to set on the updated region
 *	x6: shift (log2 block size) of the last-level mappings
 *
 * Clobbers x1-x7; returns via lr.
 */
SYM_FUNC_START_LOCAL(remap_region)
	sub	x3, x3, #1		// make end inclusive

	// Get the index offset for the start of the last level table
	lsr	x1, x1, x6
	bfi	x1, xzr, #0, #PAGE_SHIFT - 3

	// Derive the start and end indexes into the last level table
	// associated with the provided region
	lsr	x2, x2, x6
	lsr	x3, x3, x6
	sub	x2, x2, x1		// x2 = start index
	sub	x3, x3, x1		// x3 = end index (inclusive)

	mov	x1, #1
	lsl	x6, x1, x6		// block size at this level

	populate_entries x0, x4, x2, x3, x5, x6, x7
	ret
SYM_FUNC_END(remap_region)
0282
/*
 * Create an identity (VA == PA) mapping in init_idmap_pg_dir covering the
 * kernel image plus room for the FDT, so that execution can continue at the
 * same address once the MMU is switched on. The kernel text is mapped RX;
 * the init page table region and the FDT window are then remapped RW.
 *
 * Outputs kept live for later boot code:
 *   x22 = remapped (ID-map) address of the FDT
 * Clobbers x0-x14; saves/restores lr in x28.
 */
SYM_FUNC_START_LOCAL(create_idmap)
	mov	x28, lr			// preserve lr across the bl calls below

	/*
	 * Size the ID-map root table. When VA_BITS < 48 the kernel may be
	 * loaded at a physical address that needs more translation bits than
	 * the regular VA space provides, so map_memory is given EXTRA_SHIFT
	 * to conditionally add one level above PGDIR_SHIFT. The #error below
	 * asserts that exactly one extra level suffices for this page size.
	 */
#if (VA_BITS < 48)
#define IDMAP_PGD_ORDER	(VA_BITS - PGDIR_SHIFT)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)

#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
#else
	/*
	 * With VA_BITS >= 48 no extra level is needed, but the root table
	 * order must cover the whole physical address space.
	 */
#define IDMAP_PGD_ORDER	(PHYS_MASK_SHIFT - PGDIR_SHIFT)
#define EXTRA_SHIFT
#endif
	adrp	x0, init_idmap_pg_dir	// x0 = ID-map root table
	adrp	x3, _text		// x3 = PA of start of image (= VA in the ID map)
	adrp	x6, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE	// end of region to map
	mov	x7, SWAPPER_RX_MMUFLAGS	// map everything RX to begin with

	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT

	/* Remap the init page table region read-write in the ID map. */
	adrp	x1, _text
	adrp	x2, init_pg_dir
	adrp	x3, init_pg_end
	bic	x4, x2, #SWAPPER_BLOCK_SIZE - 1
	mov	x5, SWAPPER_RW_MMUFLAGS
	mov	x6, #SWAPPER_BLOCK_SHIFT
	bl	remap_region

	/* Remap the FDT (at x21) into the window just past the image, RW. */
	adrp	x1, _text
	adrp	x22, _end + SWAPPER_BLOCK_SIZE
	bic	x2, x22, #SWAPPER_BLOCK_SIZE - 1
	bfi	x22, x21, #0, #SWAPPER_BLOCK_SHIFT	// remapped FDT address
	add	x3, x2, #MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE
	bic	x4, x21, #SWAPPER_BLOCK_SIZE - 1
	mov	x5, SWAPPER_RW_MMUFLAGS
	mov	x6, #SWAPPER_BLOCK_SHIFT
	bl	remap_region

	/*
	 * The page tables were written with the MMU (and therefore caching)
	 * disabled, so invalidate the region to PoC to get rid of any
	 * speculatively loaded, potentially stale cache lines before the
	 * MMU and caches come on.
	 */
	dmb	sy			// order table writes before dc ivac

	adrp	x0, init_idmap_pg_dir
	adrp	x1, init_idmap_pg_end
	bl	dcache_inval_poc
	ret	x28
SYM_FUNC_END(create_idmap)
0370
/*
 * Populate init_pg_dir with a mapping of the kernel image at its virtual
 * address KIMAGE_VADDR (+ KASLR displacement x23 when relocatable), backed
 * by the physical addresses the image was actually loaded at.
 *
 * Clobbers x0-x14; returns via lr.
 */
SYM_FUNC_START_LOCAL(create_kernel_mapping)
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
#ifdef CONFIG_RELOCATABLE
	add	x5, x5, x23			// add KASLR displacement
#endif
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	mov	x7, SWAPPER_RW_MMUFLAGS		// initial mapping is RW throughout

	map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14

	dsb	ishst				// sync with page table walker
	ret
SYM_FUNC_END(create_kernel_mapping)
0388
0389
0390
0391
0392
0393
0394
0395
0396
/*
 * Initialize CPU registers with task-specific and CPU-specific context:
 * current task pointer in sp_el0, the task's kernel stack in sp, a final
 * frame record at the base of the stack so the unwinder can terminate, the
 * shadow call stack pointer (if enabled), and this CPU's per-CPU offset.
 *
 *	tsk:	pointer to the task_struct to install
 *	tmp1, tmp2: scratch registers (tmp2 is used as a 32-bit w register)
 */
	.macro init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk			// sp_el0 = 'current' task pointer

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE		// top of the task's kernel stack
	sub	sp, sp, #PT_REGS_SIZE		// reserve pt_regs, like user tasks

	stp	xzr, xzr, [sp, #S_STACKFRAME]	// terminal frame record (fp=lr=0)
	add	x29, sp, #S_STACKFRAME

	scs_load \tsk				// shadow call stack, if configured

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]	// tmp2 = this task's CPU number
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]	// tmp1 = __per_cpu_offset[cpu]
	set_this_cpu_offset \tmp1
	.endm
0414
0415
0416
0417
0418
0419
/*
 * The following fragment of code is executed with the MMU enabled, running
 * from the kernel's virtual mapping. It finishes early setup and calls
 * start_kernel(), which must never return.
 *
 *   x0  = __pa(KERNEL_START)   (set by __primary_switch)
 *   x20 = boot status from init_kernel_el
 *   x21 = FDT pointer
 *   x24 = KASLR memstart offset seed (CONFIG_RANDOMIZE_BASE)
 *   x25 = effective VA bits (VA_BITS > 48)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6		// run on init_task's stack from here on

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!		// frame record for the unwinder
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#if VA_BITS > 48
	adr_l	x8, vabits_actual		// Set this early so KASAN early init
	str	x25, [x8]			// ... observes the correct value
	dc	civac, x8			// Make visible to booting secondaries
#endif

#ifdef CONFIG_RANDOMIZE_BASE
	adrp	x5, memstart_offset_seed	// Save KASLR linear map seed
	strh	w24, [x5, :lo12:memstart_offset_seed]
#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x21				// pass FDT address in x0
	bl	early_fdt_map			// Try mapping the FDT early
	mov	x0, x20				// pass the full boot status
	bl	init_feature_override		// Parse cpu feature overrides
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel			// does not return
	ASM_BUG()
SYM_FUNC_END(__primary_switched)
0471
0472
0473
0474
0475
	/*
	 * The code below runs before and across the MMU switch, so it must
	 * live in the identity-mapped section.
	 */
	.section ".idmap.text","awx"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the exception
 * level the kernel will run at, with a known-good SCTLR value (MMU off).
 *
 * Returns in w0 either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2, with
 * BOOT_CPU_FLAG_E2H OR-ed in when HCR_EL2.E2H is (or is stuck) set.
 * "Returns" to the caller via an eret, so lr must hold the return address
 * and the caller must be covered by the current (flat) address map.
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF	// sane SCTLR_EL1, MMU off
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr			// eret back to the caller
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	mov_q	x0, HCR_HOST_NVHE_FLAGS		// request nVHE host configuration
	msr	hcr_el2, x0
	isb

	init_el2_state				// initialise the EL2 state (el2_setup.h)

	/* Install the hypervisor stub vectors. */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	/*
	 * Read back HCR_EL2.E2H: if it is still set despite the nVHE flags
	 * written above, this CPU has VHE forced on (E2H effectively RES1 —
	 * TODO confirm against HCR_HOST_NVHE_FLAGS), so SCTLR_EL1 must be
	 * written through its EL12 alias and the E2H flag reported in x0.
	 */
	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 1f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	2f

1:
	msr	sctlr_el1, x1			// the non-VHE way
	mov	x2, xzr
2:
	msr	elr_el2, lr			// eret back to the caller
	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2			// OR in BOOT_CPU_FLAG_E2H if VHE
	eret
SYM_FUNC_END(init_kernel_el)
0542
0543
0544
0545
0546
/*
 * Record in __boot_cpu_mode the exception level this CPU booted at:
 * word 0 is written for EL1 boots, word 1 for EL2 boots, so a mismatch
 * between CPUs can be detected later.
 *
 *	w0: boot mode (BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2)
 * Clobbers x1.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4			// EL2 boots use the second word
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)
0555
0556
0557
0558
0559
/*
 * Holding pen for secondary CPUs (spin-table boot method): each secondary
 * spins (wfe) until secondary_holding_pen_release is set to its own MPIDR
 * hardware ID, then proceeds to secondary_startup.
 */
SYM_FUNC_START(secondary_holding_pen)
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1			// x2 = this CPU's hardware ID
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2				// released for this CPU?
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)
0572
0573
0574
0575
0576
/*
 * Secondary entry point used when the CPU is released directly (e.g. PSCI
 * cpu_on): no holding pen is needed, go straight to secondary_startup.
 */
SYM_FUNC_START(secondary_entry)
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)
0581
/*
 * Common entry path for all secondary CPUs: validate/configure the CPU,
 * enable the MMU using the page tables already built by the boot CPU, and
 * jump to the (virtual) address of __secondary_switched.
 */
SYM_FUNC_START_LOCAL(secondary_startup)
	mov	x20, x0				// preserve boot mode
	bl	finalise_el2			// Prefer VHE if possible
	bl	__cpu_secondary_check52bitva	// park if 52-bit VA is needed but absent
#if VA_BITS > 48
	ldr_l	x0, vabits_actual		// use the VA size chosen by the boot CPU
#endif
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir		// TTBR1: the real kernel tables
	adrp	x2, idmap_pg_dir		// TTBR0: identity map for the switch
	bl	__enable_mmu
	ldr	x8, =__secondary_switched	// absolute virtual address
	br	x8
SYM_FUNC_END(secondary_startup)
0599
/*
 * Executed by a secondary CPU once the MMU is on: record the boot mode,
 * signal success via __early_cpu_boot_status, install the vectors, pick up
 * the idle task prepared in secondary_data, and enter the C world.
 */
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20				// boot mode preserved by secondary_startup
	bl	set_cpu_boot_mode_flag
	str_l	xzr, __early_cpu_boot_status, x3	// 0 = this CPU came up fine
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]	// task set up by the boot CPU
	cbz	x2, __secondary_too_slow	// released before a task was posted

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel		// does not return
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)
0621
/*
 * Park a secondary CPU that came up before its boot task was published:
 * spin forever in low-power waits.
 */
SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
/*
 * Record a boot-status value in __early_cpu_boot_status with the MMU off,
 * making sure it is committed to memory (PoC) so the boot CPU, running
 * with caches on, observes it.
 *
 *	status:	immediate status value to store
 *	tmp1, tmp2: scratch registers (corrupted)
 */
	.macro update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy				// order the store before the cache op
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
/*
 * Enable the MMU.
 *
 *	x0 = SCTLR_EL1 value for turning the MMU on
 *	x1 = TTBR1_EL1 value
 *	x2 = physical address of the ID-map root table for TTBR0
 *
 * Returns to the caller via lr, which therefore must point into the ID map.
 * CPUs whose granule support (ID_AA64MMFR0) is outside the supported range
 * are parked in __no_granule_support instead.
 */
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4	// x3 = TGRAN field for this page size
	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3			// load TTBR1 (handles 52-bit PA baggage)

	set_sctlr_el1	x0			// write SCTLR_EL1 and synchronise

	ret
SYM_FUNC_END(__enable_mmu)
0674
/*
 * If the kernel has decided to use 52-bit VAs (vabits_actual == 52) but
 * this secondary CPU lacks LVA support (ID_AA64MMFR2_EL1), report
 * CPU_STUCK_REASON_52_BIT_VA to the boot CPU and park forever. Otherwise
 * return normally.
 */
SYM_FUNC_START(__cpu_secondary_check52bitva)
#if VA_BITS > 48
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f				// not using 52-bit VAs: nothing to check

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f				// LVA supported: OK

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe					// park this CPU permanently
	wfi
	b	1b

#endif
2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
0694
/*
 * This CPU does not support the configured page-size granule: report
 * CPU_STUCK_REASON_NO_GRAN to the boot CPU and park forever.
 */
SYM_FUNC_START_LOCAL(__no_granule_support)
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe					// park this CPU permanently
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)
0704
#ifdef CONFIG_RELOCATABLE
/*
 * Apply the kernel's dynamic relocations in place, adjusting for the KASLR
 * displacement held in x23 (0 when not randomized). Only R_AARCH64_RELATIVE
 * entries are processed. Clobbers x9-x14.
 */
SYM_FUNC_START_LOCAL(__relocate_kernel)
	adr_l	x9, __rela_start		// RELA table start
	adr_l	x10, __rela_end			// RELA table end (exclusive)
	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset

0:	cmp	x9, x10				// done with the RELA table?
	b.hs	1f
	ldp	x12, x13, [x9], #24		// x12 = r_offset, x13 = r_info
	ldr	x14, [x9, #-8]			// x14 = r_addend
	cmp	w13, #R_AARCH64_RELATIVE	// skip anything else
	b.ne	0b
	add	x14, x14, x23			// relocate
	str	x14, [x12, x23]			// store at the displaced r_offset
	b	0b

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations (compressed relative relocations).
	 *
	 * A RELR entry with bit 0 clear is the address of a pointer-sized
	 * slot to relocate. An entry with bit 0 set is a bitmap: bits 1..63
	 * select which of the 63 slots following the most recent address
	 * entry (or previous bitmap's range) also need the same adjustment.
	 * x13 tracks the base slot address across entries.
	 */
	adr_l	x9, __relr_start
	adr_l	x10, __relr_end

2:	cmp	x9, x10				// done with the RELR table?
	b.hs	7f
	ldr	x11, [x9], #8			// x11 = next RELR entry
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23			// x13 = displaced slot address
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x23
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13			// x14 = current slot in bitmap range
4:	lsr	x11, x11, #1			// examine bitmap bits one at a time
	cbz	x11, 6f				// no more set bits in this bitmap
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x23
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:
	/*
	 * Each bitmap covers the 63 slots after the current base; advance
	 * the base past them before reading the next entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

SYM_FUNC_END(__relocate_kernel)
#endif
0795
/*
 * Turn the MMU on using the ID map (with an empty TTBR1), determine the
 * KASLR displacement (x23), build and install the real kernel mapping,
 * apply relocations, and jump to __primary_switched at its virtual address
 * with x0 = __pa(KERNEL_START).
 */
SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir		// empty TTBR1 for now
	adrp	x2, init_idmap_pg_dir		// TTBR0 = identity map
	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	adrp	x23, KERNEL_START
	and	x23, x23, MIN_KIMG_ALIGN - 1	// x23 = physical misalignment of the image
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x0, x22				// pass remapped FDT (set by create_idmap)
	adrp	x1, init_pg_end
	mov	sp, x1				// temporary stack below init_pg_end
	mov	x29, xzr
	bl	__pi_kaslr_early_init
	and	x24, x0, #SZ_2M - 1		// capture memstart offset seed
	bic	x0, x0, #SZ_2M - 1		// keep the 2 MiB aligned displacement
	orr	x23, x23, x0			// record kernel offset
#endif
#endif
	bl	clear_page_tables
	bl	create_kernel_mapping

	adrp	x1, init_pg_dir
	load_ttbr1 x1, x1, x2			// switch TTBR1 to the real kernel tables
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#endif
	ldr	x8, =__primary_switched		// absolute virtual address
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)