/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 * Copyright (C) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

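/*
 * turn_off_mmu - disable the MMU and caches at EL1.
 *
 * Loads INIT_SCTLR_EL1_MMU_OFF, a known-good SCTLR_EL1 value with the
 * MMU and caches disabled, and writes it to SCTLR_EL1.
 * pre_disable_mmu_workaround applies any CPU erratum workaround needed
 * immediately before the MMU is switched off, and the trailing isb
 * makes the new state take effect before any following instruction.
 */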
.macro turn_off_mmu tmp1, tmp2
    mov_q   \tmp1, INIT_SCTLR_EL1_MMU_OFF
    pre_disable_mmu_workaround
    msr sctlr_el1, \tmp1
    isb
.endm

.section    ".kexec_relocate.text", "ax"
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location.  To ensure that the
 * arm64_relocate_new_kernel routine, which does that copy, is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end.  The
 * machine_kexec() routine copies arm64_relocate_new_kernel to the kexec
 * safe memory that has been set up to be preserved during the copy operation.
 */
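/*
 * For orientation, the copy loop below walks the kimage entry list
 * roughly like the following C sketch (illustrative only; the list
 * format and the IND_* flags are defined in include/linux/kexec.h):
 *
 *	entry = kimage->head;
 *	do {
 *		addr = (entry & PAGE_MASK) - phys_offset; // phys -> virt
 *		if (entry & IND_SOURCE)
 *			copy_page(dest, addr);  // copy_page advances dest
 *		else if (entry & IND_INDIRECTION)
 *			ptr = addr;             // next page of entries
 *		else if (entry & IND_DESTINATION)
 *			dest = addr;
 *		entry = *ptr++;
 *	} while (!(entry & IND_DONE));
 */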
SYM_CODE_START(arm64_relocate_new_kernel)
    /*
     * The kimage structure isn't allocated specially and may be clobbered
     * during relocation. We must load any values we need from it prior to
     * any relocation occurring.
     */
    ldr x28, [x0, #KIMAGE_START]        /* x28 = new kernel entry point */
    ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* x27 = EL2 vectors, or 0 */
    ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM] /* x26 = dtb physical address */

    /* Setup the list loop variables. */
    ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
    ldr x17, [x0, #KIMAGE_ARCH_TTBR1]   /* x17 = linear map copy */
    ldr x16, [x0, #KIMAGE_HEAD]     /* x16 = kimage_head */
    ldr x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET] /* x22 = phys_offset */
    raw_dcache_line_size x15, x1        /* x15 = dcache line size */
    break_before_make_ttbr_switch   x18, x17, x1, x2 /* set linear map */
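    /*
     * From here on we run on a copy of the linear map installed in
     * TTBR1 via a break-before-make sequence (through the zero page),
     * so relocation can proceed with the MMU and caches enabled even
     * while the pages backing the original tables are overwritten.
     * The list entries hold physical addresses; subtracting
     * phys_offset below converts them to linear-map virtual addresses.
     */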
.Lloop:
    and x12, x16, PAGE_MASK     /* x12 = addr */
    sub x12, x12, x22           /* Convert x12 to virt */
    /* Test the entry flags. */
.Ltest_source:
    tbz x16, IND_SOURCE_BIT, .Ltest_indirection

    /* Copy the page, then clean+invalidate the destination to PoC. */
    mov x19, x13                /* x19 = saved dest */
    copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
    add x1, x19, #PAGE_SIZE
    dcache_by_myline_op civac, sy, x19, x1, x15, x20
    b   .Lnext
.Ltest_indirection:
    tbz x16, IND_INDIRECTION_BIT, .Ltest_destination
    mov x14, x12            /* ptr = addr */
    b   .Lnext
.Ltest_destination:
    tbz x16, IND_DESTINATION_BIT, .Lnext
    mov x13, x12            /* dest = addr */
.Lnext:
    ldr x16, [x14], #8          /* entry = *ptr++ */
    tbz x16, IND_DONE_BIT, .Lloop   /* while (!(entry & DONE)) */
    /*
     * Wait for the writes from copy_page to finish, then invalidate
     * the whole I-cache so stale lines cannot mask the new kernel text.
     */
    dsb nsh
    ic  iallu
    dsb nsh
    isb
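    /* All pages are in place; turn the MMU and caches off before handover. */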
    turn_off_mmu x12, x13

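    /*
     * If machine_kexec() installed EL2 'soft restart' vectors
     * (x27 != 0), ask the EL2 stub to jump to the new kernel via the
     * HVC_SOFT_RESTART hypercall; otherwise branch to it directly at
     * EL1. Either way the new image gets the dtb address in x0, as
     * the arm64 boot protocol requires.
     */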
    /* Start new image. */
    cbz x27, .Lel1
    mov x1, x28             /* kernel entry point */
    mov x2, x26             /* dtb address */
    mov x3, xzr
    mov x4, xzr
    mov x0, #HVC_SOFT_RESTART
    hvc #0              /* Jumps from el2 */
.Lel1:
    mov x0, x26             /* dtb address */
    mov x1, xzr
    mov x2, xzr
    mov x3, xzr
    br  x28             /* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)