Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2012,2013 - ARM Ltd
0004  * Author: Marc Zyngier <marc.zyngier@arm.com>
0005  */
0006 
0007 #include <linux/arm-smccc.h>
0008 #include <linux/linkage.h>
0009 
0010 #include <asm/alternative.h>
0011 #include <asm/assembler.h>
0012 #include <asm/el2_setup.h>
0013 #include <asm/kvm_arm.h>
0014 #include <asm/kvm_asm.h>
0015 #include <asm/kvm_mmu.h>
0016 #include <asm/pgtable-hwdef.h>
0017 #include <asm/sysreg.h>
0018 #include <asm/virt.h>
0019 
0020     .text
0021     .pushsection    .idmap.text, "ax"
0022 
0023     .align  11
0024 
/*
 * EL2 initialisation vector table (2KB-aligned via the preceding
 * .align 11). The only entry expected to fire is "Synchronous
 * 64-bit EL1" -- the host HVCs into here from EL1 -- which routes
 * to __do_hyp_init. Every other vector parks the CPU at __invalid.
 */
0025 SYM_CODE_START(__kvm_hyp_init)
0026     ventry  __invalid       // Synchronous EL2t
0027     ventry  __invalid       // IRQ EL2t
0028     ventry  __invalid       // FIQ EL2t
0029     ventry  __invalid       // Error EL2t
0030 
0031     ventry  __invalid       // Synchronous EL2h
0032     ventry  __invalid       // IRQ EL2h
0033     ventry  __invalid       // FIQ EL2h
0034     ventry  __invalid       // Error EL2h
0035 
0036     ventry  __do_hyp_init       // Synchronous 64-bit EL1
0037     ventry  __invalid       // IRQ 64-bit EL1
0038     ventry  __invalid       // FIQ 64-bit EL1
0039     ventry  __invalid       // Error 64-bit EL1
0040 
0041     ventry  __invalid       // Synchronous 32-bit EL1
0042     ventry  __invalid       // IRQ 32-bit EL1
0043     ventry  __invalid       // FIQ 32-bit EL1
0044     ventry  __invalid       // Error 32-bit EL1
0045 
0046 __invalid:
0047     b   .               // Unexpected exception: park the CPU here forever
0048 
0049     /*
0050      * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
0051      *
0052      * x0: SMCCC function ID
0053      * x1: struct kvm_nvhe_init_params PA
0054      */
0055 __do_hyp_init:
0056     /* Check for a stub HVC call */
0057     cmp x0, #HVC_STUB_HCALL_NR
0058     b.lo    __kvm_handle_stub_hvc       // IDs below the stub range are stub calls
0059 
0060     mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
0061     cmp x0, x3
0062     b.eq    1f
0063 
0064     /* Neither a stub call nor the init function ID: reject it */
0065     mov x0, #SMCCC_RET_NOT_SUPPORTED
0066     eret
0067 
0068 1:  mov x0, x1              // params PA becomes the sole argument
0069     mov x3, lr              // preserve lr across the call (x3 survives it)
0070     bl  ___kvm_hyp_init         // Clobbers x0..x2
0071     mov lr, x3
0072 
0073     /* EL2 is up: report success to the SMCCC caller */
0074     mov x0, #SMCCC_RET_SUCCESS
0075     eret
0076 SYM_CODE_END(__kvm_hyp_init)
0076 
0077 /*
0078  * Initialize the hypervisor in EL2.
0079  *
0080  * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
0081  * and leave x3 for the caller.
0082  *
0083  * x0: struct kvm_nvhe_init_params PA
0084  */
0085 SYM_CODE_START_LOCAL(___kvm_hyp_init)
0086     ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
0087     msr tpidr_el2, x1           // hyp per-CPU pointer, per the field name
0088 
0089     ldr x1, [x0, #NVHE_INIT_STACK_HYP_VA]
0090     mov sp, x1              // hyp VA: only usable once the MMU is on below
0091 
0092     ldr x1, [x0, #NVHE_INIT_MAIR_EL2]
0093     msr mair_el2, x1
0094 
0095     ldr x1, [x0, #NVHE_INIT_HCR_EL2]
0096     msr hcr_el2, x1
0097 
0098     ldr x1, [x0, #NVHE_INIT_VTTBR]
0099     msr vttbr_el2, x1
0100 
0101     ldr x1, [x0, #NVHE_INIT_VTCR]
0102     msr vtcr_el2, x1
0103 
0104     ldr x1, [x0, #NVHE_INIT_PGD_PA]
0105     phys_to_ttbr x2, x1         // fold the PGD PA into TTBR format
0106 alternative_if ARM64_HAS_CNP
0107     orr x2, x2, #TTBR_CNP_BIT       // Common-not-Private, if the CPU has it
0108 alternative_else_nop_endif
0109     msr ttbr0_el2, x2
0110 
0111     /*
0112      * Set the PS bits in TCR_EL2.
0113      * Note: this consumes x0, so the params pointer is dead from here on.
0114      */
0115     ldr x0, [x0, #NVHE_INIT_TCR_EL2]
0116     tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
0117     msr tcr_el2, x0
0118 
0119     isb                 // make the sysreg writes visible before TLBI
0120 
0121     /* Invalidate the stale TLBs from Bootloader */
0122     tlbi    alle2
0123     tlbi    vmalls12e1
0124     dsb sy              // complete the invalidation before enabling the MMU
0125 
0126     mov_q   x0, INIT_SCTLR_EL2_MMU_ON
0127 alternative_if ARM64_HAS_ADDRESS_AUTH
0128     mov_q   x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
0129              SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
0130     orr x0, x0, x1          // enable pointer authentication at EL2
0131 alternative_else_nop_endif
0132     msr sctlr_el2, x0           // MMU on
0133     isb
0134 
0135     /* Set the host vector */
0136     ldr x0, =__kvm_hyp_host_vector
0137     msr vbar_el2, x0
0138 
0139     ret
0140 SYM_CODE_END(___kvm_hyp_init)
0140 
0141 /*
0142  * PSCI CPU_ON entry point
0143  *
0144  * x0: struct kvm_nvhe_init_params PA
0145  */
0146 SYM_CODE_START(kvm_hyp_cpu_entry)
0147     mov x1, #1              // is_cpu_on = true
0148     b   __kvm_hyp_init_cpu      // tail-branch: x0 (params PA) passes through
0149 SYM_CODE_END(kvm_hyp_cpu_entry)
0150 
0151 /*
0152  * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
0153  *
0154  * x0: struct kvm_nvhe_init_params PA
0155  */
0156 SYM_CODE_START(kvm_hyp_cpu_resume)
0157     mov x1, #0              // is_cpu_on = false
0158     b   __kvm_hyp_init_cpu      // tail-branch: x0 (params PA) passes through
0159 SYM_CODE_END(kvm_hyp_cpu_resume)
0160 
0161 /*
0162  * Common code for CPU entry points. Initializes EL2 state and
0163  * installs the hypervisor before handing over to a C handler.
0164  *
0165  * x0: struct kvm_nvhe_init_params PA
0166  * x1: bool is_cpu_on
0167  */
0168 SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
0169     mov x28, x0             // Stash arguments: x28/x29 survive the
0170     mov x29, x1             // macro/call clobbers of x0..x2 below
0171 
0172     /* Check that the core was booted in EL2. */
0173     mrs x0, CurrentEL
0174     cmp x0, #CurrentEL_EL2
0175     b.eq    2f
0176 
0177     /* The core booted in EL1. KVM cannot be initialized on it. */
0178 1:  wfe                 // park the CPU in a low-power loop
0179     wfi
0180     b   1b
0181 
0182 2:  msr SPsel, #1           // We want to use SP_EL{1,2}
0183 
0184     /* Initialize EL2 CPU state to sane values. */
0185     init_el2_state              // Clobbers x0..x2
0186 
0187     /* Enable MMU, set vectors and stack. */
0188     mov x0, x28
0189     bl  ___kvm_hyp_init         // Clobbers x0..x2
0190 
0191     /* Leave idmap. No return: lr is deliberately not preserved. */
0192     mov x0, x29             // is_cpu_on becomes the C handler's argument
0193     ldr x1, =kvm_host_psci_cpu_entry
0194     br  x1              // jump to the C handler's (non-idmap) address
0195 SYM_CODE_END(__kvm_hyp_init_cpu)
0196 
/*
 * Stub hypercall dispatcher.
 *
 * x0: stub function ID (HVC_SOFT_RESTART or HVC_RESET_VECTORS)
 * Both paths funnel into 'reset', which turns the MMU off and
 * reinstalls the hyp stub vectors; x5 is used as scratch there so
 * that x0-x2 (the SOFT_RESTART payload) survive.
 */
0197 SYM_CODE_START(__kvm_handle_stub_hvc)
0198     cmp x0, #HVC_SOFT_RESTART
0199     b.ne    1f              // not SOFT_RESTART: try RESET_VECTORS below
0200 
0201     /* This is where we're about to jump, staying at EL2 */
0202     msr elr_el2, x1
0203     mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
0204     msr spsr_el2, x0            // all of DAIF masked, EL2h on eret
0205 
0206     /* Shuffle the arguments, and don't come back */
0207     mov x0, x2
0208     mov x1, x3
0209     mov x2, x4
0210     b   reset
0211 
0212 1:  cmp x0, #HVC_RESET_VECTORS
0213     b.ne    1f              // unknown ID: fall through to the error eret
0214 
0215     /*
0216      * Set the HVC_RESET_VECTORS return code before entering the common
0217      * path so that we do not clobber x0-x2 in case we are coming via
0218      * HVC_SOFT_RESTART.
0219      */
0220     mov x0, xzr
0221 reset:
0222     /* Reset kvm back to the hyp stub. */
0223     mov_q   x5, INIT_SCTLR_EL2_MMU_OFF
0224     pre_disable_mmu_workaround
0225     msr sctlr_el2, x5           // MMU off
0226     isb
0227 
0228 alternative_if ARM64_KVM_PROTECTED_MODE
0229     mov_q   x5, HCR_HOST_NVHE_FLAGS
0230     msr hcr_el2, x5
0231 alternative_else_nop_endif
0232 
0233     /* Install stub vectors */
0234     adr_l   x5, __hyp_stub_vectors
0235     msr vbar_el2, x5
0236     eret
0237 
0238 1:  /* Bad stub call */
0239     mov_q   x0, HVC_STUB_ERR
0240     eret
0241 
0242 SYM_CODE_END(__kvm_handle_stub_hvc)
0243 
/*
 * Switch to the protected-KVM page tables.
 *
 * x0: struct kvm_nvhe_init_params PA (PGD PA and hyp stack VA read from it)
 * x1: address to branch to once the new tables are live ('ret x1')
 * Clobbers x2-x4; x2 carries the saved SCTLR_EL2 across the switch.
 *
 * The MMU must be off while ttbr0_el2 and sp are being replaced, hence
 * the disable / invalidate / re-enable bracket around the update.
 */
0244 SYM_FUNC_START(__pkvm_init_switch_pgd)
0245     /* Turn the MMU off */
0246     pre_disable_mmu_workaround
0247     mrs x2, sctlr_el2           // keep the current config to restore below
0248     bic x3, x2, #SCTLR_ELx_M
0249     msr sctlr_el2, x3
0250     isb
0251 
0252     tlbi    alle2           // drop translations for the old tables
0253 
0254     /* Install the new pgtables */
0255     ldr x3, [x0, #NVHE_INIT_PGD_PA]
0256     phys_to_ttbr x4, x3
0257 alternative_if ARM64_HAS_CNP
0258     orr x4, x4, #TTBR_CNP_BIT
0259 alternative_else_nop_endif
0260     msr ttbr0_el2, x4
0261 
0262     /* Set the new stack pointer */
0263     ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA]
0264     mov sp, x0
0265 
0266     /* And turn the MMU back on! */
0267     set_sctlr_el2   x2          // restore the saved SCTLR_EL2 value
0268     ret x1              // continue at the caller-supplied address
0269 SYM_FUNC_END(__pkvm_init_switch_pgd)
0270 
0271     .popsection