/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hypervisor stub
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author:  Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

// Warning: hardcoded register allocation.
// This clobbers x1 and x2, and expects x1 to contain
// the ID register value as read from the HW.
.macro __check_override idreg, fld, width, pass, fail
    ubfx    x1, x1, #\fld, #\width
    cbz x1, \fail

    adr_l   x1, \idreg\()_override
    ldr x2, [x1, FTR_OVR_VAL_OFFSET]
    ldr x1, [x1, FTR_OVR_MASK_OFFSET]
    ubfx    x2, x2, #\fld, #\width
    ubfx    x1, x1, #\fld, #\width
    cmp x1, xzr
    and x2, x2, x1
    csinv   x2, x2, xzr, ne
    cbnz    x2, \pass
    b   \fail
.endm
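
// In C-like terms, the macro above is roughly equivalent to the
// following sketch (illustrative only; mask() stands for a width-wide
// bit mask and is not a real helper):
//
//	hw = (idreg >> fld) & mask(width);
//	if (!hw)
//		goto fail;
//	ovr_val  = (idreg_override.val  >> fld) & mask(width);
//	ovr_mask = (idreg_override.mask >> fld) & mask(width);
//	if (!ovr_mask || (ovr_val & ovr_mask))
//		goto pass;	// no override, or override leaves it set
//	goto fail;		// override forces the feature off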

.macro check_override idreg, fld, pass, fail
    mrs x1, \idreg\()_el1
    __check_override \idreg \fld 4 \pass \fail
.endm
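
// (check_override reads the live ID register into x1 and tests a
// standard 4-bit feature field; see its uses in __finalise_el2 below)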

    .text
    .pushsection    .hyp.text, "ax"

    .align 11

SYM_CODE_START(__hyp_stub_vectors)
    ventry  el2_sync_invalid        // Synchronous EL2t
    ventry  el2_irq_invalid         // IRQ EL2t
    ventry  el2_fiq_invalid         // FIQ EL2t
    ventry  el2_error_invalid       // Error EL2t

    ventry  elx_sync            // Synchronous EL2h
    ventry  el2_irq_invalid         // IRQ EL2h
    ventry  el2_fiq_invalid         // FIQ EL2h
    ventry  el2_error_invalid       // Error EL2h

    ventry  elx_sync            // Synchronous 64-bit EL1
    ventry  el1_irq_invalid         // IRQ 64-bit EL1
    ventry  el1_fiq_invalid         // FIQ 64-bit EL1
    ventry  el1_error_invalid       // Error 64-bit EL1

    ventry  el1_sync_invalid        // Synchronous 32-bit EL1
    ventry  el1_irq_invalid         // IRQ 32-bit EL1
    ventry  el1_fiq_invalid         // FIQ 32-bit EL1
    ventry  el1_error_invalid       // Error 32-bit EL1
SYM_CODE_END(__hyp_stub_vectors)
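
// The table above is the architectural format: 16 entries of 128 bytes
// each, 2KB in total, which is why vector bases passed to
// HVC_SET_VECTORS must be 2KB aligned (matching the .align 11 here).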

    .align 11

SYM_CODE_START_LOCAL(elx_sync)
    cmp x0, #HVC_SET_VECTORS
    b.ne    1f
    msr vbar_el2, x1
    b   9f

1:  cmp x0, #HVC_FINALISE_EL2
    b.eq    __finalise_el2

2:  cmp x0, #HVC_SOFT_RESTART
    b.ne    3f
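    // x1 holds the restart entry point; shuffle x2-x4 down into
    // x0-x2 so they become its first three arguments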
    mov x0, x2
    mov x2, x4
    mov x4, x1
    mov x1, x3
    br  x4              // no return

3:  cmp x0, #HVC_RESET_VECTORS
    beq 9f              // Nothing to reset!

    /* Someone called kvm_call_hyp() against the hyp-stub... */
    mov_q   x0, HVC_STUB_ERR
    eret

9:  mov x0, xzr
    eret
SYM_CODE_END(elx_sync)
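
// Summarising the dispatch above, the stub's HVC convention is:
//	x0:    function ID (HVC_SET_VECTORS, HVC_FINALISE_EL2,
//	       HVC_SOFT_RESTART or HVC_RESET_VECTORS)
//	x1-x4: arguments, as consumed by the handlers above
// On return, x0 is 0 on success, or HVC_STUB_ERR for an unknown ID.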

SYM_CODE_START_LOCAL(__finalise_el2)
    check_override id_aa64pfr0 ID_AA64PFR0_SVE_SHIFT .Linit_sve .Lskip_sve

.Linit_sve: /* SVE register access */
    mrs x0, cptr_el2            // Disable SVE traps
    bic x0, x0, #CPTR_EL2_TZ
    msr cptr_el2, x0
    isb
    mov x1, #ZCR_ELx_LEN_MASK       // SVE: Enable full vector
    msr_s   SYS_ZCR_EL2, x1         // length for EL1.

.Lskip_sve:
    check_override id_aa64pfr1 ID_AA64PFR1_SME_SHIFT .Linit_sme .Lskip_sme

.Linit_sme: /* SME register access and priority mapping */
    mrs x0, cptr_el2            // Disable SME traps
    bic x0, x0, #CPTR_EL2_TSM
    msr cptr_el2, x0
    isb

    mrs x1, sctlr_el2
    orr x1, x1, #SCTLR_ELx_ENTP2    // Disable TPIDR2 traps
    msr sctlr_el2, x1
    isb

    mov x0, #0              // SMCR controls

    // Full FP in SM?
    mrs_s   x1, SYS_ID_AA64SMFR0_EL1
    __check_override id_aa64smfr0 ID_AA64SMFR0_EL1_FA64_SHIFT 1 .Linit_sme_fa64 .Lskip_sme_fa64

.Linit_sme_fa64:
    orr x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64:

    orr x0, x0, #SMCR_ELx_LEN_MASK  // Enable full SME vector
    msr_s   SYS_SMCR_EL2, x0        // length for EL1.

    mrs_s   x1, SYS_SMIDR_EL1       // Priority mapping supported?
    ubfx    x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
    cbz     x1, .Lskip_sme

    msr_s   SYS_SMPRIMAP_EL2, xzr       // Make all priorities equal

    mrs x1, id_aa64mmfr1_el1        // HCRX_EL2 present?
    ubfx    x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
    cbz x1, .Lskip_sme

    mrs_s   x1, SYS_HCRX_EL2
    orr x1, x1, #HCRX_EL2_SMPME_MASK    // Enable priority mapping
    msr_s   SYS_HCRX_EL2, x1

.Lskip_sme:

    // nVHE? No way! Give me the real thing!
    // Sanity check: MMU *must* be off
    mrs x1, sctlr_el2
    tbnz    x1, #0, 1f
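    // (SCTLR_EL2.M is bit 0; if the MMU is still on, take the error
    // path at 1: below)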

    // Needs to be VHE capable, obviously
    check_override id_aa64mmfr1 ID_AA64MMFR1_VHE_SHIFT 2f 1f

1:  mov_q   x0, HVC_STUB_ERR
    eret
2:
    // Engage the VHE magic!
    mov_q   x0, HCR_HOST_VHE_FLAGS
    msr hcr_el2, x0
    isb
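
    // With HCR_EL2.E2H now set, the unsuffixed *_el1 system register
    // names used below are redirected to their EL2 counterparts, while
    // the _EL12 accessors still reach the real EL1 registers: each
    // mrs/msr pair therefore copies EL1 state into EL2.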

    // Use the EL1 allocated stack, per-cpu offset
    mrs x0, sp_el1
    mov sp, x0
    mrs x0, tpidr_el1
    msr tpidr_el2, x0

    // FP configuration, vectors
    mrs_s   x0, SYS_CPACR_EL12
    msr cpacr_el1, x0
    mrs_s   x0, SYS_VBAR_EL12
    msr vbar_el1, x0

    // Use EL2 translations for SPE & TRBE and disable access from EL1
    mrs x0, mdcr_el2
    bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
    bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
    msr mdcr_el2, x0

    // Transfer the MM state from EL1 to EL2
    mrs_s   x0, SYS_TCR_EL12
    msr tcr_el1, x0
    mrs_s   x0, SYS_TTBR0_EL12
    msr ttbr0_el1, x0
    mrs_s   x0, SYS_TTBR1_EL12
    msr ttbr1_el1, x0
    mrs_s   x0, SYS_MAIR_EL12
    msr mair_el1, x0
    isb

    // Hack the exception return to stay at EL2
    mrs x0, spsr_el1
    and x0, x0, #~PSR_MODE_MASK
    mov x1, #PSR_MODE_EL2h
    orr x0, x0, x1
    msr spsr_el1, x0

    b   enter_vhe
SYM_CODE_END(__finalise_el2)

    // At the point where we reach enter_vhe(), we run with
    // the MMU off (which is enforced by __finalise_el2()).
    // We thus need to be in the idmap, or everything will
    // explode when enabling the MMU.

    .pushsection    .idmap.text, "ax"

SYM_CODE_START_LOCAL(enter_vhe)
    // Invalidate TLBs before enabling the MMU
    tlbi    vmalle1
    dsb nsh
    isb

    // Enable the EL2 S1 MMU, as set up from EL1
    mrs_s   x0, SYS_SCTLR_EL12
    set_sctlr_el1   x0

    // Disable the EL1 S1 MMU for good measure
    mov_q   x0, INIT_SCTLR_EL1_MMU_OFF
    msr_s   SYS_SCTLR_EL12, x0

    mov x0, xzr

    eret
SYM_CODE_END(enter_vhe)
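
// The eret above returns to the instruction following the original
// HVC_FINALISE_EL2 call in finalise_el2(), but now at EL2h thanks to
// the SPSR hack in __finalise_el2(), with x0 = 0 signalling success.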

    .popsection

.macro invalid_vector   label
SYM_CODE_START_LOCAL(\label)
    b \label
SYM_CODE_END(\label)
.endm

    invalid_vector  el2_sync_invalid
    invalid_vector  el2_irq_invalid
    invalid_vector  el2_fiq_invalid
    invalid_vector  el2_error_invalid
    invalid_vector  el1_sync_invalid
    invalid_vector  el1_irq_invalid
    invalid_vector  el1_fiq_invalid
    invalid_vector  el1_error_invalid

    .popsection

/*
 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
 * vectors as part of hypervisor installation.  On an SMP system, this should
 * be called on each CPU.
 *
 * x0 must be the physical address of the new vector table, and must be
 * 2KB aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that is_hyp_mode_available() is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_el2 will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 */

SYM_FUNC_START(__hyp_set_vectors)
    mov x1, x0
    mov x0, #HVC_SET_VECTORS
    hvc #0
    ret
SYM_FUNC_END(__hyp_set_vectors)
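
/*
 * Illustrative C-side usage (a sketch only; my_hyp_vectors is a
 * hypothetical 2KB-aligned vector table, not a symbol in this file):
 *
 *	if (is_hyp_mode_available())
 *		__hyp_set_vectors(__pa_symbol(my_hyp_vectors));
 */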

SYM_FUNC_START(__hyp_reset_vectors)
    mov x0, #HVC_RESET_VECTORS
    hvc #0
    ret
SYM_FUNC_END(__hyp_reset_vectors)

/*
 * Entry point to finalise EL2 and switch to VHE if deemed capable
 *
 * w0: boot mode, as returned by init_kernel_el()
 */
SYM_FUNC_START(finalise_el2)
    // Need to have booted at EL2
    cmp w0, #BOOT_CPU_MODE_EL2
    b.ne    1f

    // and still be at EL1
    mrs x0, CurrentEL
    cmp x0, #CurrentEL_EL1
    b.ne    1f

    mov x0, #HVC_FINALISE_EL2
    hvc #0
1:
    ret
SYM_FUNC_END(finalise_el2)