Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2012,2013 - ARM Ltd
0004  * Author: Marc Zyngier <marc.zyngier@arm.com>
0005  */
0006 
0007 #ifndef __ARM_KVM_INIT_H__
0008 #define __ARM_KVM_INIT_H__
0009 
0010 #ifndef __ASSEMBLY__
0011 #error Assembly-only header
0012 #endif
0013 
0014 #include <asm/kvm_arm.h>
0015 #include <asm/ptrace.h>
0016 #include <asm/sysreg.h>
0017 #include <linux/irqchip/arm-gic-v3.h>
0018 
/*
 * Put SCTLR_EL2 into a known-safe initial state (MMU off, per the
 * INIT_SCTLR_EL2_MMU_OFF constant). The isb guarantees the new
 * control state takes effect before any following instruction.
 * Clobbers: x0.
 */
.macro __init_el2_sctlr
    mov_q   x0, INIT_SCTLR_EL2_MMU_OFF
    msr sctlr_el2, x0
    isb                     // Synchronise the new SCTLR_EL2 value
.endm
0024 
/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured in the later stage of boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 *
 * Clobbers: x0.
 */
.macro __init_el2_timers
    mov x0, #3              // Enable EL1 physical timers
                            // (#3 = CNTHCTL_EL2.{EL1PCEN,EL1PCTEN})
    msr cnthctl_el2, x0
    msr cntvoff_el2, xzr        // Clear virtual offset
.endm
0040 
/*
 * Configure MDCR_EL2 so that EL1 owns the debug facilities we do not
 * need at EL2: PMU counters, Statistical Profiling (SPE) buffer and
 * the Trace Buffer (TRBE), each gated on the corresponding
 * ID_AA64DFR0_EL1 feature field.
 *
 * MDCR_EL2 is accumulated in x2 and written once at the end.
 * Clobbers: x0, x1, x2, NZCV flags.
 */
.macro __init_el2_debug
    mrs x1, id_aa64dfr0_el1
    sbfx    x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
    cmp x0, #1              // Signed compare: IMPDEF PMU reads as -1
    b.lt    .Lskip_pmu_\@           // Skip if no PMU present
    mrs x0, pmcr_el0            // Disable debug access traps
    ubfx    x0, x0, #11, #5         // to EL2 and allow access to
                            // (extracts PMCR_EL0.N, the counter count)
.Lskip_pmu_\@:
    /*
     * NOTE: csel consumes the NZCV flags set by the cmp above; the
     * intervening mrs/ubfx do not modify flags. If the PMU was absent
     * (lt), select xzr; otherwise keep the counter count in x0.
     */
    csel    x2, xzr, x0, lt         // all PMU counters from EL1

    /* Statistical profiling */
    ubfx    x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
    cbz x0, .Lskip_spe_\@       // Skip if SPE not present

    mrs_s   x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
    and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
    cbnz    x0, .Lskip_spe_el2_\@       // then permit sampling of physical
    mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
              1 << SYS_PMSCR_EL2_PA_SHIFT)
    msr_s   SYS_PMSCR_EL2, x0       // addresses and physical counter
.Lskip_spe_el2_\@:
    mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
    orr x2, x2, x0          // If we don't have VHE, then
                        // use EL1&0 translation.

.Lskip_spe_\@:
    /* Trace buffer */
    ubfx    x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
    cbz x0, .Lskip_trace_\@     // Skip if TraceBuffer is not present

    mrs_s   x0, SYS_TRBIDR_EL1
    and x0, x0, TRBIDR_PROG     // Programming-at-EL2 bit
    cbnz    x0, .Lskip_trace_\@     // If TRBE is available at EL2

    mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
    orr x2, x2, x0          // allow the EL1&0 translation
                        // to own it.

.Lskip_trace_\@:
    msr mdcr_el2, x2            // Configure debug traps
.endm
0082 
/*
 * LORegions: if FEAT_LOR is implemented (ID_AA64MMFR1_EL1.LO != 0),
 * clear LORC_EL1 to disable limited ordering regions.
 * Clobbers: x0, x1.
 */
.macro __init_el2_lor
    mrs x1, id_aa64mmfr1_el1
    ubfx    x0, x1, #ID_AA64MMFR1_LOR_SHIFT, #4
    cbz x0, .Lskip_lor_\@       // Skip if LORegions not implemented
    msr_s   SYS_LORC_EL1, xzr       // Disable LORegion ordering control
.Lskip_lor_\@:
.endm
0091 
/*
 * Stage-2 translation: zero VTTBR_EL2 so no stale stage-2 translation
 * table base is left in place before KVM programs one.
 * Clobbers: none.
 */
.macro __init_el2_stage2
    msr vttbr_el2, xzr
.endm
0096 
/*
 * GICv3 system register access: if ID_AA64PFR0_EL1.GIC reports a
 * system-register GIC interface, enable it at EL2 (and for lower ELs)
 * and reset ICH_HCR_EL2. The write to ICC_SRE_EL2 is read back because
 * SRE can be WI when the interface is disabled by a higher EL.
 * Clobbers: x0.
 */
.macro __init_el2_gicv3
    mrs x0, id_aa64pfr0_el1
    ubfx    x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
    cbz x0, .Lskip_gicv3_\@     // Skip if no sysreg GIC interface

    mrs_s   x0, SYS_ICC_SRE_EL2
    orr x0, x0, #ICC_SRE_EL2_SRE    // Set ICC_SRE_EL2.SRE==1
    orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
    msr_s   SYS_ICC_SRE_EL2, x0
    isb                 // Make sure SRE is now set
    mrs_s   x0, SYS_ICC_SRE_EL2     // Read SRE back,
    tbz x0, #0, .Lskip_gicv3_\@     // and check that it sticks
    msr_s   SYS_ICH_HCR_EL2, xzr        // Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
0113 
/*
 * Disable trapping of AArch32 CP15 accesses to EL2.
 * Clobbers: none.
 */
.macro __init_el2_hstr
    msr hstr_el2, xzr           // Disable CP15 traps to EL2
.endm
0117 
/*
 * Virtual CPU ID registers: mirror the physical MIDR/MPIDR into
 * VPIDR_EL2/VMPIDR_EL2 so EL1 reads of MIDR_EL1/MPIDR_EL1 return the
 * real values while running under a non-VHE EL2.
 * Clobbers: x0, x1.
 */
.macro __init_el2_nvhe_idregs
    mrs x0, midr_el1
    mrs x1, mpidr_el1
    msr vpidr_el2, x0
    msr vmpidr_el2, x1
.endm
0125 
/*
 * Coprocessor traps: program CPTR_EL2 so coprocessor/FP/SIMD accesses
 * from lower ELs are not trapped to EL2. #0x33ff is the nVHE reset-safe
 * value with the trap bits clear and RES1 bits set (matches the
 * existing magic used by this file's callers — see the msr comment).
 * Clobbers: x0.
 */
.macro __init_el2_nvhe_cptr
    mov x0, #0x33ff
    msr cptr_el2, x0            // Disable copro. traps to EL2
.endm
0131 
/*
 * Disable any fine grained traps (FEAT_FGT). Gated on
 * ID_AA64MMFR0_EL1.FGT; each optional register is further gated on its
 * own ID field because accessing it UNDEFs when the feature is absent.
 * Clobbers: x0, x1, NZCV flags.
 */
.macro __init_el2_fgt
    mrs x1, id_aa64mmfr0_el1
    ubfx    x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
    cbz x1, .Lskip_fgt_\@       // Skip everything if FGT not implemented

    mov x0, xzr
    mrs x1, id_aa64dfr0_el1
    ubfx    x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
    cmp x1, #3              // PMSNEVFR_EL1 exists from SPEv1p2 (PMSVer >= 3)
    b.lt    .Lset_debug_fgt_\@
    /* Disable PMSNEVFR_EL1 read and write traps */
    orr x0, x0, #(1 << 62)

.Lset_debug_fgt_\@:
    msr_s   SYS_HDFGRTR_EL2, x0     // Debug FGT: read traps
    msr_s   SYS_HDFGWTR_EL2, x0     // Debug FGT: write traps

    mov x0, xzr
    mrs x1, id_aa64pfr1_el1
    ubfx    x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
    cbz x1, .Lset_fgt_\@        // Skip SME bits if SME not implemented

    /* Disable nVHE traps of TPIDR2 and SMPRI */
    orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
    orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK

.Lset_fgt_\@:
    msr_s   SYS_HFGRTR_EL2, x0      // General FGT: read traps
    msr_s   SYS_HFGWTR_EL2, x0      // General FGT: write traps
    msr_s   SYS_HFGITR_EL2, xzr     // Instruction FGT: all off

    mrs x1, id_aa64pfr0_el1     // AMU traps UNDEF without AMU
    ubfx    x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
    cbz x1, .Lskip_fgt_\@

    msr_s   SYS_HAFGRTR_EL2, xzr        // AMU FGT: all off
.Lskip_fgt_\@:
.endm
0171 
/*
 * Prepare SPSR_EL2 for an eret into EL1 with the kernel's initial
 * PSTATE (INIT_PSTATE_EL1). The caller performs the eret itself.
 * Clobbers: x0.
 */
.macro __init_el2_nvhe_prepare_eret
    mov x0, #INIT_PSTATE_EL1
    msr spsr_el2, x0
.endm
0176 
/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
    __init_el2_sctlr
    __init_el2_timers
    __init_el2_debug
    __init_el2_lor
    __init_el2_stage2
    __init_el2_gicv3
    __init_el2_hstr
    __init_el2_nvhe_idregs
    __init_el2_nvhe_cptr
    __init_el2_fgt
    __init_el2_nvhe_prepare_eret
.endm
0198 
0199 #endif /* __ARM_KVM_INIT_H__ */