// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <hyp/adjust_pc.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

#if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
#error Hypervisor code only!
#endif

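/*
 * This file is built into both the VHE and nVHE hypervisor objects, so
 * has_vhe() resolves statically in each flavour. The accessors below pick
 * the right home for an EL1 system register: under VHE the guest's EL1
 * sysregs are resident in hardware while the vcpu runs, so they are
 * accessed directly (via the _EL12 aliases); otherwise the in-memory copy
 * in the vcpu context is used.
 */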
static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
    u64 val;

    if (__vcpu_read_sys_reg_from_cpu(reg, &val))
        return val;

    return __vcpu_sys_reg(vcpu, reg);
}

static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
    if (__vcpu_write_sys_reg_to_cpu(val, reg))
        return;

    __vcpu_sys_reg(vcpu, reg) = val;
}

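/*
 * The SPSR writers follow the same pattern: with VHE the guest's EL1 state
 * is live in hardware at this point, so the registers are written directly
 * (write_sysreg_el1() resolving to the SPSR_EL12 alias); with nVHE the
 * guest context has not been restored onto the CPU yet, so the saved copy
 * is updated instead.
 */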
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
{
    if (has_vhe())
        write_sysreg_el1(val, SYS_SPSR);
    else
        __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
}

static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
{
    if (has_vhe())
        write_sysreg(val, spsr_abt);
    else
        vcpu->arch.ctxt.spsr_abt = val;
}

static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
{
    if (has_vhe())
        write_sysreg(val, spsr_und);
    else
        vcpu->arch.ctxt.spsr_und = val;
}

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
                  enum exception_type type)
{
    unsigned long sctlr, vbar, old, new, mode;
    u64 exc_offset;

    mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

    if      (mode == target_mode)
        exc_offset = CURRENT_EL_SP_ELx_VECTOR;
    else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
        exc_offset = CURRENT_EL_SP_EL0_VECTOR;
    else if (!(mode & PSR_MODE32_BIT))
        exc_offset = LOWER_EL_AArch64_VECTOR;
    else
        exc_offset = LOWER_EL_AArch32_VECTOR;

    switch (target_mode) {
    case PSR_MODE_EL1h:
        vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
        sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
        __vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
        break;
    default:
        /* Don't do that */
        BUG();
    }

    *vcpu_pc(vcpu) = vbar + exc_offset + type;

    old = *vcpu_cpsr(vcpu);
    new = 0;

    new |= (old & PSR_N_BIT);
    new |= (old & PSR_Z_BIT);
    new |= (old & PSR_C_BIT);
    new |= (old & PSR_V_BIT);

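    // PSTATE.TCO is set to 1 upon any exception to AArch64 when FEAT_MTE
    // is implemented, so the handler starts with tag checks masked.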
    if (kvm_has_mte(vcpu->kvm))
        new |= PSR_TCO_BIT;

    new |= (old & PSR_DIT_BIT);

    // PSTATE.UAO is set to zero upon any exception to AArch64
    // See ARM DDI 0487E.a, page D5-2579.

    // PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
    // SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
    // See ARM DDI 0487E.a, page D5-2578.
    new |= (old & PSR_PAN_BIT);
    if (!(sctlr & SCTLR_EL1_SPAN))
        new |= PSR_PAN_BIT;

    // PSTATE.SS is set to zero upon any exception to AArch64
    // See ARM DDI 0487E.a, page D2-2452.

    // PSTATE.IL is set to zero upon any exception to AArch64
    // See ARM DDI 0487E.a, page D1-2306.

    // PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
    // See ARM DDI 0487E.a, page D13-3258
    if (sctlr & SCTLR_ELx_DSSBS)
        new |= PSR_SSBS_BIT;

    // PSTATE.BTYPE is set to zero upon any exception to AArch64
    // See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

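    // PSTATE.{D,A,I,F} are all set upon any exception to AArch64, masking
    // debug, SError, IRQ and FIQ in the handler.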
    new |= PSR_D_BIT;
    new |= PSR_A_BIT;
    new |= PSR_I_BIT;
    new |= PSR_F_BIT;

    new |= target_mode;

    *vcpu_cpsr(vcpu) = new;
    __vcpu_write_spsr(vcpu, old);
}
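
/*
 * Worked example: a 64-bit guest running at EL1h takes an injected
 * synchronous exception. mode == target_mode, so exc_offset is
 * CURRENT_EL_SP_ELx_VECTOR (0x200), and with except_type_sync == 0 the
 * new PC becomes VBAR_EL1 + 0x200, i.e. the guest's "current EL with
 * SP_ELx, synchronous" vector. ELR_EL1 holds the old PC and SPSR_EL1 the
 * old PSTATE, exactly as a hardware exception entry would leave them.
 */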

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
    u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
    unsigned long old, new;

    old = *vcpu_cpsr(vcpu);
    new = 0;

    new |= (old & PSR_AA32_N_BIT);
    new |= (old & PSR_AA32_Z_BIT);
    new |= (old & PSR_AA32_C_BIT);
    new |= (old & PSR_AA32_V_BIT);
    new |= (old & PSR_AA32_Q_BIT);

    // CPSR.IT[7:0] are set to zero upon any exception
    // See ARM DDI 0487E.a, section G1.12.3
    // See ARM DDI 0406C.d, section B1.8.3

    new |= (old & PSR_AA32_DIT_BIT);

    // CPSR.SSBS is set to SCTLR.DSSBS upon any exception
    // See ARM DDI 0487E.a, page G8-6244
    if (sctlr & BIT(31))
        new |= PSR_AA32_SSBS_BIT;

    // CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
    // SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
    // See ARM DDI 0487E.a, page G8-6246
    new |= (old & PSR_AA32_PAN_BIT);
    if (!(sctlr & BIT(23)))
        new |= PSR_AA32_PAN_BIT;

    // SS does not exist in AArch32, so ignore

    // CPSR.IL is set to zero upon any exception
    // See ARM DDI 0487E.a, page G1-5527

    new |= (old & PSR_AA32_GE_MASK);

    // CPSR.IT[7:0] are set to zero upon any exception
    // See prior comment above

    // CPSR.E is set to SCTLR.EE upon any exception
    // See ARM DDI 0487E.a, page G8-6245
    // See ARM DDI 0406C.d, page B4-1701
    if (sctlr & BIT(25))
        new |= PSR_AA32_E_BIT;

    // CPSR.A is unchanged upon an exception to Undefined, Supervisor
    // CPSR.A is set upon an exception to other modes
    // See ARM DDI 0487E.a, pages G1-5515 to G1-5516
    // See ARM DDI 0406C.d, page B1-1182
    new |= (old & PSR_AA32_A_BIT);
    if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
        new |= PSR_AA32_A_BIT;

    // CPSR.I is set upon any exception
    // See ARM DDI 0487E.a, pages G1-5515 to G1-5516
    // See ARM DDI 0406C.d, page B1-1182
    new |= PSR_AA32_I_BIT;

    // CPSR.F is set upon an exception to FIQ
    // CPSR.F is unchanged upon an exception to other modes
    // See ARM DDI 0487E.a, pages G1-5515 to G1-5516
    // See ARM DDI 0406C.d, page B1-1182
    new |= (old & PSR_AA32_F_BIT);
    if (mode == PSR_AA32_MODE_FIQ)
        new |= PSR_AA32_F_BIT;

    // CPSR.T is set to SCTLR.TE upon any exception
    // See ARM DDI 0487E.a, page G8-5514
    // See ARM DDI 0406C.d, page B1-1181
    if (sctlr & BIT(30))
        new |= PSR_AA32_T_BIT;

    new |= mode;

    return new;
}
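
/*
 * For instance, injecting an Undefined exception into a 32-bit guest uses
 * get_except32_cpsr(vcpu, PSR_AA32_MODE_UND): I is set, A and F are
 * inherited (UND is neither FIQ nor one of the A-setting modes), IT[7:0]
 * are cleared, and T/E come from SCTLR.TE/SCTLR.EE.
 */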

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. Rows are indexed by
 * vector offset / 4, columns by instruction set state: the preferred
 * return offsets for { ARM, Thumb }.
 */
static const u8 return_offsets[8][2] = {
    [0] = { 0, 0 },     /* Reset, unused */
    [1] = { 4, 2 },     /* Undefined */
    [2] = { 0, 0 },     /* SVC, unused */
    [3] = { 4, 4 },     /* Prefetch abort */
    [4] = { 8, 8 },     /* Data abort */
    [5] = { 0, 0 },     /* HVC, unused */
    [6] = { 4, 4 },     /* IRQ, unused */
    [7] = { 4, 4 },     /* FIQ, unused */
};

static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
    unsigned long spsr = *vcpu_cpsr(vcpu);
    bool is_thumb = (spsr & PSR_AA32_T_BIT);
    u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
    u32 return_address;

    *vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
    return_address   = *vcpu_pc(vcpu);
    return_address  += return_offsets[vect_offset >> 2][is_thumb];

    /* KVM only enters the ABT and UND modes, so only deal with those */
    switch (mode) {
    case PSR_AA32_MODE_ABT:
        __vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
        vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
        break;

    case PSR_AA32_MODE_UND:
        __vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
        vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
        break;
    }

    /* Branch to exception vector */
    if (sctlr & (1 << 13))      /* SCTLR.V: Hivecs, vectors at 0xffff0000 */
        vect_offset += 0xffff0000;
    else /* always have security exceptions */
        vect_offset += __vcpu_read_sys_reg(vcpu, VBAR_EL1);

    *vcpu_pc(vcpu) = vect_offset;
}
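
/*
 * Worked example: injecting a Data Abort into an ARM-state 32-bit guest
 * (mode == PSR_AA32_MODE_ABT, vect_offset == 16). LR_abt is set to PC + 8
 * (return_offsets[4][0]), SPSR_abt holds the old CPSR, and the new PC is
 * VBAR_EL1 + 16 (or 0xffff0010 with SCTLR.V set).
 */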

static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
    if (vcpu_el1_is_32bit(vcpu)) {
        switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
        case unpack_vcpu_flag(EXCEPT_AA32_UND):
            enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);  /* Undefined vector */
            break;
        case unpack_vcpu_flag(EXCEPT_AA32_IABT):
            enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12); /* Prefetch abort vector */
            break;
        case unpack_vcpu_flag(EXCEPT_AA32_DABT):
            enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16); /* Data abort vector */
            break;
        default:
            /* Err... */
            break;
        }
    } else {
        switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
        case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
            enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
            break;
        default:
            /*
             * Only EL1_SYNC makes sense so far, EL2_{SYNC,IRQ}
             * will be implemented at some point. Everything
             * else gets silently ignored.
             */
            break;
        }
    }
}

/*
 * Adjust the guest PC (and potentially exception state) depending on
 * flags provided by the emulation code.
 */
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
    if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
        kvm_inject_exception(vcpu);
        vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
        vcpu_clear_flag(vcpu, EXCEPT_MASK);
    } else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
        kvm_skip_instr(vcpu);
        vcpu_clear_flag(vcpu, INCREMENT_PC);
    }
}
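
/*
 * For context, the flags consumed above are set by the run-loop side of
 * KVM. A sketch (based on arch/arm64/include/asm/kvm_emulate.h; the exact
 * helpers may differ by kernel version):
 *
 *   kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC); // PENDING_EXCEPTION
 *                                                   // + the EXCEPT_MASK value
 *   kvm_incr_pc(vcpu);                              // INCREMENT_PC
 *
 * __kvm_adjust_pc() then applies at most one of the two on the next entry
 * to the hypervisor, which is why the two flags are mutually exclusive.
 */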