// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <hyp/adjust_pc.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

#if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
#error Hypervisor code only!
#endif

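/*
 * Accessors for guest sysregs: use the hardware register when the vcpu's
 * EL1 context is resident on the CPU, and fall back to the in-memory
 * copy in the vcpu context otherwise.
 */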
static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

	__vcpu_sys_reg(vcpu, reg) = val;
}

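/*
 * With VHE, the guest's SPSR_EL1 is loaded on the CPU at this point (and
 * is written through its EL12 alias); with nVHE, the guest's EL1 context
 * has not been restored yet, so update the in-memory copy instead.
 */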
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg_el1(val, SYS_SPSR);
	else
		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
}

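/*
 * Likewise for the AArch32 banked SPSRs: with VHE they are resident on
 * the CPU here, with nVHE they still live in the vcpu context.
 */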
static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg(val, spsr_abt);
	else
		vcpu->arch.ctxt.spsr_abt = val;
}

static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg(val, spsr_und);
	else
		vcpu->arch.ctxt.spsr_und = val;
}

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * Here we manipulate the fields in order of the exception entry handling
 * documented in the ARM ARM (DDI 0487), table D1-7 'PSTATE changes on
 * exception entry'.
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	unsigned long sctlr, vbar, old, new, mode;
	u64 exc_offset;

	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

	if (mode == target_mode)
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
	else if (!(mode & PSR_MODE32_BIT))
		exc_offset = LOWER_EL_AArch64_VECTOR;
	else
		exc_offset = LOWER_EL_AArch32_VECTOR;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	default:
		/* Don't do that */
		BUG();
	}

	*vcpu_pc(vcpu) = vbar + exc_offset + type;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	/* PSTATE.TCO is set to 1 upon any exception when MTE is implemented */
	if (kvm_has_mte(vcpu->kvm))
		new |= PSR_TCO_BIT;

	new |= (old & PSR_DIT_BIT);

	/*
	 * PSTATE.UAO is set to zero upon any exception to AArch64.
	 *
	 * PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0;
	 * SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented.
	 */
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	/*
	 * PSTATE.SS and PSTATE.IL are set to zero upon any exception
	 * to AArch64.
	 *
	 * PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to
	 * AArch64.
	 */
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	/*
	 * PSTATE.BTYPE is set to zero upon any exception to AArch64.
	 */

	/* PSTATE.{D,A,I,F} are all set upon an exception to AArch64 */
	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= target_mode;

	*vcpu_cpsr(vcpu) = new;
	__vcpu_write_spsr(vcpu, old);
}

/*
 * When an exception is taken to AArch32, most CPSR fields are left unchanged
 * in the handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR and SPSR_ELx layouts differ, and the code below is intended to
 * work with either format. Note that the SPSR.J bit doesn't exist in
 * SPSR_ELx, but it is RES0 anyway.
 *
 * Here we manipulate the fields in the order given by the AArch32 exception
 * entry rules in the ARM ARM (DDI 0406 section B1.8, DDI 0487 section G1).
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	/*
	 * CPSR.IT[7:0] are set to zero upon any exception.
	 */

	new |= (old & PSR_AA32_DIT_BIT);

	/*
	 * CPSR.SSBS is set to SCTLR.DSSBS upon any exception.
	 */
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	/*
	 * CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0;
	 * SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented.
	 */
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	/*
	 * SS does not exist in AArch32, so ignore it.
	 *
	 * CPSR.IL is set to zero upon any exception.
	 */

	new |= (old & PSR_AA32_GE_MASK);

	/*
	 * CPSR.E is set to SCTLR.EE upon any exception.
	 */
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	/*
	 * CPSR.A is unchanged upon an exception to Undefined or Supervisor
	 * mode, and set upon an exception to any other mode.
	 */
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	/*
	 * CPSR.I is set upon any exception.
	 */
	new |= PSR_AA32_I_BIT;

	/*
	 * CPSR.F is set upon an exception to FIQ, and unchanged otherwise.
	 */
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	/*
	 * CPSR.T is set to SCTLR.TE upon any exception.
	 */
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

/*
 * Offsets added to the faulting PC to form the preferred exception return
 * address (the banked LR of the target mode), indexed by vector offset
 * (i.e. exception type) and by ARM (0) vs Thumb (1) state at the time of
 * the exception.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

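/*
 * Emulate an exception entry to a 32-bit EL1: compute the new CPSR for
 * @mode, stash the old one in the banked SPSR, set up the banked LR with
 * the right return offset, and branch to the vector at @vect_offset.
 */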
static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	u32 return_address;

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
	return_address   = *vcpu_pc(vcpu);
	return_address  += return_offsets[vect_offset >> 2][is_thumb];

	/* KVM only enters the ABT and UND modes, so only deal with those */
	switch (mode) {
	case PSR_AA32_MODE_ABT:
		__vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
		break;

	case PSR_AA32_MODE_UND:
		__vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
		break;
	}

	/* Branch to exception vector */
	if (sctlr & (1 << 13))	/* SCTLR.V: high exception vectors (Hivecs) */
		vect_offset += 0xffff0000;
	else
		vect_offset += __vcpu_read_sys_reg(vcpu, VBAR_EL1);

	*vcpu_pc(vcpu) = vect_offset;
}

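/*
 * Perform the exception injection requested by the EXCEPT_* vcpu flags,
 * using the AArch32 or AArch64 entry emulation as appropriate.
 */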
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu)) {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA32_UND):
			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
			break;
		default:
			/* Only UND, IABT and DABT are ever injected for AArch32 */
			break;
		}
	} else {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
			break;
		default:
			/*
			 * Only EL1_SYNC makes sense so far, EL2_{SYNC,IRQ}
			 * will be implemented at some point. Everything
			 * else gets silently ignored.
			 */
			break;
		}
	}
}

/*
 * Adjust the guest PC (and potentially exception state) depending on
 * flags provided by the emulation code.
 */
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
	if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
		kvm_inject_exception(vcpu);
		vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
		vcpu_clear_flag(vcpu, EXCEPT_MASK);
	} else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
		kvm_skip_instr(vcpu);
		vcpu_clear_flag(vcpu, INCREMENT_PC);
	}
}