0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/kvm_host.h>
0014 #include <asm/kvm_emulate.h>
0015 #include <asm/esr.h>
0016
/*
 * Inject a synchronous external abort ({i,d}abort) into an AArch64
 * EL1 guest, reporting @addr as the faulting address.
 */
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u64 esr = 0;

	/* Queue a synchronous exception targeting AArch64 EL1. */
	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);

	/* Report the faulting address to the guest. */
	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault or an EL0t fault, it was taken from a lower
	 * exception level, hence the _LOW exception class.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * ORing ESR_ELx_EC_DABT_LOW on top of the IABT class turns it
	 * into the corresponding DABT class while preserving the
	 * LOW/CUR distinction (the DABT encodings are supersets of the
	 * IABT ones in the EC field).
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}
0048
0049 static void inject_undef64(struct kvm_vcpu *vcpu)
0050 {
0051 u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
0052
0053 kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
0054
0055
0056
0057
0058
0059 if (kvm_vcpu_trap_il_is32bit(vcpu))
0060 esr |= ESR_ELx_IL;
0061
0062 vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
0063 }
0064
/*
 * AArch32 fault-status encodings used when faking an external abort
 * into a 32-bit guest: external-abort FSC values for the LPAE
 * (long-descriptor) and non-LPAE (short-descriptor) DFSR/IFSR formats,
 * the DFSR format-select bit, and the TTBCR bit indicating which
 * translation format the guest is using.
 */
#define DFSR_FSC_EXTABT_LPAE 0x10
#define DFSR_FSC_EXTABT_nLPAE 0x08
#define DFSR_LPAE BIT(9)
#define TTBCR_EAE BIT(31)
0069
/* Make an Undefined Instruction exception pending for an AArch32 guest. */
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
}
0074
0075
0076
0077
0078
/*
 * Inject an external prefetch/data abort into an AArch32 guest,
 * reporting @addr as the faulting address.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Pick the FSR encoding matching the guest's translation format. */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* Short-descriptor format: plain external-abort status. */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		/* IFAR maps to FAR_EL1[63:32]; preserve the DFAR half. */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else {
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		/* DFAR maps to FAR_EL1[31:0]; preserve the IFAR half. */
		far &= GENMASK(63, 32);
		far |= addr;
		/* DFSR is backed by ESR_EL1 for an AArch32 EL1 guest. */
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
0118 {
0119 if (vcpu_el1_is_32bit(vcpu))
0120 inject_abt32(vcpu, false, addr);
0121 else
0122 inject_abt64(vcpu, false, addr);
0123 }
0124
0125
0126
0127
0128
0129
0130
0131
0132
0133 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
0134 {
0135 if (vcpu_el1_is_32bit(vcpu))
0136 inject_abt32(vcpu, true, addr);
0137 else
0138 inject_abt64(vcpu, true, addr);
0139 }
0140
/**
 * kvm_inject_size_fault - report a fault for an out-of-range IPA
 * @vcpu: The faulting VCPU
 *
 * Injects the appropriate external abort, then rewrites the fault
 * status code to an Address Size Fault where the guest's register
 * format can express one.
 */
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
	unsigned long addr, esr;

	/* Rebuild the full address: faulting IPA plus the page offset. */
	addr = kvm_vcpu_get_fault_ipa(vcpu);
	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	if (kvm_vcpu_trap_is_iabt(vcpu))
		kvm_inject_pabt(vcpu, addr);
	else
		kvm_inject_dabt(vcpu, addr);

	/*
	 * If AArch64 or LPAE, clear FSC (bits [5:0]) to indicate an
	 * Address Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests only receive the external abort injected
	 * above, as the short-descriptor format has no way to describe
	 * an address size fault.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;

	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
	esr &= ~GENMASK_ULL(5, 0);
	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}
0168
0169
0170
0171
0172
0173
0174
0175
/**
 * kvm_inject_undefined - inject an undefined-instruction exception
 * @vcpu: The VCPU receiving the exception
 *
 * Picks the injection path matching the guest EL1 execution state.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		inject_undef64(vcpu);
	else
		inject_undef32(vcpu);
}
0183
0184 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
0185 {
0186 vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
0187 *vcpu_hcr(vcpu) |= HCR_VSE;
0188 }
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
/**
 * kvm_inject_vabt - inject a virtual abort (SError) into the guest
 * @vcpu: The VCPU receiving the abort
 *
 * Uses ESR_ELx_ISV as the default syndrome value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}