// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
		 CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);

	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same position with or without
	 * VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM
	 * shift value here for trapping AMU accesses.
	 */
	val |= CPTR_EL2_TAM;

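	/*
	 * FPSIMD/SVE state is switched lazily: if the guest already owns
	 * the FP registers, let it use them (and enable SVE too if it has
	 * it); otherwise keep the FP/SIMD traps set so the guest's first
	 * access faults and the state can be loaded on demand.
	 */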
	if (guest_owns_fp_regs(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
	} else {
		val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
		__activate_traps_fpsimd32(vcpu);
	}

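	/*
	 * SME is not exposed to guests: clearing SCTLR_EL2.EnTP2 disables
	 * TPIDR2_EL0 accesses for the duration of the guest run; the bit
	 * is set again in __deactivate_traps().
	 */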
	if (cpus_have_final_cap(ARM64_SME))
		write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
			     sctlr_el2);

	write_sysreg(val, cpacr_el1);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
}
NOKPROBE_SYMBOL(__activate_traps);

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	const char *host_vectors = vectors;

	___deactivate_traps(vcpu);

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL2/EL0 translation regime used
	 * by the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	if (cpus_have_final_cap(ARM64_SME))
		write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
			     sctlr_el2);

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);

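	/*
	 * If the kernel keeps EL0 mapped (no KPTI), the host may be using a
	 * CPU-specific vector slot (e.g. for Spectre mitigations), so prefer
	 * the per-CPU copy over the default vectors.
	 */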
	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);

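/*
 * On VHE, the common trap configuration is applied when the vcpu is loaded
 * and undone when it is put, rather than on every world switch; these two
 * helpers are the load/put hooks for that.
 */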
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
{
	__deactivate_traps_common(vcpu);
}

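/*
 * Guest exit handlers, indexed by exception class (ESR_ELx.EC). A handler
 * returning true means the exit has been fixed up and the guest can be
 * re-entered; a NULL entry (or a false return) leaves the exit to be
 * handled by the host.
 */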
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};

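/*
 * VHE uses a single handler table for all guests; the nVHE counterpart of
 * this hook selects a different table for protected VMs.
 */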
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	return hyp_exit_handlers;
}

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/* Nothing to filter on VHE. */
}

/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs_vhe(). We must now call __load_stage2()
	 * before __activate_traps(), because __load_stage2() configures
	 * stage 2 translation, and __activate_traps() clears HCR_EL2.TGE
	 * (among other things).
	 */
	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
	__activate_traps(vcpu);

	__kvm_adjust_pc(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);

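/*
 * Entry point from the host: reached via kvm_call_hyp_ret(), which on VHE
 * is a plain function call since the host already runs at EL2.
 */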
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret;

	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, which doesn't
	 * get dampened by the PMR when entering the guest.
	 */
	pmr_sync();

	ret = __kvm_vcpu_run_vhe(vcpu);

	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps. Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();

	return ret;
}

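/*
 * Undo the traps and restore the host sysreg state before calling panic(),
 * so that the panic itself runs in a sane host context.
 */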
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n",
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic);

void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();

	__hyp_call_panic(spsr, elr, par);
	unreachable();
}

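/* asmlinkage wrapper so the EL2 entry assembly can report unexpected exceptions. */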
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}