// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

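/*
 * Forward a guest SError as a virtual SError (vABT) unless the syndrome
 * describes a RAS SError that is known to be non-fatal; a non-fatal RAS
 * error needs no further action here.
 */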
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
    if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
        kvm_inject_vabt(vcpu);
}

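/*
 * Handle a guest hypercall (HVC). kvm_hvc_call_handler() returns a negative
 * value for an unknown or unsupported call; in that case the failure is
 * reported to the guest as -1 in x0 and the guest is resumed. Otherwise the
 * handler's return value is propagated (0 exits to userspace, > 0 resumes
 * the guest).
 */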
static int handle_hvc(struct kvm_vcpu *vcpu)
{
    int ret;

    trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                kvm_vcpu_hvc_get_imm(vcpu));
    vcpu->stat.hvc_exit_stat++;

    ret = kvm_hvc_call_handler(vcpu);
    if (ret < 0) {
        vcpu_set_reg(vcpu, 0, ~0UL);
        return 1;
    }

    return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
    /*
     * "If an SMC instruction executed at Non-secure EL1 is
     * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
     * Trap exception, not a Secure Monitor Call exception [...]"
     *
     * We need to advance the PC after the trap, as it would
     * otherwise return to the same address...
     */
    vcpu_set_reg(vcpu, 0, ~0UL);
    kvm_incr_pc(vcpu);
    return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
    kvm_inject_undefined(vcpu);
    return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *          instruction executed by a guest
 *
 * @vcpu:   the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
    u64 esr = kvm_vcpu_get_esr(vcpu);

    if (esr & ESR_ELx_WFx_ISS_WFE) {
        trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
        vcpu->stat.wfe_exit_stat++;
    } else {
        trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
        vcpu->stat.wfi_exit_stat++;
    }

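    /*
     * For WFIT/WFET with a valid register form, compare the deadline held
     * in Xt against the current virtual counter value; if it has already
     * passed, skip the block/yield and just step past the instruction.
     */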
    if (esr & ESR_ELx_WFx_ISS_WFxT) {
        if (esr & ESR_ELx_WFx_ISS_RV) {
            u64 val, now;

            now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
            val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

            if (now >= val)
                goto out;
        } else {
            /* Treat WFxT as WFx if RN is invalid */
            esr &= ~ESR_ELx_WFx_ISS_WFxT;
        }
    }

    if (esr & ESR_ELx_WFx_ISS_WFE) {
        kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
    } else {
        if (esr & ESR_ELx_WFx_ISS_WFxT)
            vcpu_set_flag(vcpu, IN_WFIT);

        kvm_vcpu_wfi(vcpu);
    }
out:
    kvm_incr_pc(vcpu);

    return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception
 *
 * @vcpu:   the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    u64 esr = kvm_vcpu_get_esr(vcpu);

    run->exit_reason = KVM_EXIT_DEBUG;
    run->debug.arch.hsr = lower_32_bits(esr);
    run->debug.arch.hsr_high = upper_32_bits(esr);
    run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

    if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
        run->debug.arch.far = vcpu->arch.fault.far_el2;

    return 0;
}

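/*
 * Catch-all for exception classes this file does not handle explicitly:
 * log the unexpected syndrome and inject an UNDEF into the guest.
 */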
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
    u64 esr = kvm_vcpu_get_esr(vcpu);

    kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
              esr, esr_get_class_string(esr));

    kvm_inject_undefined(vcpu);
    return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
    kvm_inject_undefined(vcpu);
    return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
    kvm_inject_undefined(vcpu);
    return 1;
}

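/*
 * Exit dispatch table, indexed by exception class (ESR_ELx.EC). Any class
 * without a dedicated handler falls back to kvm_handle_unknown_ec().
 */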
static exit_handle_fn arm_exit_handlers[] = {
    [0 ... ESR_ELx_EC_MAX]  = kvm_handle_unknown_ec,
    [ESR_ELx_EC_WFx]    = kvm_handle_wfx,
    [ESR_ELx_EC_CP15_32]    = kvm_handle_cp15_32,
    [ESR_ELx_EC_CP15_64]    = kvm_handle_cp15_64,
    [ESR_ELx_EC_CP14_MR]    = kvm_handle_cp14_32,
    [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
    [ESR_ELx_EC_CP10_ID]    = kvm_handle_cp10_id,
    [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
    [ESR_ELx_EC_HVC32]  = handle_hvc,
    [ESR_ELx_EC_SMC32]  = handle_smc,
    [ESR_ELx_EC_HVC64]  = handle_hvc,
    [ESR_ELx_EC_SMC64]  = handle_smc,
    [ESR_ELx_EC_SYS64]  = kvm_handle_sys_reg,
    [ESR_ELx_EC_SVE]    = handle_sve,
    [ESR_ELx_EC_IABT_LOW]   = kvm_handle_guest_abort,
    [ESR_ELx_EC_DABT_LOW]   = kvm_handle_guest_abort,
    [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
    [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
    [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
    [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
    [ESR_ELx_EC_BRK64]  = kvm_handle_guest_debug,
    [ESR_ELx_EC_FP_ASIMD]   = handle_no_fpsimd,
    [ESR_ELx_EC_PAC]    = kvm_handle_ptrauth,
};

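/*
 * Look up the handler for the current trap from the exception class encoded
 * in the vcpu's ESR.
 */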
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
    u64 esr = kvm_vcpu_get_esr(vcpu);
    u8 esr_ec = ESR_ELx_EC(esr);

    return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
    int handled;

    /*
     * See ARM ARM B1.14.1: "Hyp traps on instructions
     * that fail their condition code check"
     */
    if (!kvm_condition_valid(vcpu)) {
        kvm_incr_pc(vcpu);
        handled = 1;
    } else {
        exit_handle_fn exit_handler;

        exit_handler = kvm_get_exit_handler(vcpu);
        handled = exit_handler(vcpu);
    }

    return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
    struct kvm_run *run = vcpu->run;

    if (ARM_SERROR_PENDING(exception_index)) {
        /*
         * The SError is handled by handle_exit_early(). If the guest
         * survives it will re-execute the original instruction.
         */
        return 1;
    }

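    /*
     * Strip any SError-pending flag so that only the raw exception code is
     * used for dispatch in the switch below.
     */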
    exception_index = ARM_EXCEPTION_CODE(exception_index);

    switch (exception_index) {
    case ARM_EXCEPTION_IRQ:
        return 1;
    case ARM_EXCEPTION_EL1_SERROR:
        return 1;
    case ARM_EXCEPTION_TRAP:
        return handle_trap_exceptions(vcpu);
    case ARM_EXCEPTION_HYP_GONE:
        /*
         * EL2 has been reset to the hyp-stub. This happens when a guest
         * is pre-empted by kvm_reboot()'s shutdown call.
         */
        run->exit_reason = KVM_EXIT_FAIL_ENTRY;
        return 0;
    case ARM_EXCEPTION_IL:
        /*
         * We attempted an illegal exception return.  Guest state must
         * have been corrupted somehow.  Give up.
         */
        run->exit_reason = KVM_EXIT_FAIL_ENTRY;
        return -EINVAL;
    default:
        kvm_pr_unimpl("Unsupported exception type: %d",
                  exception_index);
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return 0;
    }
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
    if (ARM_SERROR_PENDING(exception_index)) {
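        /*
         * With the RAS extension the deferred SError syndrome is read via
         * kvm_vcpu_get_disr(), converted to an ESR and passed to
         * kvm_handle_guest_serror() to decide what to inject; without RAS
         * we cannot classify the error and pessimistically inject a
         * virtual SError.
         */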
        if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
            u64 disr = kvm_vcpu_get_disr(vcpu);

            kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
        } else {
            kvm_inject_vabt(vcpu);
        }

        return;
    }

    exception_index = ARM_EXCEPTION_CODE(exception_index);

    if (exception_index == ARM_EXCEPTION_EL1_SERROR)
        kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

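/*
 * Panic handler for the nVHE hypervisor. EL2 runs at a different virtual
 * address than the kernel image, so the faulting EL2 address is translated
 * back: hyp_offset is derived from the physical ELR, panic_addr
 * (elr_virt + hyp_offset) is the KASLR-stripped kimage address, and
 * kaslr_offset() is added back when symbolizing with %pB.
 */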
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
                          u64 elr_virt, u64 elr_phys,
                          u64 par, uintptr_t vcpu,
                          u64 far, u64 hpfar)
{
    u64 elr_in_kimg = __phys_to_kimg(elr_phys);
    u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
    u64 mode = spsr & PSR_MODE_MASK;
    u64 panic_addr = elr_virt + hyp_offset;

    if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
        kvm_err("Invalid host exception to nVHE hyp!\n");
    } else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
           (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
        const char *file = NULL;
        unsigned int line = 0;

        /* All hyp bugs, including warnings, are treated as fatal. */
        if (!is_protected_kvm_enabled() ||
            IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
            struct bug_entry *bug = find_bug(elr_in_kimg);

            if (bug)
                bug_get_file_line(bug, &file, &line);
        }

        if (file)
            kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
        else
            kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
                    (void *)(panic_addr + kaslr_offset()));
    } else {
        kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
                (void *)(panic_addr + kaslr_offset()));
    }

    /* Dump the nVHE hypervisor backtrace */
    kvm_nvhe_dump_backtrace(hyp_offset);

    /*
     * Hyp has panicked and we're going to handle that by panicking the
     * kernel. The kernel offset will be revealed in the panic so we're
     * also safe to reveal the hyp offset as a debugging aid for translating
     * hyp VAs to vmlinux addresses.
     */
    kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

    panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
          spsr, elr_virt, esr, far, hpfar, par, vcpu);
}