// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/adjust_pc.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

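/*
 * Handling of traps taken from the host kernel to the nVHE hypervisor at
 * EL2: hypercalls (HVC) and SMCs issued by the host are decoded here and
 * dispatched to the handle___*() wrappers below.
 */

/* Per-CPU parameters the host hands over when initialising EL2. */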
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

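/*
 * Hypercall handler convention: DECLARE_REG() pulls argument N out of the
 * host's saved general-purpose register N in @host_ctxt, pointers passed by
 * the host are kernel VAs and must be converted with kern_hyp_va() before
 * being used at EL2, and any return value is written back to x1 via
 * cpu_reg(host_ctxt, 1).
 *
 * On the host side these are reached through the kvm_call_hyp() family of
 * macros, e.g. (illustrative only):
 *
 *     kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
 */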
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

    cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

    __kvm_adjust_pc(kern_hyp_va(vcpu));
}

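/*
 * TLB maintenance on behalf of the host: these wrap the stage-2 TLB
 * invalidation helpers, taking the relevant kvm_s2_mmu (a kernel VA, hence
 * the kern_hyp_va() conversion) and, where relevant, the IPA and page-table
 * level to invalidate.
 */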
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
    __kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
    DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
    DECLARE_REG(int, level, host_ctxt, 3);

    __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

    __kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

    __kvm_flush_cpu_context(kern_hyp_va(mmu));
}

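/* Program the virtual counter offset (CNTVOFF_EL2) requested by the host. */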
static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
    __kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

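/*
 * Set the DSSBS bit in SCTLR_EL2, which determines the default PSTATE.SSBS
 * value on exceptions taken to EL2.
 */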
static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
    u64 tmp;

    tmp = read_sysreg_el2(SYS_SCTLR);
    tmp |= SCTLR_ELx_DSSBS;
    write_sysreg_el2(tmp, SYS_SCTLR);
}

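/*
 * GICv3 accessors that have to run at EL2: they operate on the ICH_*
 * system registers (VMCR, APRs, list registers) on behalf of the host's
 * vgic code.
 */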
static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
    cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
    cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
    __vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
    __vgic_v3_init_lrs();
}

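/* Report the current MDCR_EL2 value back to the host. */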
static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
    cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

    __vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

    __vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

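/*
 * Early "privileged" hypercall: hand the [phys, phys + size) range of memory
 * over to the hypervisor so it can set up its own page tables and per-CPU
 * areas (per_cpu_base[]), using a hyp VA space of hyp_va_bits bits.
 */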
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
    DECLARE_REG(unsigned long, size, host_ctxt, 2);
    DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
    DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
    DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

    /*
     * __pkvm_init() will return only if an error occurred, otherwise it
     * will tail-call into __pkvm_init_finalise(), which will have to deal
     * with the host context directly.
     */
    cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
                        hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

    cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

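/*
 * Share (or unshare) the host page identified by @pfn with the hypervisor,
 * updating the ownership state tracked by the mem_protect code.
 */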
static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(u64, pfn, host_ctxt, 1);

    cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}

static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(u64, pfn, host_ctxt, 1);

    cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
    DECLARE_REG(size_t, size, host_ctxt, 2);
    DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

    /*
     * __pkvm_create_private_mapping() populates a pointer with the
     * hypervisor start address of the allocation.
     *
     * However, the handle___pkvm_create_private_mapping() hypercall crosses
     * the EL1/EL2 boundary, so the pointer would not be valid in this
     * context.
     *
     * Instead pass the allocation address as the return value (or return
     * ERR_PTR() on failure).
     */
    unsigned long haddr;
    int err = __pkvm_create_private_mapping(phys, size, prot, &haddr);

    if (err)
        haddr = (unsigned long)ERR_PTR(err);

    cpu_reg(host_ctxt, 1) = haddr;
}

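/*
 * Enable the host stage-2 on the calling CPU, completing the switch to
 * protected mode; see the ordering comment in handle_host_hcall() below.
 */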
static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
    cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

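/*
 * Let the hypervisor compute the vCPU's trap configuration itself rather
 * than trusting values provided by the host.
 */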
static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

    __pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}

typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)  [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

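/*
 * Dispatch table indexed by __KVM_HOST_SMCCC_FUNC_* ID. The order matters:
 * everything before __pkvm_prot_finalize is an early "privileged" hypercall
 * that is rejected once protected mode has been initialised (see
 * handle_host_hcall() below).
 */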
static const hcall_t host_hcall[] = {
    /* ___kvm_hyp_init */
    HANDLE_FUNC(__kvm_get_mdcr_el2),
    HANDLE_FUNC(__pkvm_init),
    HANDLE_FUNC(__pkvm_create_private_mapping),
    HANDLE_FUNC(__pkvm_cpu_set_vector),
    HANDLE_FUNC(__kvm_enable_ssbs),
    HANDLE_FUNC(__vgic_v3_init_lrs),
    HANDLE_FUNC(__vgic_v3_get_gic_config),
    HANDLE_FUNC(__pkvm_prot_finalize),

    HANDLE_FUNC(__pkvm_host_share_hyp),
    HANDLE_FUNC(__pkvm_host_unshare_hyp),
    HANDLE_FUNC(__kvm_adjust_pc),
    HANDLE_FUNC(__kvm_vcpu_run),
    HANDLE_FUNC(__kvm_flush_vm_context),
    HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
    HANDLE_FUNC(__kvm_tlb_flush_vmid),
    HANDLE_FUNC(__kvm_flush_cpu_context),
    HANDLE_FUNC(__kvm_timer_set_cntvoff),
    HANDLE_FUNC(__vgic_v3_read_vmcr),
    HANDLE_FUNC(__vgic_v3_write_vmcr),
    HANDLE_FUNC(__vgic_v3_save_aprs),
    HANDLE_FUNC(__vgic_v3_restore_aprs),
    HANDLE_FUNC(__pkvm_vcpu_init_traps),
};

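/*
 * Handle an HVC from the host: x0 carries the SMCCC function ID, which is
 * turned into an index into host_hcall[]; x0 is then overwritten with
 * SMCCC_RET_SUCCESS or SMCCC_RET_NOT_SUPPORTED, and the handler, if any,
 * places its result in x1.
 */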
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
    DECLARE_REG(unsigned long, id, host_ctxt, 0);
    unsigned long hcall_min = 0;
    hcall_t hfn;

    /*
     * If pKVM has been initialised then reject any calls to the
     * early "privileged" hypercalls. Note that we cannot reject
     * calls to __pkvm_prot_finalize for two reasons: (1) The static
     * key used to determine initialisation must be toggled prior to
     * finalisation and (2) finalisation is performed on a per-CPU
     * basis. This is all fine, however, since __pkvm_prot_finalize
     * returns -EPERM after the first call for a given CPU.
     */
    if (static_branch_unlikely(&kvm_protected_mode_initialized))
        hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;

    id -= KVM_HOST_SMCCC_ID(0);

    if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
        goto inval;

    hfn = host_hcall[id];
    if (unlikely(!hfn))
        goto inval;

    cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
    hfn(host_ctxt);

    return;
inval:
    cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

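/* SMCs that the hypervisor does not handle itself are forwarded to EL3. */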
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
    __kvm_hyp_host_forward_smc(host_ctxt);
}

static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
    bool handled;

    handled = kvm_host_psci_handler(host_ctxt);
    if (!handled)
        default_host_smc_handler(host_ctxt);

    /* SMC was trapped, move ELR past the current PC. */
    kvm_skip_host_instr();
}

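/*
 * Main handler for exceptions taken from the host, dispatching on the
 * ESR_EL2 exception class: HVCs are hypercalls, SMCs are either PSCI calls
 * handled locally or forwarded to EL3, SVE traps from the host re-enable
 * SVE access at EL2 and reset ZCR_EL2, and stage-2 aborts are handed to the
 * mem_protect code.
 */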
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
    u64 esr = read_sysreg_el2(SYS_ESR);

    switch (ESR_ELx_EC(esr)) {
    case ESR_ELx_EC_HVC64:
        handle_host_hcall(host_ctxt);
        break;
    case ESR_ELx_EC_SMC64:
        handle_host_smc(host_ctxt);
        break;
    case ESR_ELx_EC_SVE:
        sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
        isb();
        sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
        break;
    case ESR_ELx_EC_IABT_LOW:
    case ESR_ELx_EC_DABT_LOW:
        handle_host_mem_abort(host_ctxt);
        break;
    default:
        BUG();
    }
}