Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2021 Google LLC
0004  * Author: Fuad Tabba <tabba@google.com>
0005  */
0006 
0007 #include <linux/kvm_host.h>
0008 #include <linux/mm.h>
0009 #include <nvhe/fixed_config.h>
0010 #include <nvhe/trap_handler.h>
0011 
0012 /*
0013  * Set trap register values based on features in ID_AA64PFR0.
0014  */
0015 static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
0016 {
0017     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
0018     u64 hcr_set = HCR_RW;
0019     u64 hcr_clear = 0;
0020     u64 cptr_set = 0;
0021 
0022     /* Protected KVM does not support AArch32 guests. */
0023     BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
0024         PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
0025     BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
0026         PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
0027 
0028     /*
0029      * Linux guests assume support for floating-point and Advanced SIMD. Do
0030      * not change the trapping behavior for these from the KVM default.
0031      */
0032     BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
0033                 PVM_ID_AA64PFR0_ALLOW));
0034     BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
0035                 PVM_ID_AA64PFR0_ALLOW));
0036 
0037     /* Trap RAS unless all current versions are supported */
0038     if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
0039         ID_AA64PFR0_RAS_V1P1) {
0040         hcr_set |= HCR_TERR | HCR_TEA;
0041         hcr_clear |= HCR_FIEN;
0042     }
0043 
0044     /* Trap AMU */
0045     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
0046         hcr_clear |= HCR_AMVOFFEN;
0047         cptr_set |= CPTR_EL2_TAM;
0048     }
0049 
0050     /* Trap SVE */
0051     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
0052         cptr_set |= CPTR_EL2_TZ;
0053 
0054     vcpu->arch.hcr_el2 |= hcr_set;
0055     vcpu->arch.hcr_el2 &= ~hcr_clear;
0056     vcpu->arch.cptr_el2 |= cptr_set;
0057 }
0058 
0059 /*
0060  * Set trap register values based on features in ID_AA64PFR1.
0061  */
0062 static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
0063 {
0064     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
0065     u64 hcr_set = 0;
0066     u64 hcr_clear = 0;
0067 
0068     /* Memory Tagging: Trap and Treat as Untagged if not supported. */
0069     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
0070         hcr_set |= HCR_TID5;
0071         hcr_clear |= HCR_DCT | HCR_ATA;
0072     }
0073 
0074     vcpu->arch.hcr_el2 |= hcr_set;
0075     vcpu->arch.hcr_el2 &= ~hcr_clear;
0076 }
0077 
0078 /*
0079  * Set trap register values based on features in ID_AA64DFR0.
0080  */
0081 static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
0082 {
0083     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
0084     u64 mdcr_set = 0;
0085     u64 mdcr_clear = 0;
0086     u64 cptr_set = 0;
0087 
0088     /* Trap/constrain PMU */
0089     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
0090         mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
0091         mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
0092                   MDCR_EL2_HPMN_MASK;
0093     }
0094 
0095     /* Trap Debug */
0096     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
0097         mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
0098 
0099     /* Trap OS Double Lock */
0100     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
0101         mdcr_set |= MDCR_EL2_TDOSA;
0102 
0103     /* Trap SPE */
0104     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
0105         mdcr_set |= MDCR_EL2_TPMS;
0106         mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
0107     }
0108 
0109     /* Trap Trace Filter */
0110     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
0111         mdcr_set |= MDCR_EL2_TTRF;
0112 
0113     /* Trap Trace */
0114     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
0115         cptr_set |= CPTR_EL2_TTA;
0116 
0117     vcpu->arch.mdcr_el2 |= mdcr_set;
0118     vcpu->arch.mdcr_el2 &= ~mdcr_clear;
0119     vcpu->arch.cptr_el2 |= cptr_set;
0120 }
0121 
0122 /*
0123  * Set trap register values based on features in ID_AA64MMFR0.
0124  */
0125 static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
0126 {
0127     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
0128     u64 mdcr_set = 0;
0129 
0130     /* Trap Debug Communications Channel registers */
0131     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
0132         mdcr_set |= MDCR_EL2_TDCC;
0133 
0134     vcpu->arch.mdcr_el2 |= mdcr_set;
0135 }
0136 
0137 /*
0138  * Set trap register values based on features in ID_AA64MMFR1.
0139  */
0140 static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
0141 {
0142     const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
0143     u64 hcr_set = 0;
0144 
0145     /* Trap LOR */
0146     if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
0147         hcr_set |= HCR_TLOR;
0148 
0149     vcpu->arch.hcr_el2 |= hcr_set;
0150 }
0151 
0152 /*
0153  * Set baseline trap register values.
0154  */
0155 static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
0156 {
0157     const u64 hcr_trap_feat_regs = HCR_TID3;
0158     const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;
0159 
0160     /*
0161      * Always trap:
0162      * - Feature id registers: to control features exposed to guests
0163      * - Implementation-defined features
0164      */
0165     vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;
0166 
0167     /* Clear res0 and set res1 bits to trap potential new features. */
0168     vcpu->arch.hcr_el2 &= ~(HCR_RES0);
0169     vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
0170     vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
0171     vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
0172 }
0173 
/*
 * Initialize trap register values for protected VMs.
 *
 * Applies the unconditional baseline traps first, then adjusts
 * HCR_EL2/MDCR_EL2/CPTR_EL2 per ID register so that features not exposed
 * to the protected guest are trapped.
 */
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
{
	/* Baseline must come first: the helpers below OR/clear on top of it. */
	pvm_init_trap_regs(vcpu);
	pvm_init_traps_aa64pfr0(vcpu);
	pvm_init_traps_aa64pfr1(vcpu);
	pvm_init_traps_aa64dfr0(vcpu);
	pvm_init_traps_aa64mmfr0(vcpu);
	pvm_init_traps_aa64mmfr1(vcpu);
}