0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/errno.h>
0012 #include <linux/kernel.h>
0013 #include <linux/kvm_host.h>
0014 #include <linux/kvm.h>
0015 #include <linux/hw_breakpoint.h>
0016 #include <linux/slab.h>
0017 #include <linux/string.h>
0018 #include <linux/types.h>
0019
0020 #include <kvm/arm_arch_timer.h>
0021
0022 #include <asm/cpufeature.h>
0023 #include <asm/cputype.h>
0024 #include <asm/fpsimd.h>
0025 #include <asm/ptrace.h>
0026 #include <asm/kvm_arm.h>
0027 #include <asm/kvm_asm.h>
0028 #include <asm/kvm_emulate.h>
0029 #include <asm/kvm_mmu.h>
0030 #include <asm/virt.h>
0031
0032
0033 static u32 kvm_ipa_limit;
0034
0035
0036
0037
0038 #define VCPU_RESET_PSTATE_EL1 (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
0039 PSR_F_BIT | PSR_D_BIT)
0040
0041 #define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
0042 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
0043
0044 unsigned int kvm_sve_max_vl;
0045
0046 int kvm_arm_init_sve(void)
0047 {
0048 if (system_supports_sve()) {
0049 kvm_sve_max_vl = sve_max_virtualisable_vl();
0050
0051
0052
0053
0054
0055
0056
0057 if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
0058 kvm_sve_max_vl = VL_ARCH_MAX;
0059
0060
0061
0062
0063
0064 if (kvm_sve_max_vl < sve_max_vl())
0065 pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
0066 kvm_sve_max_vl);
0067 }
0068
0069 return 0;
0070 }
0071
/*
 * Enable the SVE feature on @vcpu: record the maximum vector length the
 * guest may use and mark the vCPU as SVE-capable.
 *
 * Returns 0 on success, -EINVAL if the host has no usable SVE support.
 * The actual SVE register storage is allocated later, when userspace
 * finalizes the vCPU (see kvm_vcpu_finalize_sve()).
 */
static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	/* kvm_sve_max_vl was computed once at init by kvm_arm_init_sve(). */
	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	vcpu_set_flag(vcpu, GUEST_HAS_SVE);

	return 0;
}
0088
0089
0090
0091
0092
0093 static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
0094 {
0095 void *buf;
0096 unsigned int vl;
0097 size_t reg_sz;
0098 int ret;
0099
0100 vl = vcpu->arch.sve_max_vl;
0101
0102
0103
0104
0105
0106
0107 if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
0108 vl > VL_ARCH_MAX))
0109 return -EIO;
0110
0111 reg_sz = vcpu_sve_state_size(vcpu);
0112 buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
0113 if (!buf)
0114 return -ENOMEM;
0115
0116 ret = kvm_share_hyp(buf, buf + reg_sz);
0117 if (ret) {
0118 kfree(buf);
0119 return ret;
0120 }
0121
0122 vcpu->arch.sve_state = buf;
0123 vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
0124 return 0;
0125 }
0126
0127 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
0128 {
0129 switch (feature) {
0130 case KVM_ARM_VCPU_SVE:
0131 if (!vcpu_has_sve(vcpu))
0132 return -EINVAL;
0133
0134 if (kvm_arm_vcpu_sve_finalized(vcpu))
0135 return -EPERM;
0136
0137 return kvm_vcpu_finalize_sve(vcpu);
0138 }
0139
0140 return -EINVAL;
0141 }
0142
0143 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
0144 {
0145 if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
0146 return false;
0147
0148 return true;
0149 }
0150
/*
 * Tear down the arch-specific state of @vcpu: drop the FP/task sharing,
 * unshare the vCPU structure and any SVE buffer from the hypervisor, then
 * free the SVE buffer.
 */
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_vcpu_unshare_task_fp(vcpu);
	kvm_unshare_hyp(vcpu, vcpu + 1);
	/* Must unshare from hyp before the memory is returned to the allocator. */
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
}
0161
0162 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
0163 {
0164 if (vcpu_has_sve(vcpu))
0165 memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
0166 }
0167
/*
 * Enable pointer authentication for @vcpu.
 *
 * KVM only supports enabling address and generic authentication together,
 * and only when the host implements both; anything else is -EINVAL.
 * Returns 0 on success.
 */
static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together and the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
	return 0;
}
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
/*
 * Fix the VM-wide EL1 register width (AArch32 vs AArch64) the first time a
 * vCPU is reset, and verify that every later vCPU agrees with it.
 *
 * Must be called with kvm->lock held: the first caller records the width in
 * kvm->arch.flags, subsequent callers may only confirm it.
 *
 * Returns 0 on success, -EINVAL if the requested width conflicts with the
 * already-configured one, if 32-bit EL1 is requested on a host without
 * ARM64_HAS_32BIT_EL1, or if 32-bit EL1 is combined with MTE.
 */
static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	bool is32bit;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);

	lockdep_assert_held(&kvm->lock);

	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
		/*
		 * The width was already decided by an earlier vCPU reset;
		 * this vCPU must match it.
		 */
		if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
			return 0;

		return -EINVAL;
	}

	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return -EINVAL;

	/* MTE is incompatible with AArch32 guests. */
	if (kvm_has_mte(kvm) && is32bit)
		return -EINVAL;

	if (is32bit)
		set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);

	set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);

	return 0;
}
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
/*
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * Performs an architectural reset of @vcpu: validates/records the VM
 * register width, (first time only) enables requested SVE/ptrauth
 * features, zeroes the core and FP register state, resets the system
 * registers, and applies any pending PSCI-style reset state (entry PC,
 * r0 argument, endianness/Thumb bit) recorded in vcpu->arch.reset_state.
 *
 * Returns 0 on success, or a negative error code if the width/feature
 * configuration is invalid or timer reset fails.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

	/*
	 * Snapshot the reset request and clear the pending flag under
	 * kvm->lock, which kvm_set_vm_width() also requires.
	 */
	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_set_vm_width(vcpu);
	if (!ret) {
		reset_state = vcpu->arch.reset_state;
		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
	}
	mutex_unlock(&vcpu->kvm->lock);

	if (ret)
		return ret;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	/*
	 * If this vCPU is currently loaded on a physical CPU, put it so the
	 * register file can be rewritten in memory, and re-load it at the
	 * end. Preemption stays disabled throughout.
	 */
	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	/* First reset enables SVE; later resets just zero the SVE state. */
	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	switch (vcpu->arch.target) {
	default:
		/* AArch32 guests reset to SVC mode, AArch64 to EL1h. */
		if (vcpu_el1_is_32bit(vcpu)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}
0349
/* Return the host's IPA size limit in bits, as set by kvm_set_ipa_limit(). */
u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}
0354
/*
 * Compute the host's IPA (guest physical address) size limit from
 * ID_AA64MMFR0_EL1 and store it in kvm_ipa_limit.
 *
 * Returns 0 on success, or -EINVAL if the current PAGE_SIZE is not usable
 * at Stage-2 (or the TGRAN_2 field holds an unknown value).
 */
int kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits could not be supported
	 * on either 4K or 16K page size. Hence let's cap
	 * it to 48 bits, in case it's reported as larger
	 * on the system.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}
0398
0399 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
0400 {
0401 u64 mmfr0, mmfr1;
0402 u32 phys_shift;
0403
0404 if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
0405 return -EINVAL;
0406
0407 phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
0408 if (phys_shift) {
0409 if (phys_shift > kvm_ipa_limit ||
0410 phys_shift < ARM64_MIN_PARANGE_BITS)
0411 return -EINVAL;
0412 } else {
0413 phys_shift = KVM_PHYS_SHIFT;
0414 if (phys_shift > kvm_ipa_limit) {
0415 pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
0416 current->comm);
0417 return -EINVAL;
0418 }
0419 }
0420
0421 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
0422 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
0423 kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
0424
0425 return 0;
0426 }