// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#define KVM_ARM_SMCCC_STD_FEATURES \
        GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES \
        GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES \
        GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)

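/*
 * Handler for the vendor-specific PTP hypercall
 * (ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID): fill val[0..3] with a
 * consistent pair of host wall-clock time and the requested (virtual
 * or physical) counter value, so the guest can cross-timestamp its
 * counter against the host clock.
 */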
static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
        struct system_time_snapshot systime_snapshot;
        u64 cycles = ~0UL;
        u32 feature;

        /*
         * System time and counter value must be captured at the same
         * time to keep consistency and precision.
         */
        ktime_get_snapshot(&systime_snapshot);

        /*
         * This is only valid if the current clocksource is the
         * architected counter, as this is the only one the guest
         * can see.
         */
        if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
                return;

        /*
         * The guest selects one of the two reference counters
         * (virtual or physical) with the first argument of the SMCCC
         * call. In case the identifier is not supported, error out.
         */
        feature = smccc_get_arg1(vcpu);
        switch (feature) {
        case KVM_PTP_VIRT_COUNTER:
                cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
                break;
        case KVM_PTP_PHYS_COUNTER:
                cycles = systime_snapshot.cycles;
                break;
        default:
                return;
        }

        /*
         * Return the wall-clock time in the first two registers and
         * the counter value in the last two, each split into upper
         * and lower 32-bit halves.
         */
        val[0] = upper_32_bits(systime_snapshot.real);
        val[1] = lower_32_bits(systime_snapshot.real);
        val[2] = upper_32_bits(cycles);
        val[3] = lower_32_bits(cycles);
}

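/*
 * Fallback policy for function IDs that are not controlled by one of
 * the bitmapped firmware registers.
 */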
static bool kvm_hvc_call_default_allowed(u32 func_id)
{
        switch (func_id) {
        /*
         * List of function-ids that are not gated with the bitmapped
         * feature firmware registers, and are to be allowed for
         * servicing the call by default.
         */
        case ARM_SMCCC_VERSION_FUNC_ID:
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                return true;
        default:
                /* PSCI 0.2 and up is in the 0:0x1f range */
                if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
                    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
                        return true;

                /*
                 * KVM's PSCI 0.1 doesn't comply with SMCCC and uses a
                 * separate function-id range; allow it here as well.
                 */
                if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
                        return true;

                return false;
        }
}

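/*
 * Check whether the guest is allowed to make this hypercall: calls
 * backed by a bitmapped firmware register are gated on the
 * corresponding per-VM feature bit, everything else falls back to the
 * default policy above.
 */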
static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id)
{
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

        switch (func_id) {
        case ARM_SMCCC_TRNG_VERSION:
        case ARM_SMCCC_TRNG_FEATURES:
        case ARM_SMCCC_TRNG_GET_UUID:
        case ARM_SMCCC_TRNG_RND32:
        case ARM_SMCCC_TRNG_RND64:
                return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
                                &smccc_feat->std_bmap);
        case ARM_SMCCC_HV_PV_TIME_FEATURES:
        case ARM_SMCCC_HV_PV_TIME_ST:
                return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
                                &smccc_feat->std_hyp_bmap);
        case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
        case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
                return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
                                &smccc_feat->vendor_hyp_bmap);
        case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
                return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
                                &smccc_feat->vendor_hyp_bmap);
        default:
                return kvm_hvc_call_default_allowed(func_id);
        }
}

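/*
 * Top-level dispatcher for guest hypercalls. Fills in the SMCCC return
 * values and returns 1 so the vCPU resumes, except for TRNG and PSCI
 * calls which are forwarded to their dedicated handlers.
 */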
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
        u32 func_id = smccc_get_function(vcpu);
        u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
        u32 feature;
        gpa_t gpa;

        if (!kvm_hvc_call_allowed(vcpu, func_id))
                goto out;

        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val[0] = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        switch (arm64_get_spectre_v2_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case SPECTRE_MITIGATED:
                                val[0] = SMCCC_RET_SUCCESS;
                                break;
                        case SPECTRE_UNAFFECTED:
                                val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (arm64_get_spectre_v4_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case SPECTRE_MITIGATED:
                                /*
                                 * SSBS everywhere: indicate no firmware
                                 * mitigation, as the guest can handle
                                 * speculative store bypass itself via
                                 * SSBS.
                                 *
                                 * Otherwise the mitigation is permanently
                                 * enabled on the host, so the workaround
                                 * is reported as not required.
                                 */
                                if (cpus_have_final_cap(ARM64_SSBS))
                                        break;
                                fallthrough;
                        case SPECTRE_UNAFFECTED:
                                val[0] = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_3:
                        switch (arm64_get_spectre_bhb_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case SPECTRE_MITIGATED:
                                val[0] = SMCCC_RET_SUCCESS;
                                break;
                        case SPECTRE_UNAFFECTED:
                                val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
                                break;
                        }
                        break;
                case ARM_SMCCC_HV_PV_TIME_FEATURES:
                        if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
                                     &smccc_feat->std_hyp_bmap))
                                val[0] = SMCCC_RET_SUCCESS;
                        break;
                }
                break;
        case ARM_SMCCC_HV_PV_TIME_FEATURES:
                val[0] = kvm_hypercall_pv_features(vcpu);
                break;
        case ARM_SMCCC_HV_PV_TIME_ST:
                gpa = kvm_init_stolen_time(vcpu);
                if (gpa != GPA_INVALID)
                        val[0] = gpa;
                break;
        case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
                val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
                val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
                val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
                val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
                break;
        case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
                val[0] = smccc_feat->vendor_hyp_bmap;
                break;
        case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
                kvm_ptp_get_time(vcpu, val);
                break;
        case ARM_SMCCC_TRNG_VERSION:
        case ARM_SMCCC_TRNG_FEATURES:
        case ARM_SMCCC_TRNG_GET_UUID:
        case ARM_SMCCC_TRNG_RND32:
        case ARM_SMCCC_TRNG_RND64:
                return kvm_trng_call(vcpu);
        default:
                return kvm_psci_call(vcpu);
        }

out:
        smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
        return 1;
}

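/*
 * Firmware pseudo-registers exposed to userspace through the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG interface, e.g. so a VMM can save and
 * restore the PSCI version and mitigation state across migration.
 */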
static const u64 kvm_arm_fw_reg_ids[] = {
        KVM_REG_ARM_PSCI_VERSION,
        KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
        KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
        KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
        KVM_REG_ARM_STD_BMAP,
        KVM_REG_ARM_STD_HYP_BMAP,
        KVM_REG_ARM_VENDOR_HYP_BMAP,
};

void kvm_arm_init_hypercalls(struct kvm *kvm)
{
        struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

        smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
        smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
        smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
}

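/*
 * Enumeration helpers for the firmware registers above, used when
 * reporting the register list to userspace (KVM_GET_REG_LIST).
 */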
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
                if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
                        return -EFAULT;
        }

        return 0;
}

#define KVM_REG_FEATURE_LEVEL_MASK GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where a
 * higher value means a better level of protection.
 */
static int get_kernel_wa_level(u64 regid)
{
        switch (regid) {
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                switch (arm64_get_spectre_v2_state()) {
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
                case SPECTRE_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                switch (arm64_get_spectre_v4_state()) {
                case SPECTRE_MITIGATED:
                        /*
                         * As for the hypercall discovery, pretend there
                         * is no firmware mitigation when SSBS is present
                         * on all CPUs.
                         */
                        if (cpus_have_final_cap(ARM64_SSBS))
                                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        fallthrough;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                }
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                switch (arm64_get_spectre_bhb_state()) {
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
                case SPECTRE_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
        }

        return -EINVAL;
}

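/*
 * Read one of the firmware pseudo-registers on behalf of userspace
 * (KVM_GET_ONE_REG).
 */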
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        switch (reg->id) {
        case KVM_REG_ARM_PSCI_VERSION:
                val = kvm_psci_version(vcpu);
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
                break;
        case KVM_REG_ARM_STD_BMAP:
                val = READ_ONCE(smccc_feat->std_bmap);
                break;
        case KVM_REG_ARM_STD_HYP_BMAP:
                val = READ_ONCE(smccc_feat->std_hyp_bmap);
                break;
        case KVM_REG_ARM_VENDOR_HYP_BMAP:
                val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
                break;
        default:
                return -ENOENT;
        }

        if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

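/*
 * Update one of the bitmapped feature firmware registers. The write is
 * rejected once the VM has started running, unless it leaves the value
 * unchanged.
 */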
static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
        int ret = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
        unsigned long *fw_reg_bmap, fw_reg_features;

        switch (reg_id) {
        case KVM_REG_ARM_STD_BMAP:
                fw_reg_bmap = &smccc_feat->std_bmap;
                fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
                break;
        case KVM_REG_ARM_STD_HYP_BMAP:
                fw_reg_bmap = &smccc_feat->std_hyp_bmap;
                fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
                break;
        case KVM_REG_ARM_VENDOR_HYP_BMAP:
                fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
                fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
                break;
        default:
                return -ENOENT;
        }

        /* Check for unsupported bits */
        if (val & ~fw_reg_features)
                return -EINVAL;

        mutex_lock(&kvm->lock);

        if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
            val != *fw_reg_bmap) {
                ret = -EBUSY;
                goto out;
        }

        WRITE_ONCE(*fw_reg_bmap, val);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

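/*
 * Write one of the firmware pseudo-registers on behalf of userspace
 * (KVM_SET_ONE_REG). For the workaround registers, only values that do
 * not claim more protection than the kernel actually provides are
 * accepted.
 */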
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int wa_level;

        if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg->id) {
        case KVM_REG_ARM_PSCI_VERSION:
        {
                bool wants_02;

                wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

                switch (val) {
                case KVM_ARM_PSCI_0_1:
                        if (wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                case KVM_ARM_PSCI_0_2:
                case KVM_ARM_PSCI_1_0:
                case KVM_ARM_PSCI_1_1:
                        if (!wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                }
                break;
        }

        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
                        return -EINVAL;

                if (get_kernel_wa_level(reg->id) < val)
                        return -EINVAL;

                return 0;

        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
                            KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
                        return -EINVAL;

                /* The enabled bit must not be set unless the level is AVAIL. */
                if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
                    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
                        return -EINVAL;

                /*
                 * Map all the possible incoming states to the only two we
                 * really want to deal with.
                 */
                switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
                        wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        break;
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
                        wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                        break;
                default:
                        return -EINVAL;
                }

                /*
                 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
                 * other way around.
                 */
                if (get_kernel_wa_level(reg->id) < wa_level)
                        return -EINVAL;

                return 0;
        case KVM_REG_ARM_STD_BMAP:
        case KVM_REG_ARM_STD_HYP_BMAP:
        case KVM_REG_ARM_VENDOR_HYP_BMAP:
                return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
        default:
                return -ENOENT;
        }

        return -EINVAL;
}