// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

0007 #include <linux/bug.h>
0008 #include <linux/cpu_pm.h>
0009 #include <linux/entry-kvm.h>
0010 #include <linux/errno.h>
0011 #include <linux/err.h>
0012 #include <linux/kvm_host.h>
0013 #include <linux/list.h>
0014 #include <linux/module.h>
0015 #include <linux/vmalloc.h>
0016 #include <linux/fs.h>
0017 #include <linux/mman.h>
0018 #include <linux/sched.h>
0019 #include <linux/kmemleak.h>
0020 #include <linux/kvm.h>
0021 #include <linux/kvm_irqfd.h>
0022 #include <linux/irqbypass.h>
0023 #include <linux/sched/stat.h>
0024 #include <linux/psci.h>
0025 #include <trace/events/kvm.h>
0026
0027 #define CREATE_TRACE_POINTS
0028 #include "trace_arm.h"
0029
0030 #include <linux/uaccess.h>
0031 #include <asm/ptrace.h>
0032 #include <asm/mman.h>
0033 #include <asm/tlbflush.h>
0034 #include <asm/cacheflush.h>
0035 #include <asm/cpufeature.h>
0036 #include <asm/virt.h>
0037 #include <asm/kvm_arm.h>
0038 #include <asm/kvm_asm.h>
0039 #include <asm/kvm_mmu.h>
0040 #include <asm/kvm_emulate.h>
0041 #include <asm/sections.h>
0042
0043 #include <kvm/arm_hypercalls.h>
0044 #include <kvm/arm_pmu.h>
0045 #include <kvm/arm_psci.h>
0046
0047 static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
0048 DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
0049
0050 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
0051
0052 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
0053 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
0054 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
0055
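/* Set when an in-kernel GIC implementation is available for guests. */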
0056 static bool vgic_present;
0057
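/* Per-CPU flag: non-zero once EL2 has been initialised on this CPU. */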
0058 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
0059 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
0060
0061 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
0062 {
0063 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
0064 }
0065
0066 int kvm_arch_hardware_setup(void *opaque)
0067 {
0068 return 0;
0069 }
0070
0071 int kvm_arch_check_processor_compat(void *opaque)
0072 {
0073 return 0;
0074 }
0075
0076 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
0077 struct kvm_enable_cap *cap)
0078 {
0079 int r;
0080
0081 if (cap->flags)
0082 return -EINVAL;
0083
0084 switch (cap->cap) {
0085 case KVM_CAP_ARM_NISV_TO_USER:
0086 r = 0;
0087 set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
0088 &kvm->arch.flags);
0089 break;
0090 case KVM_CAP_ARM_MTE:
0091 mutex_lock(&kvm->lock);
0092 if (!system_supports_mte() || kvm->created_vcpus) {
0093 r = -EINVAL;
0094 } else {
0095 r = 0;
0096 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
0097 }
0098 mutex_unlock(&kvm->lock);
0099 break;
0100 case KVM_CAP_ARM_SYSTEM_SUSPEND:
0101 r = 0;
0102 set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
0103 break;
0104 default:
0105 r = -EINVAL;
0106 break;
0107 }
0108
0109 return r;
0110 }
0111
0112 static int kvm_arm_default_max_vcpus(void)
0113 {
0114 return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
0115 }
0116
static void set_default_spectre(struct kvm *kvm)
{
        /*
         * Default to exposing CSV2/CSV3 == 1 when the hardware is not
         * affected by Spectre-v2/Meltdown. Although this is a per-CPU
         * feature, it is made VM-global; userspace can still override
         * it as long as it doesn't claim the impossible.
         */
        if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
                kvm->arch.pfr0_csv2 = 1;
        if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
                kvm->arch.pfr0_csv3 = 1;
}
0132
/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
0138 {
0139 int ret;
0140
0141 ret = kvm_arm_setup_stage2(kvm, type);
0142 if (ret)
0143 return ret;
0144
0145 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
0146 if (ret)
0147 return ret;
0148
0149 ret = kvm_share_hyp(kvm, kvm + 1);
0150 if (ret)
0151 goto out_free_stage2_pgd;
0152
0153 if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
0154 ret = -ENOMEM;
0155 goto out_free_stage2_pgd;
0156 }
0157 cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
0158
0159 kvm_vgic_early_init(kvm);

        /* The maximum number of VCPUs depends on the used GIC model */
        kvm->max_vcpus = kvm_arm_default_max_vcpus();
0163
0164 set_default_spectre(kvm);
0165 kvm_arm_init_hypercalls(kvm);
0166
0167 return ret;
0168 out_free_stage2_pgd:
0169 kvm_free_stage2_pgd(&kvm->arch.mmu);
0170 return ret;
0171 }
0172
0173 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
0174 {
0175 return VM_FAULT_SIGBUS;
0176 }
0177
/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
0184 {
0185 bitmap_free(kvm->arch.pmu_filter);
0186 free_cpumask_var(kvm->arch.supported_cpus);
0187
0188 kvm_vgic_destroy(kvm);
0189
0190 kvm_destroy_vcpus(kvm);
0191
0192 kvm_unshare_hyp(kvm, kvm + 1);
0193 }
0194
0195 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
0196 {
0197 int r;
0198 switch (ext) {
0199 case KVM_CAP_IRQCHIP:
0200 r = vgic_present;
0201 break;
0202 case KVM_CAP_IOEVENTFD:
0203 case KVM_CAP_DEVICE_CTRL:
0204 case KVM_CAP_USER_MEMORY:
0205 case KVM_CAP_SYNC_MMU:
0206 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
0207 case KVM_CAP_ONE_REG:
0208 case KVM_CAP_ARM_PSCI:
0209 case KVM_CAP_ARM_PSCI_0_2:
0210 case KVM_CAP_READONLY_MEM:
0211 case KVM_CAP_MP_STATE:
0212 case KVM_CAP_IMMEDIATE_EXIT:
0213 case KVM_CAP_VCPU_EVENTS:
0214 case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
0215 case KVM_CAP_ARM_NISV_TO_USER:
0216 case KVM_CAP_ARM_INJECT_EXT_DABT:
0217 case KVM_CAP_SET_GUEST_DEBUG:
0218 case KVM_CAP_VCPU_ATTRIBUTES:
0219 case KVM_CAP_PTP_KVM:
0220 case KVM_CAP_ARM_SYSTEM_SUSPEND:
0221 r = 1;
0222 break;
0223 case KVM_CAP_SET_GUEST_DEBUG2:
0224 return KVM_GUESTDBG_VALID_MASK;
0225 case KVM_CAP_ARM_SET_DEVICE_ADDR:
0226 r = 1;
0227 break;
0228 case KVM_CAP_NR_VCPUS:
                /*
                 * KVM_CAP_NR_VCPUS is only advisory and is not bound to
                 * KVM_CAP_MAX_VCPUS: report the smaller of the number of
                 * online CPUs and the default vCPU limit.
                 */
                r = min_t(unsigned int, num_online_cpus(),
                          kvm_arm_default_max_vcpus());
0237 break;
0238 case KVM_CAP_MAX_VCPUS:
0239 case KVM_CAP_MAX_VCPU_ID:
0240 if (kvm)
0241 r = kvm->max_vcpus;
0242 else
0243 r = kvm_arm_default_max_vcpus();
0244 break;
0245 case KVM_CAP_MSI_DEVID:
0246 if (!kvm)
0247 r = -EINVAL;
0248 else
0249 r = kvm->arch.vgic.msis_require_devid;
0250 break;
0251 case KVM_CAP_ARM_USER_IRQ:
                /*
                 * Version 1 of the KVM_CAP_ARM_USER_IRQ ABI: the EL1 vtimer,
                 * EL1 ptimer and the PMU can be routed to userspace.
                 */
                r = 1;
                break;
0258 case KVM_CAP_ARM_MTE:
0259 r = system_supports_mte();
0260 break;
0261 case KVM_CAP_STEAL_TIME:
0262 r = kvm_arm_pvtime_supported();
0263 break;
0264 case KVM_CAP_ARM_EL1_32BIT:
0265 r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
0266 break;
0267 case KVM_CAP_GUEST_DEBUG_HW_BPS:
0268 r = get_num_brps();
0269 break;
0270 case KVM_CAP_GUEST_DEBUG_HW_WPS:
0271 r = get_num_wrps();
0272 break;
0273 case KVM_CAP_ARM_PMU_V3:
0274 r = kvm_arm_support_pmu_v3();
0275 break;
0276 case KVM_CAP_ARM_INJECT_SERROR_ESR:
0277 r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
0278 break;
0279 case KVM_CAP_ARM_VM_IPA_SIZE:
0280 r = get_kvm_ipa_limit();
0281 break;
0282 case KVM_CAP_ARM_SVE:
0283 r = system_supports_sve();
0284 break;
0285 case KVM_CAP_ARM_PTRAUTH_ADDRESS:
0286 case KVM_CAP_ARM_PTRAUTH_GENERIC:
0287 r = system_has_full_ptr_auth();
0288 break;
0289 default:
0290 r = 0;
0291 }
0292
0293 return r;
0294 }
0295
0296 long kvm_arch_dev_ioctl(struct file *filp,
0297 unsigned int ioctl, unsigned long arg)
0298 {
0299 return -EINVAL;
0300 }
0301
0302 struct kvm *kvm_arch_alloc_vm(void)
0303 {
0304 size_t sz = sizeof(struct kvm);
0305
0306 if (!has_vhe())
0307 return kzalloc(sz, GFP_KERNEL_ACCOUNT);
0308
0309 return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
0310 }
0311
0312 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
0313 {
0314 if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
0315 return -EBUSY;
0316
0317 if (id >= kvm->max_vcpus)
0318 return -EINVAL;
0319
0320 return 0;
0321 }
0322
0323 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
0324 {
0325 int err;
0326
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;
        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

        /*
         * Default value for the FP state, will be overloaded at load
         * time if we support FP (pretty likely).
         */
        vcpu->arch.fp_state = FP_STATE_FREE;

        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);
0341
0342 kvm_pmu_vcpu_init(vcpu);
0343
0344 kvm_arm_reset_debug_ptr(vcpu);
0345
0346 kvm_arm_pvtime_vcpu_init(&vcpu->arch);
0347
0348 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
0349
0350 err = kvm_vgic_vcpu_init(vcpu);
0351 if (err)
0352 return err;
0353
0354 return kvm_share_hyp(vcpu, vcpu + 1);
0355 }
0356
0357 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
0358 {
0359 }
0360
0361 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
0362 {
0363 if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
0364 static_branch_dec(&userspace_irqchip_in_use);
0365
0366 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
0367 kvm_timer_vcpu_terminate(vcpu);
0368 kvm_pmu_vcpu_destroy(vcpu);
0369
0370 kvm_arm_vcpu_destroy(vcpu);
0371 }
0372
0373 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
0374 {
0375
0376 }
0377
0378 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
0379 {
0380
0381 }
0382
0383 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
0384 {
0385 struct kvm_s2_mmu *mmu;
0386 int *last_ran;
0387
0388 mmu = vcpu->arch.hw_mmu;
0389 last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
0390
        /*
         * Both the TLBs and the I-cache are private to each vCPU. If another
         * vCPU of the same VM previously ran on this physical CPU, call into
         * the hypervisor to invalidate those contexts.
         *
         * Over-invalidation (e.g. if we are preempted before the vCPU
         * actually runs) does not affect correctness.
         */
        if (*last_ran != vcpu->vcpu_id) {
                kvm_call_hyp(__kvm_flush_cpu_context, mmu);
                *last_ran = vcpu->vcpu_id;
        }
0404
0405 vcpu->cpu = cpu;
0406
0407 kvm_vgic_load(vcpu);
0408 kvm_timer_vcpu_load(vcpu);
0409 if (has_vhe())
0410 kvm_vcpu_load_sysregs_vhe(vcpu);
0411 kvm_arch_vcpu_load_fp(vcpu);
0412 kvm_vcpu_pmu_restore_guest(vcpu);
0413 if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
0414 kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
0415
0416 if (single_task_running())
0417 vcpu_clear_wfx_traps(vcpu);
0418 else
0419 vcpu_set_wfx_traps(vcpu);
0420
0421 if (vcpu_has_ptrauth(vcpu))
0422 vcpu_ptrauth_disable(vcpu);
0423 kvm_arch_vcpu_load_debug_state_flags(vcpu);
0424
0425 if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
0426 vcpu_set_on_unsupported_cpu(vcpu);
0427 }
0428
0429 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
0430 {
0431 kvm_arch_vcpu_put_debug_state_flags(vcpu);
0432 kvm_arch_vcpu_put_fp(vcpu);
0433 if (has_vhe())
0434 kvm_vcpu_put_sysregs_vhe(vcpu);
0435 kvm_timer_vcpu_put(vcpu);
0436 kvm_vgic_put(vcpu);
0437 kvm_vcpu_pmu_restore_host(vcpu);
0438 kvm_arm_vmid_clear_active();
0439
0440 vcpu_clear_on_unsupported_cpu(vcpu);
0441 vcpu->cpu = -1;
0442 }
0443
0444 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
0445 {
0446 vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
0447 kvm_make_request(KVM_REQ_SLEEP, vcpu);
0448 kvm_vcpu_kick(vcpu);
0449 }
0450
0451 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
0452 {
0453 return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
0454 }
0455
0456 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
0457 {
0458 vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
0459 kvm_make_request(KVM_REQ_SUSPEND, vcpu);
0460 kvm_vcpu_kick(vcpu);
0461 }
0462
0463 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
0464 {
0465 return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
0466 }
0467
0468 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
0469 struct kvm_mp_state *mp_state)
0470 {
0471 *mp_state = vcpu->arch.mp_state;
0472
0473 return 0;
0474 }
0475
0476 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
0477 struct kvm_mp_state *mp_state)
0478 {
0479 int ret = 0;
0480
0481 switch (mp_state->mp_state) {
0482 case KVM_MP_STATE_RUNNABLE:
0483 vcpu->arch.mp_state = *mp_state;
0484 break;
0485 case KVM_MP_STATE_STOPPED:
0486 kvm_arm_vcpu_power_off(vcpu);
0487 break;
0488 case KVM_MP_STATE_SUSPENDED:
0489 kvm_arm_vcpu_suspend(vcpu);
0490 break;
0491 default:
0492 ret = -EINVAL;
0493 }
0494
0495 return ret;
0496 }
0497
/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts, or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
0506 {
0507 bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
0508 return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
0509 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
0510 }
0511
0512 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
0513 {
0514 return vcpu_mode_priv(vcpu);
0515 }
0516
0517 #ifdef CONFIG_GUEST_PERF_EVENTS
0518 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
0519 {
0520 return *vcpu_pc(vcpu);
0521 }
0522 #endif
0523
0524 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
0525 {
0526 return vcpu->arch.target >= 0;
0527 }
0528
/*
 * Late vCPU setup that must wait until userspace has finished configuring
 * the vCPU: performed the first time the vCPU is run (or whenever its host
 * thread changes).
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
0535 {
0536 struct kvm *kvm = vcpu->kvm;
0537 int ret;
0538
0539 if (!kvm_vcpu_initialized(vcpu))
0540 return -ENOEXEC;
0541
0542 if (!kvm_arm_vcpu_is_finalized(vcpu))
0543 return -EPERM;
0544
0545 ret = kvm_arch_vcpu_run_map_fp(vcpu);
0546 if (ret)
0547 return ret;
0548
0549 if (likely(vcpu_has_run_once(vcpu)))
0550 return 0;
0551
0552 kvm_arm_vcpu_init_debug(vcpu);
0553
        if (likely(irqchip_in_kernel(kvm))) {
                /*
                 * Map the VGIC hardware resources before running a vcpu the
                 * first time on this VM.
                 */
                ret = kvm_vgic_map_resources(kvm);
                if (ret)
                        return ret;
        }
0563
0564 ret = kvm_timer_enable(vcpu);
0565 if (ret)
0566 return ret;
0567
0568 ret = kvm_arm_pmu_v3_enable(vcpu);
0569 if (ret)
0570 return ret;
0571
        if (!irqchip_in_kernel(kvm)) {
                /*
                 * Tell the rest of the code that there are userspace irqchip
                 * VMs in the wild.
                 */
                static_branch_inc(&userspace_irqchip_in_use);
        }
0579
        /*
         * Initialize traps for protected VMs.
         * NOTE: Move to run in EL2 directly, rather than via a hypercall,
         * once the code is in place for first run initialization at EL2.
         */
        if (kvm_vm_is_protected(kvm))
                kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
0587
0588 mutex_lock(&kvm->lock);
0589 set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
0590 mutex_unlock(&kvm->lock);
0591
0592 return ret;
0593 }
0594
0595 bool kvm_arch_intc_initialized(struct kvm *kvm)
0596 {
0597 return vgic_initialized(kvm);
0598 }
0599
0600 void kvm_arm_halt_guest(struct kvm *kvm)
0601 {
0602 unsigned long i;
0603 struct kvm_vcpu *vcpu;
0604
0605 kvm_for_each_vcpu(i, vcpu, kvm)
0606 vcpu->arch.pause = true;
0607 kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
0608 }
0609
0610 void kvm_arm_resume_guest(struct kvm *kvm)
0611 {
0612 unsigned long i;
0613 struct kvm_vcpu *vcpu;
0614
0615 kvm_for_each_vcpu(i, vcpu, kvm) {
0616 vcpu->arch.pause = false;
0617 __kvm_vcpu_wake_up(vcpu);
0618 }
0619 }
0620
0621 static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
0622 {
0623 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
0624
0625 rcuwait_wait_event(wait,
0626 (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
0627 TASK_INTERRUPTIBLE);
0628
        if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
                /* Awaken to handle a signal, request we sleep again later. */
                kvm_make_request(KVM_REQ_SLEEP, vcpu);
        }

        /*
         * Make sure we will observe a potential reset request if we've
         * observed a change to the power state. Pairs with the smp_wmb() in
         * kvm_psci_vcpu_on().
         */
        smp_rmb();
0640 }
0641
/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is received.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
0651 {
        /*
         * Sync back the state of the GIC CPU interface so that we have
         * the latest PMR and group enables. This ensures that
         * kvm_arch_vcpu_runnable has up-to-date data to decide whether
         * we have pending interrupts, e.g. when determining if the
         * vCPU should block.
         *
         * For the same reason, we want to tell GICv4 that we need
         * doorbells to be signalled, even if this is the case for
         * interrupts that are already pending.
         */
        preempt_disable();
        kvm_vgic_vmcr_sync(vcpu);
        vgic_v4_put(vcpu, true);
        preempt_enable();
0666
0667 kvm_vcpu_halt(vcpu);
0668 vcpu_clear_flag(vcpu, IN_WFIT);
0669 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
0670
0671 preempt_disable();
0672 vgic_v4_load(vcpu);
0673 preempt_enable();
0674 }
0675
0676 static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
0677 {
0678 if (!kvm_arm_vcpu_suspended(vcpu))
0679 return 1;
0680
0681 kvm_vcpu_wfi(vcpu);
0682
        /*
         * The suspend state is sticky; we do not leave it until userspace
         * explicitly marks the vCPU as runnable. Request that we suspend
         * again later.
         */
        kvm_make_request(KVM_REQ_SUSPEND, vcpu);
0689
        /*
         * Check to make sure the vCPU is actually runnable. If so, exit to
         * userspace informing it of the wakeup condition.
         */
        if (kvm_arch_vcpu_runnable(vcpu)) {
0695 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
0696 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
0697 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
0698 return 0;
0699 }
        /*
         * Otherwise, we were unblocked to process a different event, such as
         * a pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
         * process the event.
         */
        return 1;
0707 }
0708
/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	    0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value
 *	   indicates an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
0719 {
0720 if (kvm_request_pending(vcpu)) {
0721 if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
0722 kvm_vcpu_sleep(vcpu);
0723
0724 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
0725 kvm_reset_vcpu(vcpu);
0726
                /*
                 * Clear IRQ_PENDING requests that were made to guarantee
                 * that a VCPU sees new virtual interrupts.
                 */
                kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
0732
0733 if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
0734 kvm_update_stolen_time(vcpu);
0735
                if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
                        /* The distributor enable bits were changed */
                        preempt_disable();
                        vgic_v4_put(vcpu, false);
                        vgic_v4_load(vcpu);
                        preempt_enable();
                }
0743
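                /* Reprogram the PMU from the guest's PMCR_EL0 after a reload request. */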
0744 if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
0745 kvm_pmu_handle_pmcr(vcpu,
0746 __vcpu_sys_reg(vcpu, PMCR_EL0));
0747
0748 if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
0749 return kvm_vcpu_suspend(vcpu);
0750 }
0751
0752 return 1;
0753 }
0754
0755 static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
0756 {
0757 if (likely(!vcpu_mode_is_32bit(vcpu)))
0758 return false;
0759
0760 return !kvm_supports_32bit_el0();
0761 }
0762
/*
 * Returns true if the vCPU should *not* enter the guest: either an exit to
 * userspace is needed (in which case *ret holds the value to return and
 * run->exit_reason is set), or pending requests/work must be handled in a
 * preemptible, interruptible context before re-entering.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
0778 {
0779 struct kvm_run *run = vcpu->run;
0780
        /*
         * If we're using a userspace irqchip, then check if we need
         * to tell a userspace irqchip about timer or PMU level
         * changes and if so, exit to userspace (the actual level
         * state gets updated in kvm_timer_update_run and
         * kvm_pmu_update_run below).
         */
        if (static_branch_unlikely(&userspace_irqchip_in_use)) {
                if (kvm_timer_should_notify_user(vcpu) ||
                    kvm_pmu_should_notify_user(vcpu)) {
                        *ret = -EINTR;
                        run->exit_reason = KVM_EXIT_INTR;
                        return true;
                }
        }
0796
0797 if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
0798 run->exit_reason = KVM_EXIT_FAIL_ENTRY;
0799 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
0800 run->fail_entry.cpu = smp_processor_id();
0801 *ret = 0;
0802 return true;
0803 }
0804
0805 return kvm_request_pending(vcpu) ||
0806 xfer_to_guest_mode_work_pending();
0807 }
0808
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS)
 * while the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is
 * not safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
0817 {
0818 int ret;
0819
0820 guest_state_enter_irqoff();
0821 ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
0822 guest_state_exit_irqoff();
0823
0824 return ret;
0825 }
0826
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space.
 * It will execute VM code in a loop until the time slice for the process is
 * used or some emulation is needed from user space in which case the
 * function will return with return value 0 and with the kvm_run structure
 * filled in with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
0838 {
0839 struct kvm_run *run = vcpu->run;
0840 int ret;
0841
0842 if (run->exit_reason == KVM_EXIT_MMIO) {
0843 ret = kvm_handle_mmio_return(vcpu);
0844 if (ret)
0845 return ret;
0846 }
0847
0848 vcpu_load(vcpu);
0849
0850 if (run->immediate_exit) {
0851 ret = -EINTR;
0852 goto out;
0853 }
0854
0855 kvm_sigset_activate(vcpu);
0856
0857 ret = 1;
0858 run->exit_reason = KVM_EXIT_UNKNOWN;
0859 run->flags = 0;
0860 while (ret > 0) {
0861
0862
0863
0864 ret = xfer_to_guest_mode_handle_work(vcpu);
0865 if (!ret)
0866 ret = 1;
0867
0868 if (ret > 0)
0869 ret = check_vcpu_requests(vcpu);
0870
                /*
                 * Preparing the interrupts to be injected also
                 * involves poking the GIC, which must be done in a
                 * non-preemptible context.
                 */
                preempt_disable();
0877
                /*
                 * The VMID allocator only tracks active VMIDs per physical
                 * CPU, and therefore the VMID allocated may not be preserved
                 * on VMID roll-over if the task was preempted, making a
                 * thread's VMID inactive. So we need to call
                 * kvm_arm_vmid_update() in non-preemptible context.
                 */
                kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
0886
0887 kvm_pmu_flush_hwstate(vcpu);
0888
0889 local_irq_disable();
0890
0891 kvm_vgic_flush_hwstate(vcpu);
0892
0893 kvm_pmu_update_vcpu_events(vcpu);
0894
                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
                 * Documentation/virt/kvm/vcpu-requests.rst
                 */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);
0902
0903 if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
0904 vcpu->mode = OUTSIDE_GUEST_MODE;
0905 isb();
0906 kvm_pmu_sync_hwstate(vcpu);
0907 if (static_branch_unlikely(&userspace_irqchip_in_use))
0908 kvm_timer_sync_user(vcpu);
0909 kvm_vgic_sync_hwstate(vcpu);
0910 local_irq_enable();
0911 preempt_enable();
0912 continue;
0913 }
0914
0915 kvm_arm_setup_debug(vcpu);
0916 kvm_arch_vcpu_ctxflush_fp(vcpu);
0917
0918
0919
0920
0921 trace_kvm_entry(*vcpu_pc(vcpu));
0922 guest_timing_enter_irqoff();
0923
0924 ret = kvm_arm_vcpu_enter_exit(vcpu);
0925
0926 vcpu->mode = OUTSIDE_GUEST_MODE;
0927 vcpu->stat.exits++;

                /* Back from the guest. */
                kvm_arm_clear_debug(vcpu);

                /*
                 * We must sync the PMU state before the vgic state so
                 * that the vgic can properly sample the updated state of the
                 * interrupt line.
                 */
                kvm_pmu_sync_hwstate(vcpu);
0940
                /*
                 * Sync the vgic state before syncing the timer state because
                 * the timer code needs to know if the virtual timer
                 * interrupts are active.
                 */
                kvm_vgic_sync_hwstate(vcpu);
0947
                /*
                 * Sync the timer hardware state before enabling interrupts
                 * as we don't want vtimer interrupts to race with syncing
                 * the timer virtual interrupt state.
                 */
                if (static_branch_unlikely(&userspace_irqchip_in_use))
                        kvm_timer_sync_user(vcpu);
0955
0956 kvm_arch_vcpu_ctxsync_fp(vcpu);
0957
                /*
                 * We must ensure that any pending interrupts are taken before
                 * we exit guest timing so that timer ticks are accounted as
                 * guest time. Transiently unmask interrupts so that any
                 * pending interrupts are taken.
                 *
                 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
                 * context synchronization event) is necessary to ensure that
                 * pending interrupts are taken.
                 */
                if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
                        local_irq_enable();
                        isb();
                        local_irq_disable();
                }
0973
0974 guest_timing_exit_irqoff();
0975
0976 local_irq_enable();
0977
0978 trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

                /* Exit types that need handling before we can be preempted. */
                handle_exit_early(vcpu, ret);
0982
0983 preempt_enable();
0984
                /*
                 * The ARMv8 architecture doesn't give the hypervisor
                 * a mechanism to prevent a guest from dropping to AArch32 EL0
                 * if implemented by the CPU. If the guest is spotted in such
                 * a state and it wasn't supposed to be there (such as the
                 * asymmetric AArch32 case), return to userspace with
                 * a fatal error.
                 */
                if (vcpu_mode_is_bad_32bit(vcpu)) {
                        /*
                         * As we have caught the guest red-handed, decide that
                         * it isn't fit for purpose anymore by making the vcpu
                         * invalid. The VMM can try and fix it by issuing a
                         * KVM_ARM_VCPU_INIT if it really wants to.
                         */
                        vcpu->arch.target = -1;
                        ret = ARM_EXCEPTION_IL;
                }
1003
1004 ret = handle_exit(vcpu, ret);
1005 }
1006
        /* Tell userspace about in-kernel device output levels. */
        if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1009 kvm_timer_update_run(vcpu);
1010 kvm_pmu_update_run(vcpu);
1011 }
1012
1013 kvm_sigset_deactivate(vcpu);
1014
1015 out:
        /*
         * In the unlikely event that we are returning to userspace
         * with pending exceptions or PC adjustment, commit these
         * adjustments in order to give userspace a consistent view of
         * the vcpu state. Note that this relies on __kvm_adjust_pc()
         * being preempt-safe on VHE.
         */
        if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
                     vcpu_get_flag(vcpu, INCREMENT_PC)))
                kvm_call_hyp(__kvm_adjust_pc, vcpu);
1026
1027 vcpu_put(vcpu);
1028 return ret;
1029 }
1030
1031 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
1032 {
1033 int bit_index;
1034 bool set;
1035 unsigned long *hcr;
1036
1037 if (number == KVM_ARM_IRQ_CPU_IRQ)
1038 bit_index = __ffs(HCR_VI);
1039 else
1040 bit_index = __ffs(HCR_VF);
1041
1042 hcr = vcpu_hcr(vcpu);
1043 if (level)
1044 set = test_and_set_bit(bit_index, hcr);
1045 else
1046 set = test_and_clear_bit(bit_index, hcr);
1047
        /*
         * If we didn't change anything, no need to wake up or kick other CPUs
         */
        if (set == level)
                return 0;
1053
        /*
         * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
         * trigger a world-switch round on the running physical CPU to set
         * the virtual IRQ/FIQ fields in the HCR appropriately.
         */
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
1061
1062 return 0;
1063 }
1064
1065 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1066 bool line_status)
1067 {
1068 u32 irq = irq_level->irq;
1069 unsigned int irq_type, vcpu_idx, irq_num;
1070 int nrcpus = atomic_read(&kvm->online_vcpus);
1071 struct kvm_vcpu *vcpu = NULL;
1072 bool level = irq_level->level;
1073
1074 irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
1075 vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
1076 vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
1077 irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
1078
1079 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
1080
1081 switch (irq_type) {
1082 case KVM_ARM_IRQ_TYPE_CPU:
1083 if (irqchip_in_kernel(kvm))
1084 return -ENXIO;
1085
1086 if (vcpu_idx >= nrcpus)
1087 return -EINVAL;
1088
1089 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
1090 if (!vcpu)
1091 return -EINVAL;
1092
1093 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
1094 return -EINVAL;
1095
1096 return vcpu_interrupt_line(vcpu, irq_num, level);
1097 case KVM_ARM_IRQ_TYPE_PPI:
1098 if (!irqchip_in_kernel(kvm))
1099 return -ENXIO;
1100
1101 if (vcpu_idx >= nrcpus)
1102 return -EINVAL;
1103
1104 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
1105 if (!vcpu)
1106 return -EINVAL;
1107
1108 if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
1109 return -EINVAL;
1110
1111 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
1112 case KVM_ARM_IRQ_TYPE_SPI:
1113 if (!irqchip_in_kernel(kvm))
1114 return -ENXIO;
1115
1116 if (irq_num < VGIC_NR_PRIVATE_IRQS)
1117 return -EINVAL;
1118
1119 return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
1120 }
1121
1122 return -EINVAL;
1123 }
1124
1125 static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
1126 const struct kvm_vcpu_init *init)
1127 {
1128 unsigned int i, ret;
1129 u32 phys_target = kvm_target_cpu();
1130
1131 if (init->target != phys_target)
1132 return -EINVAL;
1133
        /*
         * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
         * use the same target.
         */
        if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
                return -EINVAL;
1140
1141
1142 for (i = 0; i < sizeof(init->features) * 8; i++) {
1143 bool set = (init->features[i / 32] & (1 << (i % 32)));
1144
1145 if (set && i >= KVM_VCPU_MAX_FEATURES)
1146 return -ENOENT;
1147
                /*
                 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
                 * use the same feature set.
                 */
                if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
                    test_bit(i, vcpu->arch.features) != set)
                        return -EINVAL;
1155
1156 if (set)
1157 set_bit(i, vcpu->arch.features);
1158 }
1159
1160 vcpu->arch.target = phys_target;
1161
1162
1163 ret = kvm_reset_vcpu(vcpu);
1164 if (ret) {
1165 vcpu->arch.target = -1;
1166 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
1167 }
1168
1169 return ret;
1170 }
1171
1172 static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
1173 struct kvm_vcpu_init *init)
1174 {
1175 int ret;
1176
1177 ret = kvm_vcpu_set_target(vcpu, init);
1178 if (ret)
1179 return ret;
1180
        /*
         * Ensure a rebooted VM will fault in RAM pages and detect if the
         * guest MMU is turned off and flush the caches as needed.
         *
         * S2FWB enforces all memory accesses to RAM being cacheable,
         * ensuring that the data side is always coherent. We still
         * need to invalidate the I-cache though, as FWB does *not*
         * imply CTR_EL0.DIC.
         */
        if (vcpu_has_run_once(vcpu)) {
                if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                        stage2_unmap_vm(vcpu->kvm);
                else
                        icache_inval_all_pou();
        }
1196
1197 vcpu_reset_hcr(vcpu);
1198 vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
1199
        /*
         * Handle the "start in power-off" case.
         */
        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
                kvm_arm_vcpu_power_off(vcpu);
        else
                vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
1207
1208 return 0;
1209 }
1210
1211 static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1212 struct kvm_device_attr *attr)
1213 {
1214 int ret = -ENXIO;
1215
1216 switch (attr->group) {
1217 default:
1218 ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1219 break;
1220 }
1221
1222 return ret;
1223 }
1224
1225 static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1226 struct kvm_device_attr *attr)
1227 {
1228 int ret = -ENXIO;
1229
1230 switch (attr->group) {
1231 default:
1232 ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1233 break;
1234 }
1235
1236 return ret;
1237 }
1238
1239 static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1240 struct kvm_device_attr *attr)
1241 {
1242 int ret = -ENXIO;
1243
1244 switch (attr->group) {
1245 default:
1246 ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1247 break;
1248 }
1249
1250 return ret;
1251 }
1252
1253 static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
1254 struct kvm_vcpu_events *events)
1255 {
1256 memset(events, 0, sizeof(*events));
1257
1258 return __kvm_arm_vcpu_get_events(vcpu, events);
1259 }
1260
1261 static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
1262 struct kvm_vcpu_events *events)
1263 {
1264 int i;
1265
1266
1267 for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1268 if (events->reserved[i])
1269 return -EINVAL;
1270
1271
1272 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1273 if (events->exception.pad[i])
1274 return -EINVAL;
1275
1276 return __kvm_arm_vcpu_set_events(vcpu, events);
1277 }
1278
1279 long kvm_arch_vcpu_ioctl(struct file *filp,
1280 unsigned int ioctl, unsigned long arg)
1281 {
1282 struct kvm_vcpu *vcpu = filp->private_data;
1283 void __user *argp = (void __user *)arg;
1284 struct kvm_device_attr attr;
1285 long r;
1286
1287 switch (ioctl) {
1288 case KVM_ARM_VCPU_INIT: {
1289 struct kvm_vcpu_init init;
1290
1291 r = -EFAULT;
1292 if (copy_from_user(&init, argp, sizeof(init)))
1293 break;
1294
1295 r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1296 break;
1297 }
1298 case KVM_SET_ONE_REG:
1299 case KVM_GET_ONE_REG: {
1300 struct kvm_one_reg reg;
1301
1302 r = -ENOEXEC;
1303 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1304 break;
1305
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;

                /*
                 * We could owe a reset due to PSCI. Handle the pending reset
                 * here to ensure userspace register accesses are ordered
                 * after the reset.
                 */
                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                        kvm_reset_vcpu(vcpu);

                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arm_set_reg(vcpu, &reg);
                else
                        r = kvm_arm_get_reg(vcpu, &reg);
                break;
1323 }
1324 case KVM_GET_REG_LIST: {
1325 struct kvm_reg_list __user *user_list = argp;
1326 struct kvm_reg_list reg_list;
1327 unsigned n;
1328
1329 r = -ENOEXEC;
1330 if (unlikely(!kvm_vcpu_initialized(vcpu)))
1331 break;
1332
1333 r = -EPERM;
1334 if (!kvm_arm_vcpu_is_finalized(vcpu))
1335 break;
1336
                r = -EFAULT;
                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        break;
                n = reg_list.n;
                reg_list.n = kvm_arm_num_regs(vcpu);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        break;
                r = -E2BIG;
                if (n < reg_list.n)
                        break;
                r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1348 break;
1349 }
1350 case KVM_SET_DEVICE_ATTR: {
1351 r = -EFAULT;
1352 if (copy_from_user(&attr, argp, sizeof(attr)))
1353 break;
1354 r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1355 break;
1356 }
1357 case KVM_GET_DEVICE_ATTR: {
1358 r = -EFAULT;
1359 if (copy_from_user(&attr, argp, sizeof(attr)))
1360 break;
1361 r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1362 break;
1363 }
1364 case KVM_HAS_DEVICE_ATTR: {
1365 r = -EFAULT;
1366 if (copy_from_user(&attr, argp, sizeof(attr)))
1367 break;
1368 r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1369 break;
1370 }
1371 case KVM_GET_VCPU_EVENTS: {
1372 struct kvm_vcpu_events events;
1373
1374 if (kvm_arm_vcpu_get_events(vcpu, &events))
1375 return -EINVAL;
1376
1377 if (copy_to_user(argp, &events, sizeof(events)))
1378 return -EFAULT;
1379
1380 return 0;
1381 }
1382 case KVM_SET_VCPU_EVENTS: {
1383 struct kvm_vcpu_events events;
1384
1385 if (copy_from_user(&events, argp, sizeof(events)))
1386 return -EFAULT;
1387
1388 return kvm_arm_vcpu_set_events(vcpu, &events);
1389 }
1390 case KVM_ARM_VCPU_FINALIZE: {
1391 int what;
1392
1393 if (!kvm_vcpu_initialized(vcpu))
1394 return -ENOEXEC;
1395
1396 if (get_user(what, (const int __user *)argp))
1397 return -EFAULT;
1398
1399 return kvm_arm_vcpu_finalize(vcpu, what);
1400 }
1401 default:
1402 r = -EINVAL;
1403 }
1404
1405 return r;
1406 }
1407
1408 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1409 {
1410
1411 }
1412
1413 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1414 const struct kvm_memory_slot *memslot)
1415 {
1416 kvm_flush_remote_tlbs(kvm);
1417 }
1418
1419 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1420 struct kvm_arm_device_addr *dev_addr)
1421 {
1422 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
1423 case KVM_ARM_DEVICE_VGIC_V2:
1424 if (!vgic_present)
1425 return -ENXIO;
1426 return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
1427 default:
1428 return -ENODEV;
1429 }
1430 }
1431
1432 long kvm_arch_vm_ioctl(struct file *filp,
1433 unsigned int ioctl, unsigned long arg)
1434 {
1435 struct kvm *kvm = filp->private_data;
1436 void __user *argp = (void __user *)arg;
1437
1438 switch (ioctl) {
1439 case KVM_CREATE_IRQCHIP: {
1440 int ret;
1441 if (!vgic_present)
1442 return -ENXIO;
1443 mutex_lock(&kvm->lock);
1444 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1445 mutex_unlock(&kvm->lock);
1446 return ret;
1447 }
1448 case KVM_ARM_SET_DEVICE_ADDR: {
1449 struct kvm_arm_device_addr dev_addr;
1450
1451 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1452 return -EFAULT;
1453 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1454 }
1455 case KVM_ARM_PREFERRED_TARGET: {
1456 struct kvm_vcpu_init init;
1457
1458 kvm_vcpu_preferred_target(&init);
1459
1460 if (copy_to_user(argp, &init, sizeof(init)))
1461 return -EFAULT;
1462
1463 return 0;
1464 }
1465 case KVM_ARM_MTE_COPY_TAGS: {
1466 struct kvm_arm_copy_mte_tags copy_tags;
1467
                if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
                        return -EFAULT;
                return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
1471 }
1472 default:
1473 return -EINVAL;
1474 }
1475 }
1476
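/* Size of the nVHE hypervisor's per-CPU data section. */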
1477 static unsigned long nvhe_percpu_size(void)
1478 {
1479 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
1480 (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
1481 }
1482
1483 static unsigned long nvhe_percpu_order(void)
1484 {
1485 unsigned long size = nvhe_percpu_size();
1486
1487 return size ? get_order(size) : 0;
1488 }
1489
1490
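/* EL2 vector base addresses, one per Spectre mitigation slot. */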
1491 static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
1492
1493 static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
1494 {
1495 hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
1496 }
1497
1498 static int kvm_init_vector_slots(void)
1499 {
1500 int err;
1501 void *base;
1502
1503 base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
1504 kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
1505
1506 base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
1507 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
1508
1509 if (kvm_system_needs_idmapped_vectors() &&
1510 !is_protected_kvm_enabled()) {
1511 err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
1512 __BP_HARDEN_HYP_VECS_SZ, &base);
1513 if (err)
1514 return err;
1515 }
1516
1517 kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
1518 kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
1519 return 0;
1520 }
1521
1522 static void cpu_prepare_hyp_mode(int cpu)
1523 {
1524 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
1525 unsigned long tcr;
1526
        /*
         * Calculate the raw per-cpu offset without a translation from the
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
         * Also drop the KASAN tag which gets in the way...
         */
        params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
                            (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
1535
1536 params->mair_el2 = read_sysreg(mair_el1);
1537
        /*
         * Derive TCR_EL2 from the host's TCR_EL1, but use the ID map's T0SZ
         * so that the hypervisor covers the same VA range as the idmap.
         */
        tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
        tcr &= ~TCR_T0SZ_MASK;
        tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
        params->tcr_el2 = tcr;
1556
1557 params->pgd_pa = kvm_mmu_get_httbr();
1558 if (is_protected_kvm_enabled())
1559 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
1560 else
1561 params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
1562 params->vttbr = params->vtcr = 0;
1563
        /*
         * Flush the init params from the data cache because the struct will
         * be read while the MMU is off.
         */
        kvm_flush_dcache_to_poc(params, sizeof(*params));
1569 }
1570
1571 static void hyp_install_host_vector(void)
1572 {
1573 struct kvm_nvhe_init_params *params;
1574 struct arm_smccc_res res;
1575
        /* Switch from the HYP stub to our own HYP init vector */
        __hyp_set_vectors(kvm_get_idmap_vector());

        /*
         * Call initialization code, and switch to the full blown HYP code.
         * If the cpucaps haven't been finalized yet, something has gone very
         * wrong, and hyp will crash and burn when it uses any
         * cpus_have_const_cap() wrapper.
         */
        BUG_ON(!system_capabilities_finalized());
        params = this_cpu_ptr_nvhe_sym(kvm_init_params);
        arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
        WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
1589 }
1590
1591 static void cpu_init_hyp_mode(void)
1592 {
1593 hyp_install_host_vector();
1594
        /*
         * Disabling SSBD on a non-VHE system requires us to enable SSBS
         * at EL2.
         */
        if (this_cpu_has_cap(ARM64_SSBS) &&
            arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
                kvm_call_hyp_nvhe(__kvm_enable_ssbs);
        }
1603 }
1604
1605 static void cpu_hyp_reset(void)
1606 {
1607 if (!is_kernel_in_hyp_mode())
1608 __hyp_reset_vectors();
1609 }
1610
/*
 * Select the EL2 exception vectors for this CPU. Depending on the Spectre
 * mitigation state, the vectors may include a hardening sequence and/or be
 * mapped next to the idmap page (see kvm_init_vector_slots()). For
 * protected KVM the slot is programmed by the hypervisor itself via the
 * __pkvm_cpu_set_vector hypercall.
 */
static void cpu_set_hyp_vector(void)
1632 {
1633 struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
1634 void *vector = hyp_spectre_vector_selector[data->slot];
1635
1636 if (!is_protected_kvm_enabled())
1637 *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
1638 else
1639 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
1640 }
1641
1642 static void cpu_hyp_init_context(void)
1643 {
1644 kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
1645
1646 if (!is_kernel_in_hyp_mode())
1647 cpu_init_hyp_mode();
1648 }
1649
1650 static void cpu_hyp_init_features(void)
1651 {
1652 cpu_set_hyp_vector();
1653 kvm_arm_init_debug();
1654
1655 if (is_kernel_in_hyp_mode())
1656 kvm_timer_init_vhe();
1657
1658 if (vgic_present)
1659 kvm_vgic_init_cpu_hardware();
1660 }
1661
1662 static void cpu_hyp_reinit(void)
1663 {
1664 cpu_hyp_reset();
1665 cpu_hyp_init_context();
1666 cpu_hyp_init_features();
1667 }
1668
1669 static void _kvm_arch_hardware_enable(void *discard)
1670 {
1671 if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
1672 cpu_hyp_reinit();
1673 __this_cpu_write(kvm_arm_hardware_enabled, 1);
1674 }
1675 }
1676
1677 int kvm_arch_hardware_enable(void)
1678 {
1679 _kvm_arch_hardware_enable(NULL);
1680 return 0;
1681 }
1682
1683 static void _kvm_arch_hardware_disable(void *discard)
1684 {
1685 if (__this_cpu_read(kvm_arm_hardware_enabled)) {
1686 cpu_hyp_reset();
1687 __this_cpu_write(kvm_arm_hardware_enabled, 0);
1688 }
1689 }
1690
1691 void kvm_arch_hardware_disable(void)
1692 {
1693 if (!is_protected_kvm_enabled())
1694 _kvm_arch_hardware_disable(NULL);
1695 }
1696
1697 #ifdef CONFIG_CPU_PM
1698 static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
1699 unsigned long cmd,
1700 void *v)
1701 {
        /*
         * kvm_arm_hardware_enabled is left with its old value over
         * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
         * re-enable hyp.
         */
        switch (cmd) {
        case CPU_PM_ENTER:
                if (__this_cpu_read(kvm_arm_hardware_enabled))
                        /*
                         * don't update kvm_arm_hardware_enabled here
                         * so that the hardware will be re-enabled
                         * when we resume. See below.
                         */
                        cpu_hyp_reset();

                return NOTIFY_OK;
1718 case CPU_PM_ENTER_FAILED:
1719 case CPU_PM_EXIT:
                if (__this_cpu_read(kvm_arm_hardware_enabled))
                        /* The hardware was enabled before suspend. */
                        cpu_hyp_reinit();
1723
1724 return NOTIFY_OK;
1725
1726 default:
1727 return NOTIFY_DONE;
1728 }
1729 }
1730
1731 static struct notifier_block hyp_init_cpu_pm_nb = {
1732 .notifier_call = hyp_init_cpu_pm_notifier,
1733 };
1734
1735 static void hyp_cpu_pm_init(void)
1736 {
1737 if (!is_protected_kvm_enabled())
1738 cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
1739 }
1740 static void hyp_cpu_pm_exit(void)
1741 {
1742 if (!is_protected_kvm_enabled())
1743 cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
1744 }
1745 #else
1746 static inline void hyp_cpu_pm_init(void)
1747 {
1748 }
1749 static inline void hyp_cpu_pm_exit(void)
1750 {
1751 }
1752 #endif
1753
1754 static void init_cpu_logical_map(void)
1755 {
1756 unsigned int cpu;
1757
        /*
         * Copy the MPIDR <-> logical CPU ID mapping to hyp.
         * Only copy the set of online CPUs whose features have been checked
         * against the finalized system capabilities. The hypervisor will not
         * allow any other CPUs from the `possible` set to boot.
         */
        for_each_online_cpu(cpu)
                hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
1766 }
1767
1768 #define init_psci_0_1_impl_state(config, what) \
1769 config.psci_0_1_ ## what ## _implemented = psci_ops.what
1770
1771 static bool init_psci_relay(void)
1772 {
        /*
         * If PSCI has not been initialized, protected KVM cannot install
         * itself on newly booted CPUs.
         */
        if (!psci_ops.get_version) {
                kvm_err("Cannot initialize protected mode without PSCI\n");
                return false;
        }
1781
1782 kvm_host_psci_config.version = psci_ops.get_version();
1783
1784 if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
1785 kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
1786 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
1787 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
1788 init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
1789 init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
1790 }
1791 return true;
1792 }
1793
1794 static int init_subsystems(void)
1795 {
1796 int err = 0;
1797
        /*
         * Enable hardware so that subsystem initialisation can access EL2.
         */
        on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
1802
1803
1804
1805
1806 hyp_cpu_pm_init();
1807
1808
1809
1810
1811 err = kvm_vgic_hyp_init();
1812 switch (err) {
1813 case 0:
1814 vgic_present = true;
1815 break;
1816 case -ENODEV:
1817 case -ENXIO:
1818 vgic_present = false;
1819 err = 0;
1820 break;
1821 default:
1822 goto out;
1823 }
1824
1825
1826
1827
1828 err = kvm_timer_hyp_init(vgic_present);
1829 if (err)
1830 goto out;
1831
1832 kvm_register_perf_callbacks(NULL);
1833
1834 out:
1835 if (err || !is_protected_kvm_enabled())
1836 on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
1837
1838 return err;
1839 }
1840
1841 static void teardown_hyp_mode(void)
1842 {
1843 int cpu;
1844
1845 free_hyp_pgds();
1846 for_each_possible_cpu(cpu) {
1847 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1848 free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
1849 }
1850 }
1851
1852 static int do_pkvm_init(u32 hyp_va_bits)
1853 {
1854 void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
1855 int ret;
1856
1857 preempt_disable();
1858 cpu_hyp_init_context();
1859 ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
1860 num_possible_cpus(), kern_hyp_va(per_cpu_base),
1861 hyp_va_bits);
1862 cpu_hyp_init_features();
1863
1864
1865
1866
1867
1868 __this_cpu_write(kvm_arm_hardware_enabled, 1);
1869 preempt_enable();
1870
1871 return ret;
1872 }
1873
1874 static int kvm_hyp_init_protection(u32 hyp_va_bits)
1875 {
1876 void *addr = phys_to_virt(hyp_mem_base);
1877 int ret;
1878
1879 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1880 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
1881 kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
1882 kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
1883 kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
1884 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
1885 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
1886 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
1887
1888 ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
1889 if (ret)
1890 return ret;
1891
1892 ret = do_pkvm_init(hyp_va_bits);
1893 if (ret)
1894 return ret;
1895
1896 free_hyp_pgds();
1897
1898 return 0;
1899 }
1900
/*
 * Allocate and map the resources needed to run the nVHE hypervisor at EL2
 * (stacks, per-CPU data, and the code/rodata/bss mappings).
 */
static int init_hyp_mode(void)
1905 {
1906 u32 hyp_va_bits;
1907 int cpu;
1908 int err = -ENOMEM;
1909
        /*
         * The protected Hyp-mode cannot be initialized if the memory pool
         * allocation has failed.
         */
        if (is_protected_kvm_enabled() && !hyp_mem_base)
                goto out_err;
1916
1917
1918
1919
1920 err = kvm_mmu_init(&hyp_va_bits);
1921 if (err)
1922 goto out_err;
1923
1924
1925
1926
1927 for_each_possible_cpu(cpu) {
1928 unsigned long stack_page;
1929
1930 stack_page = __get_free_page(GFP_KERNEL);
1931 if (!stack_page) {
1932 err = -ENOMEM;
1933 goto out_err;
1934 }
1935
1936 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
1937 }
1938
1939
1940
1941
1942 for_each_possible_cpu(cpu) {
1943 struct page *page;
1944 void *page_addr;
1945
1946 page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
1947 if (!page) {
1948 err = -ENOMEM;
1949 goto out_err;
1950 }
1951
1952 page_addr = page_address(page);
1953 memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
1954 kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
1955 }
1956
1957
1958
1959
1960 err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
1961 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
1962 if (err) {
1963 kvm_err("Cannot map world-switch code\n");
1964 goto out_err;
1965 }
1966
1967 err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
1968 kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
1969 if (err) {
1970 kvm_err("Cannot map .hyp.rodata section\n");
1971 goto out_err;
1972 }
1973
1974 err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
1975 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
1976 if (err) {
1977 kvm_err("Cannot map rodata section\n");
1978 goto out_err;
1979 }
1980
1981
1982
1983
1984
1985
1986 err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
1987 kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
1988 if (err) {
1989 kvm_err("Cannot map hyp bss section: %d\n", err);
1990 goto out_err;
1991 }
1992
1993 err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
1994 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
1995 if (err) {
1996 kvm_err("Cannot map bss section\n");
1997 goto out_err;
1998 }
1999
2000
2001
2002
2003 for_each_possible_cpu(cpu) {
2004 struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2005 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
2006 unsigned long hyp_addr;
2007
                /*
                 * Allocate a contiguous HYP private VA range for the stack
                 * and guard page. The allocation is also aligned based on
                 * the order of its size.
                 */
                err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
2014 if (err) {
2015 kvm_err("Cannot allocate hyp stack guard page\n");
2016 goto out_err;
2017 }
2018
                /*
                 * Since the stack grows downwards, map the stack to the page
                 * at the higher address and leave the lower guard page
                 * unbacked.
                 *
                 * Any valid stack address now has the PAGE_SHIFT bit as 1
                 * and addresses corresponding to the guard page have the
                 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
                 */
                err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
                                            __pa(stack_page), PAGE_HYP);
2030 if (err) {
2031 kvm_err("Cannot map hyp stack\n");
2032 goto out_err;
2033 }
2034
                /*
                 * Save the stack PA in nvhe_init_params. This will be needed
                 * to recreate the stack mapping in protected nVHE mode.
                 * __hyp_pa() won't do the right thing there, since the stack
                 * has been mapped in the flexible private VA space.
                 */
                params->stack_pa = __pa(stack_page);

                params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
2044 }
2045
2046 for_each_possible_cpu(cpu) {
2047 char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
2048 char *percpu_end = percpu_begin + nvhe_percpu_size();
2049
2050
2051 err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
2052 if (err) {
2053 kvm_err("Cannot map hyp percpu region\n");
2054 goto out_err;
2055 }
2056
2057
2058 cpu_prepare_hyp_mode(cpu);
2059 }
2060
2061 if (is_protected_kvm_enabled()) {
2062 init_cpu_logical_map();
2063
2064 if (!init_psci_relay()) {
2065 err = -ENODEV;
2066 goto out_err;
2067 }
2068 }
2069
2070 if (is_protected_kvm_enabled()) {
2071 err = kvm_hyp_init_protection(hyp_va_bits);
2072 if (err) {
2073 kvm_err("Failed to init hyp memory protection\n");
2074 goto out_err;
2075 }
2076 }
2077
2078 return 0;
2079
2080 out_err:
2081 teardown_hyp_mode();
2082 kvm_err("error initializing Hyp mode: %d\n", err);
2083 return err;
2084 }
2085
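/* Called on each CPU to finalize host stage-2 protection for protected KVM. */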
2086 static void _kvm_host_prot_finalize(void *arg)
2087 {
2088 int *err = arg;
2089
2090 if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
2091 WRITE_ONCE(*err, -EINVAL);
2092 }
2093
2094 static int pkvm_drop_host_privileges(void)
2095 {
2096 int ret = 0;
2097
        /*
         * Flip the static key upfront as that may no longer be possible
         * once the host stage 2 is installed.
         */
        static_branch_enable(&kvm_protected_mode_initialized);
2103 on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
2104 return ret;
2105 }
2106
2107 static int finalize_hyp_mode(void)
2108 {
2109 if (!is_protected_kvm_enabled())
2110 return 0;
2111
        /*
         * Exclude HYP sections from kmemleak so that they don't get peeked
         * at, which would end badly once inaccessible to the host.
         */
        kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
        kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
2118 return pkvm_drop_host_privileges();
2119 }
2120
2121 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
2122 {
2123 struct kvm_vcpu *vcpu;
2124 unsigned long i;
2125
2126 mpidr &= MPIDR_HWID_BITMASK;
2127 kvm_for_each_vcpu(i, vcpu, kvm) {
2128 if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
2129 return vcpu;
2130 }
2131 return NULL;
2132 }
2133
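/* irqbypass: allow irqfd producers to be forwarded directly to the vGIC (GICv4). */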
2134 bool kvm_arch_has_irq_bypass(void)
2135 {
2136 return true;
2137 }
2138
2139 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
2140 struct irq_bypass_producer *prod)
2141 {
2142 struct kvm_kernel_irqfd *irqfd =
2143 container_of(cons, struct kvm_kernel_irqfd, consumer);
2144
2145 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
2146 &irqfd->irq_entry);
2147 }
2148 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
2149 struct irq_bypass_producer *prod)
2150 {
2151 struct kvm_kernel_irqfd *irqfd =
2152 container_of(cons, struct kvm_kernel_irqfd, consumer);
2153
2154 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
2155 &irqfd->irq_entry);
2156 }
2157
2158 void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
2159 {
2160 struct kvm_kernel_irqfd *irqfd =
2161 container_of(cons, struct kvm_kernel_irqfd, consumer);
2162
2163 kvm_arm_halt_guest(irqfd->kvm);
2164 }
2165
2166 void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
2167 {
2168 struct kvm_kernel_irqfd *irqfd =
2169 container_of(cons, struct kvm_kernel_irqfd, consumer);
2170
2171 kvm_arm_resume_guest(irqfd->kvm);
2172 }
2173
/*
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
2178 {
2179 int err;
2180 bool in_hyp_mode;
2181
2182 if (!is_hyp_mode_available()) {
2183 kvm_info("HYP mode not available\n");
2184 return -ENODEV;
2185 }
2186
2187 if (kvm_get_mode() == KVM_MODE_NONE) {
2188 kvm_info("KVM disabled from command line\n");
2189 return -ENODEV;
2190 }
2191
2192 err = kvm_sys_reg_table_init();
2193 if (err) {
2194 kvm_info("Error initializing system register tables");
2195 return err;
2196 }
2197
2198 in_hyp_mode = is_kernel_in_hyp_mode();
2199
2200 if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
2201 cpus_have_final_cap(ARM64_WORKAROUND_1508412))
2202 kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
2203 "Only trusted guests should be used on this system.\n");
2204
2205 err = kvm_set_ipa_limit();
2206 if (err)
2207 return err;
2208
2209 err = kvm_arm_init_sve();
2210 if (err)
2211 return err;
2212
2213 err = kvm_arm_vmid_alloc_init();
2214 if (err) {
2215 kvm_err("Failed to initialize VMID allocator.\n");
2216 return err;
2217 }
2218
2219 if (!in_hyp_mode) {
2220 err = init_hyp_mode();
2221 if (err)
2222 goto out_err;
2223 }
2224
2225 err = kvm_init_vector_slots();
2226 if (err) {
2227 kvm_err("Cannot initialise vector slots\n");
2228 goto out_err;
2229 }
2230
2231 err = init_subsystems();
2232 if (err)
2233 goto out_hyp;
2234
2235 if (!in_hyp_mode) {
2236 err = finalize_hyp_mode();
2237 if (err) {
2238 kvm_err("Failed to finalize Hyp protection\n");
2239 goto out_hyp;
2240 }
2241 }
2242
2243 if (is_protected_kvm_enabled()) {
2244 kvm_info("Protected nVHE mode initialized successfully\n");
2245 } else if (in_hyp_mode) {
2246 kvm_info("VHE mode initialized successfully\n");
2247 } else {
2248 kvm_info("Hyp mode initialized successfully\n");
2249 }
2250
2251 return 0;
2252
2253 out_hyp:
2254 hyp_cpu_pm_exit();
2255 if (!in_hyp_mode)
2256 teardown_hyp_mode();
2257 out_err:
2258 kvm_arm_vmid_alloc_free();
2259 return err;
2260 }
2261
2262
2263 void kvm_arch_exit(void)
2264 {
2265 kvm_unregister_perf_callbacks();
2266 }
2267
2268 static int __init early_kvm_mode_cfg(char *arg)
2269 {
2270 if (!arg)
2271 return -EINVAL;
2272
2273 if (strcmp(arg, "protected") == 0) {
2274 if (!is_kernel_in_hyp_mode())
2275 kvm_mode = KVM_MODE_PROTECTED;
2276 else
2277 pr_warn_once("Protected KVM not available with VHE\n");
2278
2279 return 0;
2280 }
2281
2282 if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
2283 kvm_mode = KVM_MODE_DEFAULT;
2284 return 0;
2285 }
2286
2287 if (strcmp(arg, "none") == 0) {
2288 kvm_mode = KVM_MODE_NONE;
2289 return 0;
2290 }
2291
2292 return -EINVAL;
2293 }
2294 early_param("kvm-arm.mode", early_kvm_mode_cfg);
2295
2296 enum kvm_mode kvm_get_mode(void)
2297 {
2298 return kvm_mode;
2299 }
2300
2301 static int arm_init(void)
2302 {
2303 int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2304 return rc;
2305 }
2306
2307 module_init(arm_init);