// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"
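
/*
 * Initialization is staged: static per-VM state is set up at VM creation
 * (kvm_vgic_early_init), static per-vcpu state at vcpu creation
 * (kvm_vgic_vcpu_init), the VGIC device itself is created by userspace
 * (kvm_vgic_create), the remaining dynamic state is allocated in
 * vgic_init(), and the MMIO regions are finally mapped by
 * kvm_vgic_map_resources() before the first vcpu run.
 */

/* Set up the static, userspace-independent per-VM VGIC state (LPI lists). */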
void kvm_vgic_early_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        INIT_LIST_HEAD(&dist->lpi_list_head);
        INIT_LIST_HEAD(&dist->lpi_translation_cache);
        raw_spin_lock_init(&dist->lpi_list_lock);
}
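
/*
 * kvm_vgic_create: triggered when userspace instantiates the VGIC device
 * (KVM_CREATE_DEVICE or the legacy KVM_CREATE_IRQCHIP). Records the GIC
 * model, adjusts the vcpu limit accordingly and marks the irqchip as
 * in-kernel. Fails if a VGIC already exists or if any vcpu has already run.
 */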
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;
        int ret;

        if (irqchip_in_kernel(kvm))
                return -EEXIST;
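
        /*
         * This function is also reached via KVM_CREATE_IRQCHIP, which has
         * not yet checked whether GICv2 emulation is available on this
         * host, so re-check it here before accepting a GICv2 device.
         */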
        if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
            !kvm_vgic_global_state.can_emulate_gicv2)
                return -ENODEV;

        ret = -EBUSY;
        if (!lock_all_vcpus(kvm))
                return ret;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu_has_run_once(vcpu))
                        goto out_unlock;
        }
        ret = 0;

        if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
                kvm->max_vcpus = VGIC_V2_MAX_CPUS;
        else
                kvm->max_vcpus = VGIC_V3_MAX_CPUS;

        if (atomic_read(&kvm->online_vcpus) > kvm->max_vcpus) {
                ret = -E2BIG;
                goto out_unlock;
        }

        kvm->arch.vgic.in_kernel = true;
        kvm->arch.vgic.vgic_model = type;

        kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

        if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
                kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
        else
                INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

out_unlock:
        unlock_all_vcpus(kvm);
        return ret;
}
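
/*
 * kvm_vgic_dist_init: allocate and initialize the distributor's SPI array;
 * each SPI initially targets vcpu 0, with the group/targets fields set
 * according to the GIC model.
 */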
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
        int i;

        dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
        if (!dist->spis)
                return -ENOMEM;
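
        /*
         * The irq_lock is not taken here: the VGIC is not initialized yet,
         * so nothing else can access these structs concurrently.
         */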
        for (i = 0; i < nr_spis; i++) {
                struct vgic_irq *irq = &dist->spis[i];

                irq->intid = i + VGIC_NR_PRIVATE_IRQS;
                INIT_LIST_HEAD(&irq->ap_list);
                raw_spin_lock_init(&irq->irq_lock);
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu0;
                kref_init(&irq->refcount);
                switch (dist->vgic_model) {
                case KVM_DEV_TYPE_ARM_VGIC_V2:
                        irq->targets = 0;
                        irq->group = 0;
                        break;
                case KVM_DEV_TYPE_ARM_VGIC_V3:
                        irq->mpidr = 0;
                        irq->group = 1;
                        break;
                default:
                        kfree(dist->spis);
                        dist->spis = NULL;
                        return -EINVAL;
                }
        }
        return 0;
}
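
/*
 * kvm_vgic_vcpu_init: set up the static per-vcpu VGIC state (ap_list and
 * private SGI/PPI structs) and, for an in-kernel GICv3, register the
 * redistributor MMIO device for this vcpu.
 */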
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int ret = 0;
        int i;

        vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
        raw_spin_lock_init(&vgic_cpu->ap_list_lock);
        atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
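
        /*
         * Enable and configure all SGIs to be edge-triggered and
         * configure all PPIs as level-triggered.
         */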
        for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
                struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

                INIT_LIST_HEAD(&irq->ap_list);
                raw_spin_lock_init(&irq->irq_lock);
                irq->intid = i;
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu;
                kref_init(&irq->refcount);
                if (vgic_irq_is_sgi(i)) {
                        /* SGIs */
                        irq->enabled = 1;
                        irq->config = VGIC_CONFIG_EDGE;
                } else {
                        /* PPIs */
                        irq->config = VGIC_CONFIG_LEVEL;
                }
        }

        if (!irqchip_in_kernel(vcpu->kvm))
                return 0;
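
        /*
         * If we are creating a VCPU with a GICv3 we must also register the
         * KVM io device for the redistributor that belongs to this VCPU.
         */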
        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                mutex_lock(&vcpu->kvm->lock);
                ret = vgic_register_redist_iodev(vcpu);
                mutex_unlock(&vcpu->kvm->lock);
        }

        return ret;
}

static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_enable(vcpu);
        else
                vgic_v3_enable(vcpu);
}
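
/*
 * vgic_init: allocates and initializes the distributor and per-vcpu data
 * structures, dimensioned by the number of SPIs and the number of vcpus.
 * Reached either lazily before the first vcpu run (GICv2) or explicitly
 * through the KVM device API.
 */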
int vgic_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int ret = 0, i;
        unsigned long idx;

        if (vgic_initialized(kvm))
                return 0;

        /* Are we also in the middle of creating a VCPU? */
        if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
                return -EBUSY;

        /* Freeze the number of SPIs. */
        if (!dist->nr_spis)
                dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

        ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
        if (ret)
                goto out;

        /* Initialize groups on CPUs created before the VGIC type was known. */
        kvm_for_each_vcpu(idx, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

                for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
                        struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
                        switch (dist->vgic_model) {
                        case KVM_DEV_TYPE_ARM_VGIC_V3:
                                irq->group = 1;
                                irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
                                break;
                        case KVM_DEV_TYPE_ARM_VGIC_V2:
                                irq->group = 0;
                                irq->targets = 1U << idx;
                                break;
                        default:
                                ret = -EINVAL;
                                goto out;
                        }
                }
        }

        if (vgic_has_its(kvm))
                vgic_lpi_translation_cache_init(kvm);
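
        /*
         * Set up GICv4 direct injection (vLPIs/vSGIs) when the host GIC
         * and the guest configuration support it.
         */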
        if (vgic_supports_direct_msis(kvm)) {
                ret = vgic_v4_init(kvm);
                if (ret)
                        goto out;
        }

        kvm_for_each_vcpu(idx, vcpu, kvm)
                kvm_vgic_vcpu_enable(vcpu);

        ret = kvm_vgic_setup_default_irq_routing(kvm);
        if (ret)
                goto out;

        vgic_debug_init(kvm);
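
        /*
         * If userspace didn't set the GIC implementation revision,
         * default to the latest one.
         */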
        if (!dist->implementation_rev)
                dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST;
        dist->initialized = true;

out:
        return ret;
}

static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg, *next;

        dist->ready = false;
        dist->initialized = false;

        kfree(dist->spis);
        dist->spis = NULL;
        dist->nr_spis = 0;
        dist->vgic_dist_base = VGIC_ADDR_UNDEF;

        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
                        vgic_v3_free_redist_region(rdreg);
                INIT_LIST_HEAD(&dist->rd_regions);
        } else {
                dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
        }

        if (vgic_has_its(kvm))
                vgic_lpi_translation_cache_destroy(kvm);

        if (vgic_supports_direct_msis(kvm))
                vgic_v4_teardown(kvm);
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
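
        /*
         * Retire all pending LPIs on this vcpu anyway, as we're
         * about to destroy it.
         */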
        vgic_flush_pending_lpis(vcpu);

        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
        vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}

static void __kvm_vgic_destroy(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        vgic_debug_destroy(kvm);

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_destroy(vcpu);

        kvm_vgic_dist_destroy(kvm);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
        mutex_lock(&kvm->lock);
        __kvm_vgic_destroy(kvm);
        mutex_unlock(&kvm->lock);
}
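
/*
 * vgic_lazy_init: lazy initialization is only allowed when the guest sees
 * a GICv2; any other GIC model must be initialized explicitly through the
 * KVM device API.
 */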
int vgic_lazy_init(struct kvm *kvm)
{
        int ret = 0;

        if (unlikely(!vgic_initialized(kvm))) {
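                /*
                 * Automatic initialization is only provided for the
                 * legacy GICv2 case; any other model must be initialized
                 * explicitly via the KVM device API.
                 */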
                if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
                        return -EBUSY;

                mutex_lock(&kvm->lock);
                ret = vgic_init(kvm);
                mutex_unlock(&kvm->lock);
        }

        return ret;
}
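
/*
 * kvm_vgic_map_resources: map the MMIO regions of the in-kernel VGIC
 * (distributor and, depending on the model, CPU interface or
 * redistributors) into the guest physical address space and mark the
 * vgic as ready. Called before the first vcpu run.
 */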
int kvm_vgic_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;

        if (likely(vgic_ready(kvm)))
                return 0;

        mutex_lock(&kvm->lock);
        if (vgic_ready(kvm))
                goto out;

        if (!irqchip_in_kernel(kvm))
                goto out;

        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
                ret = vgic_v2_map_resources(kvm);
        else
                ret = vgic_v3_map_resources(kvm);

        if (ret)
                __kvm_vgic_destroy(kvm);
        else
                dist->ready = true;

out:
        mutex_unlock(&kvm->lock);
        return ret;
}
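
/*
 * CPU hotplug callbacks: enable/disable the per-cpu maintenance interrupt
 * as CPUs come and go.
 */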
static int vgic_init_cpu_starting(unsigned int cpu)
{
        enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
        return 0;
}

static int vgic_init_cpu_dying(unsigned int cpu)
{
        disable_percpu_irq(kvm_vgic_global_state.maint_irq);
        return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
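        /*
         * Nothing to do here: the maintenance interrupt cannot be relied
         * on to arrive synchronously, so it is only used to force an exit
         * from the guest; the actual EOI/maintenance work is performed on
         * the VM-exit path.
         */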
        return IRQ_HANDLED;
}

static struct gic_kvm_info *gic_kvm_info;

void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
{
        BUG_ON(gic_kvm_info != NULL);
        gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (gic_kvm_info)
                *gic_kvm_info = *info;
}
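
/* Per-CPU initialization of the GIC virtualization extension hardware. */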
void kvm_vgic_init_cpu_hardware(void)
{
        BUG_ON(preemptible());
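
        /*
         * Make sure the list registers start out clear so that only the
         * ones actually used need to be programmed later.
         */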
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_init_lrs();
        else
                kvm_call_hyp(__vgic_v3_init_lrs);
}
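
/*
 * kvm_vgic_hyp_init: populate kvm_vgic_global_state according to the host
 * GIC model (via vgic_v2_probe or vgic_v3_probe), request the maintenance
 * interrupt and register the CPU hotplug callbacks.
 */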
int kvm_vgic_hyp_init(void)
{
        bool has_mask;
        int ret;

        if (!gic_kvm_info)
                return -ENODEV;

        has_mask = !gic_kvm_info->no_maint_irq_mask;

        if (has_mask && !gic_kvm_info->maint_irq) {
                kvm_err("No vgic maintenance irq\n");
                return -ENXIO;
        }
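
        /*
         * If we get one of these oddball non-architectural GICs, taint
         * the kernel, as we have no idea how they really behave.
         */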
        if (gic_kvm_info->no_hw_deactivation) {
                kvm_info("Non-architectural vgic, tainting kernel\n");
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
                kvm_vgic_global_state.no_hw_deactivation = true;
        }

        switch (gic_kvm_info->type) {
        case GIC_V2:
                ret = vgic_v2_probe(gic_kvm_info);
                break;
        case GIC_V3:
                ret = vgic_v3_probe(gic_kvm_info);
                if (!ret) {
                        static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
                        kvm_info("GIC system register CPU interface enabled\n");
                }
                break;
        default:
                ret = -ENODEV;
        }

        kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;

        kfree(gic_kvm_info);
        gic_kvm_info = NULL;

        if (ret)
                return ret;

        if (!has_mask)
                return 0;

        ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
                                 vgic_maintenance_handler,
                                 "vgic", kvm_get_running_vcpus());
        if (ret) {
                kvm_err("Cannot register interrupt %d\n",
                        kvm_vgic_global_state.maint_irq);
                return ret;
        }

        ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
                                "kvm/arm/vgic:starting",
                                vgic_init_cpu_starting, vgic_init_cpu_dying);
        if (ret) {
                kvm_err("Cannot register vgic CPU notifier\n");
                goto out_free_irq;
        }

        kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
        return 0;

out_free_irq:
        free_percpu_irq(kvm_vgic_global_state.maint_irq,
                        kvm_get_running_vcpus());
        return ret;
}