#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"
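
/*
 * Overview of what the code in this file does:
 *
 * With GICv4, every vcpu is backed by a vPE. vgic_v4_init() allocates the
 * vPEs and requests a per-vPE "doorbell" interrupt; vgic_v4_teardown()
 * undoes this. vgic_v4_load()/vgic_v4_put() make the vPE resident or
 * non-resident around vcpu execution. While the vPE is non-resident, the
 * doorbell handler records that something became pending and kicks the
 * vcpu. kvm_vgic_v4_{set,unset}_forwarding() map or unmap a host LPI onto
 * a guest vLPI for direct injection, and on GICv4.1 the vSGI helpers
 * switch SGIs between software emulation and direct HW delivery.
 */
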
#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	/*
	 * The GICv4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident, so only update pending_last once the
	 * residency change has completed (vpe_lock serialises the two).
	 */
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled = irq->enabled;
	vpe->sgi_config[irq->intid].group = irq->group;
	vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to the host
	 * vSGI irqchip, and that the pending state lives there.
	 */
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
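
/*
 * Switch all the vSGIs of the VM between their emulated and directly
 * injected forms, depending on the guest's GICD_CTLR.nASSGIreq setting.
 * The guest is paused around the transition.
 */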
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}
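
/*
 * Peek at the pending state of a vLPI in the vPE's Virtual Pending Table.
 * This only gives a reliable answer while the GIC is not concurrently
 * updating the VPT (i.e. the vPE is not mapped/resident).
 */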
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int mask = BIT(irq->intid % BITS_PER_BYTE);
	void *va;
	u8 *ptr;

	va = page_address(vpe->vpt_page);
	ptr = va + irq->intid / BITS_PER_BYTE;

	*val = !!(*ptr & mask);
}
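
/**
 * vgic_v4_init - Initialise the GICv4 data structures
 * @kvm: Pointer to the VM being initialised
 *
 * Allocates one vPE per vcpu and requests the corresponding doorbell
 * interrupts. Safe to call more than once: it does nothing if the vPE
 * array already exists or if GICv4 is not available.
 */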
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_vcpus, ret;
	unsigned long i;

	if (!kvm_vgic_global_state.has_gicv4)
		return 0;

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL_ACCOUNT);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too early.
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = request_irq(irq, vgic_v4_doorbell_handler,
				  0, "vcpu", vcpu);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);

			/*
			 * Trim nr_vpes so that the teardown path below
			 * only frees the doorbells we actually requested.
			 */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}
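
/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being torn down
 *
 * Releases the doorbell interrupts and the vPE array allocated by
 * vgic_v4_init(). Does nothing if vgic_v4_init() never ran.
 */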
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}
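
/*
 * Make the vPE non-resident when the vcpu stops running. @need_db tells
 * the ITS driver whether to arm the doorbell so that we get notified of
 * vLPIs becoming pending while the vcpu is out.
 */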
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, need_db);
}
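
/*
 * Make the vPE resident on the CPU the vcpu is about to run on, after
 * pointing the doorbell affinity at that same CPU.
 */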
int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	/*
	 * Move the doorbell to the CPU we are about to run on, so that
	 * the vPE gets made resident on that CPU's redistributor.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the vPE is resident, drop any doorbell interrupt that
	 * may still be pending. Not needed on GICv4.1, where the doorbell
	 * is managed by the HW.
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}
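
/*
 * Complete the vPE residency sequence before the vcpu enters the guest;
 * only needed until the vPE has been marked ready.
 */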
void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}
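
/* Map an MSI routing entry back onto the vITS that will translate it. */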
static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo = irq_entry->msi.address_lo,
		.address_hi = irq_entry->msi.address_hi,
		.data = irq_entry->msi.data,
		.flags = irq_entry->msi.flags,
		.devid = irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}
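
/*
 * Map a host interrupt onto the guest vLPI described by the MSI routing
 * entry, so that the ITS injects it directly into the guest without a
 * host exit.
 */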
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */
	map = (struct its_vlpi_map) {
		.vm = &kvm->arch.vgic.its_vm,
		.vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid = irq->intid,
		.properties = ((irq->priority & 0xfc) |
			       (irq->enabled ? LPI_PROP_ENABLED : 0) |
			       LPI_PROP_GROUP1),
		.db_enabled = true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw = true;
	irq->host_irq = virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}
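
/*
 * Undo kvm_vgic_v4_set_forwarding(): unmap the vLPI and return the
 * interrupt to purely software emulation.
 */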
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}