/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

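/*
 * Trivial register handlers: read-as-zero, read-as-ones and write-ignore.
 * They back registers (or parts of registers) that the architecture
 * defines as RAZ, RAO or WI.
 */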
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

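/*
 * GICD_IGROUPR: one bit per interrupt. Reads assemble the bitmap from each
 * vgic_irq's group field; writes update that field and, for HW-backed
 * vSGIs, propagate the new group to the GIC via vgic_update_vsgi().
 */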
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

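/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the
 * current value of the enable bits. The set/clear write handlers below
 * update them and, for HW-backed interrupts, forward the change to the
 * host interrupt controller.
 */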
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * Re-sample the line level, as the device may have
			 * changed state while the interrupt was disabled at
			 * the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will
			 * tell us when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

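/*
 * Common helper for reading the pending state. For HW-backed vSGIs the
 * pending bit is queried from the GIC via the irqchip state interface;
 * for mapped level interrupts read by the guest the physical line level
 * is sampled; otherwise the emulated pending state is used.
 */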
static unsigned long __read_pending(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		/*
		 * For a GICv3 model accessed from userspace, only the
		 * pending latch is reported: userspace saves and restores
		 * the latch and the line level separately.
		 */
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			switch (vcpu->kvm->arch.vgic.vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				if (is_user) {
					val = irq->pending_latch;
					break;
				}
				fallthrough;
			default:
				val = irq_is_pending(irq);
				break;
			}
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, false);
}

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

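/*
 * GICD_ISPENDR: setting a bit makes the interrupt pending. For GICv2
 * SGIs the register bits are write-ignore (their pending state is per
 * source CPU and set via GICD_SGIR), and HW-backed vSGIs are forwarded
 * to the GIC instead of using the emulated latch.
 */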
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to mark it pending */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		/*
		 * GICv2 SGIs need a source CPU to be deliverable. There is
		 * no way to know the original sender here, so record the
		 * vcpu being restored as the source.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

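/*
 * Clearing the pending state of a HW interrupt is also propagated to the
 * physical side. If the virtual interrupt is not active, the physical
 * active state is cleared as well, so that a guest writing ISPENDR then
 * ICPENDR cannot leave the physical interrupt stuck active (and thus
 * effectively masked); the worst case is one extra host interrupt and a
 * spurious vgic injection.
 */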
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/*
		 * For GICv2 SGIs, clearing the pending state from userspace
		 * also clears all recorded source CPUs.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

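/*
 * Changing an interrupt's active state is racy if that interrupt is queued
 * on a running VCPU's list registers: the change could be overwritten when
 * the VCPU's state is synced back from the guest. For shared interrupts,
 * and for all private interrupts on GICv3, every VCPU is therefore halted
 * around the access. GICv2 private interrupts need no halting: userspace
 * accesses already require all VCPUs to be stopped, and only the VCPU
 * itself can change its private interrupts via MMIO.
 */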
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare() */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state:
		 * all the guest should see is the logical active state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

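/*
 * For HW-mapped interrupts, only accesses coming from a running VCPU
 * (i.e. guest MMIO) update the active state and propagate it to the
 * physical interrupt; userspace writes leave HW interrupts untouched.
 */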
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 vSGIs do not track an active state at the GIC
		 * level, so there is nothing to propagate here; just drop
		 * the virtual active flag.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * A GICv2 SGI carries a source CPU ID that must be provided
		 * on EOI, so activating one needs a source. The architecture
		 * does not expose this state anywhere, so the best we can do
		 * is use the requesting VCPU if there is one, and source 0
		 * otherwise (e.g. when restored from userspace).
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

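/*
 * Changing the priority of an interrupt that is already pending on a VCPU
 * is not handled here: doing so properly would require forcing that VCPU
 * to exit and re-evaluate the priorities, potentially presenting an
 * interrupt that the old priority mask had kept hidden.
 */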
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

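/*
 * GICD_ICFGR uses two bits per interrupt: bit 1 selects edge (1) or level
 * (0) triggering, bit 0 is reserved. Reads report 0b10 for edge-configured
 * interrupts and 0b00 for level ones.
 */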
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * and for PPIs this is IMPLEMENTATION DEFINED. The arch
		 * timer code relies on PPIs being level triggered, so
		 * treat all private interrupts as read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

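/*
 * Helpers for the KVM device attributes that save and restore the line
 * level of level-triggered interrupts, 32 interrupts at a time. SGIs and
 * interrupts beyond the configured range are skipped.
 */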
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u32 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u32 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of the configured
		 * trigger type, so that userspace does not have to restore
		 * the interrupt configuration before the line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

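/*
 * kvm_mmio_read_buf() returns the data exactly as the guest would observe
 * it in memory, which for the GIC is always little endian. Convert it to
 * the host's native byte order so it can be used as a plain value.
 */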
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

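/*
 * The inverse of vgic_data_mmio_bus_to_host(): convert a native value to
 * the little-endian layout the guest expects and store it in the MMIO
 * buffer.
 */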
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

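/*
 * Validate an access against a register region: the access width must be
 * one the region supports, the address must be naturally aligned, and for
 * per-interrupt regions the decoded INTID must refer to an allocated
 * interrupt.
 */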
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			     gpa_t addr, u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			      gpa_t addr, const u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

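/*
 * Userland access to VGIC registers: dispatch a 32-bit read or write
 * coming from the KVM device ioctl interface.
 */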
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

const struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

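/*
 * Register the distributor MMIO frame of the given VGIC model with the
 * KVM MMIO bus so guest accesses get routed to the handlers above.
 */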
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}