#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;

void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;
		bool deactivated;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

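		/* Notify fds when the guest EOI'ed a level-triggered IRQ */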
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)
			continue;

		raw_spin_lock(&irq->irq_lock);

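		/* Always preserve the active bit, and note any deactivation */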
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

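		/* Edge is the only case where we preserve the pending bit */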
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

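		/*
		 * Clear the software pending latch once a level interrupt
		 * has been acked (no state left in the LR).
		 */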
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

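		/* Handle resampling for mapped interrupts if required */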
		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

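/* Requires irq->irq_lock to be held by the caller */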
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
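		/*
		 * Never present pending+active for a HW interrupt: its
		 * pending state is tracked at the physical distributor
		 * level.
		 */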
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

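			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so don't do that here.
			 */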
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

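	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. Lower the line level here so
	 * that a new rising edge on the physical line can be taken into
	 * account later.
	 */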
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
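		/*
		 * When emulating GICv3 with SRE=1, the VFIQEn bit is
		 * RES1 and the VAckCtl bit is RES0.
		 */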
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
				ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
				ICH_VMCR_FIQ_EN_SHIFT;
	} else {
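		/*
		 * When emulating GICv3 with SRE=1, the VFIQEn bit is
		 * RES1 and the VAckCtl bit is RES0.
		 */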
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) | \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

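	/*
	 * Start with a zeroed VMCR: the binary point fields then take
	 * their nominal values on the first interrupt injection.
	 */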
	vgic_v3->vgic_vmcr = 0;

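	/*
	 * When emulating a GICv3, force SRE to 1 to advertise the
	 * system-register interface to the guest, and disable IRQ/FIQ
	 * bypass. GICv2 emulation keeps the memory-mapped interface.
	 */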
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					     ICH_VTR_PRI_BITS_MASK) >>
					     ICH_VTR_PRI_BITS_SHIFT) + 1;

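	/* Enable the CPU interface, plus whichever traps were requested */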
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}

int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
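		/* clear the consumed pending bit in the guest's table */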
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

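/*
 * Deactivating a vPE's doorbell interrupt triggers the unmapping of
 * the associated vPE.
 */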
static void unmap_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}

static void map_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
	}
}

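/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: the KVM guest
 */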
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

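	/*
	 * On GICv4.1, unmap the vPEs first so that any vLPI pending
	 * state can be read back via vgic_v4_get_vlpi_state() below.
	 */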
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(dist);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(dist);

	return ret;
}

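/**
 * vgic_v3_rdist_overlap - check if a region overlaps any registered
 * redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of the region
 *
 * Return: true if there is an overlap
 */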
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

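/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */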
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		size_t sz = vgic_v3_rd_region_size(kvm, rdreg);

		if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
				       rdreg->base, SZ_64K, sz))
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

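/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify
 * one which still has free space for a new redistributor.
 *
 * @rd_regions: redistributor region list head
 *
 * Return: the redist region handle, if any, that has space to map a new
 * redistributor, or NULL otherwise.
 */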
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %lu redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_debug("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_debug("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

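	/*
	 * For a VGICv3 we require userspace to explicitly initialize
	 * the VGIC before we can make use of it.
	 */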
	if (!vgic_initialized(kvm))
		return -EBUSY;

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

static const struct midr_range broken_seis[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	{},
};

static bool vgic_v3_broken_seis(void)
{
	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
		is_midr_in_range_list(read_cpuid_id(), broken_seis));
}

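/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error
 * code otherwise
 */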
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

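	/*
	 * The ListRegs field is 5 bits wide, but the architecture caps the
	 * number of list registers at 16, so only the low four bits matter.
	 */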
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

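	/* GICv4 support? */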
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (vgic_v3_broken_seis()) {
		kvm_info("GICv3 with broken locally generated SEI\n");

		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
		group0_trap = true;
		group1_trap = true;
		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
			dir_trap = true;
		else
			common_trap = true;
	}

	if (group0_trap || group1_trap || common_trap || dir_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "",
			 dir_trap ? "D" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

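	/*
	 * VMCR is only written here when the guest uses the sysreg
	 * interface (SRE=1); for GICv2-on-GICv3 emulation, VFIQEn depends
	 * on ICC_SRE_EL1.SRE, so the VMCR save/restore is left to the
	 * world switch.
	 */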
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}

void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}