// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */

0006 #define pr_fmt(fmt) "xive-kvm: " fmt
0007
0008 #include <linux/kernel.h>
0009 #include <linux/kvm_host.h>
0010 #include <linux/err.h>
0011 #include <linux/gfp.h>
0012 #include <linux/spinlock.h>
0013 #include <linux/delay.h>
0014 #include <linux/percpu.h>
0015 #include <linux/cpumask.h>
0016 #include <linux/uaccess.h>
0017 #include <linux/irqdomain.h>
0018 #include <asm/kvm_book3s.h>
0019 #include <asm/kvm_ppc.h>
0020 #include <asm/hvcall.h>
0021 #include <asm/xics.h>
0022 #include <asm/xive.h>
0023 #include <asm/xive-regs.h>
0024 #include <asm/debug.h>
0025 #include <asm/time.h>
0026 #include <asm/opal.h>
0027
0028 #include <linux/debugfs.h>
0029 #include <linux/seq_file.h>
0030
0031 #include "book3s_xive.h"
0032
0033 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
0034 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
0035
0036
0037 #define XICS_DUMMY 1
0038
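/*
 * Pull any pending interrupt notification from the TIMA OS ACK register
 * and record the acknowledged priority in xc->pending.
 */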
0039 static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc)
0040 {
0041 u8 cppr;
0042 u16 ack;

/*
 * Ensure any previous store to CPPR is ordered vs. the following
 * MMIO ACK cycle.
 */
0048 eieio();
0049
0050
0051 ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
0052
0053
0054 mb();

/* Anything actually pending in the pipeline ? */
0059 if (!((ack >> 8) & TM_QW1_NSR_EO))
0060 return;
0061
0062
0063 cppr = ack & 0xff;
0064 if (cppr < 8)
0065 xc->pending |= 1 << cppr;
0066
0067
0068 if (cppr >= xc->hw_cppr)
0069 pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
0070 smp_processor_id(), cppr, xc->hw_cppr);

/*
 * Record the new HW CPPR. We don't adjust xc->cppr here, that is
 * done when scanning the queues.
 */
0077 xc->hw_cppr = cppr;
0078 }
0079
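/* Perform an ESB load (PQ bit manipulation) on an interrupt source */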
0080 static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
0081 {
0082 u64 val;
0083
0084 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
0085 offset |= XIVE_ESB_LD_ST_MO;
0086
0087 val = __raw_readq(__x_eoi_page(xd) + offset);
0088 #ifdef __LITTLE_ENDIAN__
0089 val >>= 64-8;
0090 #endif
0091 return (u8)val;
0092 }
0093
0094
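/* Perform an EOI on an interrupt source, using whatever method it requires */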
0095 static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
0096 {
/* If the XIVE supports the new "store EOI" facility, use it */
0098 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
0099 __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
0100 else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
/*
 * For LSIs, we use the "EOI cycle" special load rather than PQ bits,
 * as they are automatically re-triggered in HW when still pending.
 */
0106 __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
0107 } else {
0108 uint64_t eoi_val;

/*
 * Otherwise, we use the special MMIO that does a clear of both P
 * and Q and returns the old Q. This allows us to then do a
 * re-trigger if Q was set, rather than synthesizing an interrupt
 * in software.
 */
0119 eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00);
0120
0121
0122 if ((eoi_val & 1) && __x_trig_page(xd))
0123 __raw_writeq(0, __x_trig_page(xd));
0124 }
0125 }
0126
0127 enum {
0128 scan_fetch,
0129 scan_poll,
0130 scan_eoi,
0131 };
0132
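/*
 * Scan the queues of a vCPU for the most favoured pending interrupt.
 * Depending on scan_type, this fetches it (updating queue pointers and
 * CPPR), only peeks at it, or merely cleans up stale pending bits after
 * an EOI.
 */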
0133 static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc,
0134 u8 pending, int scan_type)
0135 {
0136 u32 hirq = 0;
0137 u8 prio = 0xff;
0138
0139
0140 while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
0141 struct xive_q *q;
0142 u32 idx, toggle;
0143 __be32 *qpage;

/*
 * If pending is 0 this will return 0xff which is what
 * we want.
 */
0149 prio = ffs(pending) - 1;
0150
0151
0152 if (prio >= xc->cppr || prio > 7) {
0153 if (xc->mfrr < xc->cppr) {
0154 prio = xc->mfrr;
0155 hirq = XICS_IPI;
0156 }
0157 break;
0158 }
0159
0160
0161 q = &xc->queues[prio];
0162 idx = q->idx;
0163 toggle = q->toggle;
/*
 * Snapshot the queue page. The test further down for EOI must
 * use the same value as the scan below.
 */
0171 qpage = READ_ONCE(q->qpage);
0172
0173 skip_ipi:
0174
0175
0176
0177
0178 hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

/*
 * If we found our internal IPI (or prio 0 has no queue page at
 * all), EOI the IPI source when fetching, commit the queue
 * pointers and skip that entry: IPIs are presented to the guest
 * via MFRR, not as queue entries.
 */
0194 if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
0195 if (scan_type == scan_fetch) {
0196 xive_vm_source_eoi(xc->vp_ipi,
0197 &xc->vp_ipi_data);
0198 q->idx = idx;
0199 q->toggle = toggle;
0200 }
0201
0202 WARN_ON(hirq && hirq != XICS_IPI);
0203 if (hirq)
0204 goto skip_ipi;
0205 }
0206
0207
0208 if (hirq == XICS_DUMMY)
0209 goto skip_ipi;
0210
0211
0212 if (!hirq) {
0213 pending &= ~(1 << prio);
/*
 * Check if the queue count needs adjusting due to
 * interrupts being moved away.
 */
0219 if (atomic_read(&q->pending_count)) {
0220 int p = atomic_xchg(&q->pending_count, 0);
0221
0222 if (p) {
0223 WARN_ON(p > atomic_read(&q->count));
0224 atomic_sub(p, &q->count);
0225 }
0226 }
0227 }

/*
 * If the most favoured prio we found pending is less favoured
 * (or equal) than a pending IPI, we return the IPI instead.
 */
0234 if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
0235 prio = xc->mfrr;
0236 hirq = XICS_IPI;
0237 break;
0238 }
0239
0240
0241 if (scan_type == scan_fetch) {
0242 q->idx = idx;
0243 q->toggle = toggle;
0244 }
0245 }
0246
0247
0248 if (scan_type == scan_poll)
0249 return hirq;
0250
0251
0252 xc->pending = pending;

/*
 * If this is an EOI that's it, no CPPR adjustment done here,
 * all we needed was to clean up the stale pending bits and
 * check if there's anything left.
 */
0259 if (scan_type == scan_eoi)
0260 return hirq;
0261
/*
 * If we found an interrupt, adjust what the guest CPPR should be
 * as if we had just fetched that interrupt from HW.
 *
 * Note: this can only make xc->cppr smaller, as the scan loop
 * only exits with hirq != 0 if prio is more favoured than the
 * current xc->cppr.
 */
0271 if (hirq)
0272 xc->cppr = prio;

/*
 * If it was an IPI the HW CPPR might have been lowered too much
 * as the HW interrupt we use for IPIs is routed to priority 0.
 * We re-sync it here.
 */
0279 if (xc->cppr != xc->hw_cppr) {
0280 xc->hw_cppr = xc->cppr;
0281 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
0282 }
0283
0284 return hirq;
0285 }
0286
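/* H_XIRR: fetch the most favoured pending interrupt, if any */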
0287 static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
0288 {
0289 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0290 u8 old_cppr;
0291 u32 hirq;
0292
0293 pr_devel("H_XIRR\n");
0294
0295 xc->stat_vm_h_xirr++;
0296
0297
0298 xive_vm_ack_pending(xc);
0299
0300 pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
0301 xc->pending, xc->hw_cppr, xc->cppr);
0302
0303
0304 old_cppr = xive_prio_to_guest(xc->cppr);
0305
0306
0307 hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch);
0308
0309 pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
0310 hirq, xc->hw_cppr, xc->cppr);
0311
0312
0313 if (hirq & 0xff000000)
0314 pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);

/* Return interrupt and old CPPR in GPR4 */
0331 vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
0332
0333 return H_SUCCESS;
0334 }
0335
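/* H_IPOLL: poll for a pending interrupt without accepting it */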
0336 static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
0337 {
0338 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0339 u8 pending = xc->pending;
0340 u32 hirq;
0341
0342 pr_devel("H_IPOLL(server=%ld)\n", server);
0343
0344 xc->stat_vm_h_ipoll++;
0345
0346
0347 if (xc->server_num != server) {
0348 vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
0349 if (!vcpu)
0350 return H_PARAMETER;
0351 xc = vcpu->arch.xive_vcpu;
0352
0353
0354 pending = 0xff;
0355 } else {
0356
0357 __be64 qw1 = __raw_readq(xive_tima + TM_QW1_OS);
0358 u8 pipr = be64_to_cpu(qw1) & 0xff;
0359
0360 if (pipr < 8)
0361 pending |= 1 << pipr;
0362 }
0363
0364 hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
0365
0366
0367 vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
0368
0369 return H_SUCCESS;
0370 }
0371
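/* Reflect the most favoured pending priority (or MFRR) into the HW VP */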
0372 static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc)
0373 {
0374 u8 pending, prio;
0375
0376 pending = xc->pending;
0377 if (xc->mfrr != 0xff) {
0378 if (xc->mfrr < 8)
0379 pending |= 1 << xc->mfrr;
0380 else
0381 pending |= 0x80;
0382 }
0383 if (!pending)
0384 return;
0385 prio = ffs(pending) - 1;
0386
0387 __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING);
0388 }
0389
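/*
 * Walk the queues that the new CPPR leaves masked and turn any entry that
 * has been re-routed to another server into a dummy, re-triggering it so
 * it lands in its new target queue.
 */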
0390 static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive,
0391 struct kvmppc_xive_vcpu *xc)
0392 {
0393 unsigned int prio;
0394
0395
0396 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
0397 struct xive_q *q = &xc->queues[prio];
0398 struct kvmppc_xive_irq_state *state;
0399 struct kvmppc_xive_src_block *sb;
0400 u32 idx, toggle, entry, irq, hw_num;
0401 struct xive_irq_data *xd;
0402 __be32 *qpage;
0403 u16 src;
0404
0405 idx = q->idx;
0406 toggle = q->toggle;
0407 qpage = READ_ONCE(q->qpage);
0408 if (!qpage)
0409 continue;
0410
0411
0412 for (;;) {
0413 entry = be32_to_cpup(qpage + idx);
0414
0415
0416 if ((entry >> 31) == toggle)
0417 break;
0418 irq = entry & 0x7fffffff;
0419
0420
0421 if (irq == XICS_DUMMY || irq == XICS_IPI)
0422 goto next;
0423 sb = kvmppc_xive_find_source(xive, irq, &src);
0424 if (!sb)
0425 goto next;
0426 state = &sb->irq_state[src];
0427
0428
0429 if (xc->server_num == state->act_server)
0430 goto next;

/*
 * It has been re-routed to a different server: replace the
 * queue entry with a dummy, preserving the toggle bit.
 */
0436 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
0437
0438
0439 kvmppc_xive_select_irq(state, &hw_num, &xd);
0440
0441
0442 if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
0443 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
0444
0445
0446 xive_vm_source_eoi(hw_num, xd);
0447
0448 next:
0449 idx = (idx + 1) & q->msk;
0450 if (idx == 0)
0451 toggle ^= 1;
0452 }
0453 }
0454 }
0455
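/* H_CPPR: update the current processor priority */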
0456 static int xive_vm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
0457 {
0458 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0459 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
0460 u8 old_cppr;
0461
0462 pr_devel("H_CPPR(cppr=%ld)\n", cppr);
0463
0464 xc->stat_vm_h_cppr++;
0465
0466
0467 cppr = xive_prio_from_guest(cppr);
0468
0469
0470 old_cppr = xc->cppr;
0471 xc->cppr = cppr;

/*
 * Order the above update of xc->cppr vs. the subsequent read of
 * xc->mfrr done inside push_pending_to_hw(); pairs with the
 * barrier in H_IPI.
 */
0477 smp_mb();
0478
0479 if (cppr > old_cppr) {
/*
 * We are masking less, we need to look for pending things to
 * deliver and set VP pending bits accordingly to trigger a new
 * interrupt, otherwise we might miss MFRR changes for which we
 * have optimized out sending an IPI signal.
 */
0486 xive_vm_push_pending_to_hw(xc);
0487 } else {
/*
 * We are masking more. Any interrupt that was routed to this CPU
 * but has since been re-targetted elsewhere could now sit in our
 * queue(s) without ever being processed until we lower CPPR
 * again. Scan the queues, turn such entries into dummies and
 * re-trigger them so they land in the right queue.
 */
0505 xive_vm_scan_for_rerouted_irqs(xive, xc);
0506 }
0507
0508
0509 xc->hw_cppr = cppr;
0510 __raw_writeb(cppr, xive_tima + TM_QW1_OS + TM_CPPR);
0511
0512 return H_SUCCESS;
0513 }
0514
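/* H_EOI: end of interrupt, also sets the new CPPR from the XIRR */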
0515 static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
0516 {
0517 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
0518 struct kvmppc_xive_src_block *sb;
0519 struct kvmppc_xive_irq_state *state;
0520 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0521 struct xive_irq_data *xd;
0522 u8 new_cppr = xirr >> 24;
0523 u32 irq = xirr & 0x00ffffff, hw_num;
0524 u16 src;
0525 int rc = 0;
0526
0527 pr_devel("H_EOI(xirr=%08lx)\n", xirr);
0528
0529 xc->stat_vm_h_eoi++;
0530
0531 xc->cppr = xive_prio_from_guest(new_cppr);

/*
 * IPIs are synthetized from MFRR and thus don't need any special
 * EOI handling. The underlying interrupt used to signal MFRR
 * changes is EOId when fetched from the queue.
 */
0539 if (irq == XICS_IPI || irq == 0) {
/*
 * This barrier orders the setting of xc->cppr vs. the subsequent
 * test of xc->mfrr done inside scan_interrupts and
 * push_pending_to_hw.
 */
0545 smp_mb();
0546 goto bail;
0547 }
0548
0549
0550 sb = kvmppc_xive_find_source(xive, irq, &src);
0551 if (!sb) {
0552 pr_devel(" source not found !\n");
0553 rc = H_PARAMETER;
0554
0555 smp_mb();
0556 goto bail;
0557 }
0558 state = &sb->irq_state[src];
0559 kvmppc_xive_select_irq(state, &hw_num, &xd);
0560
0561 state->in_eoi = true;

/*
 * This barrier orders both the setting of in_eoi above vs. the
 * subsequent test of guest_priority, and the setting of xc->cppr
 * vs. the subsequent test of xc->mfrr done inside scan_interrupts
 * and push_pending_to_hw.
 */
0569 smp_mb();
0570
0571 again:
0572 if (state->guest_priority == MASKED) {
0573 arch_spin_lock(&sb->lock);
0574 if (state->guest_priority != MASKED) {
0575 arch_spin_unlock(&sb->lock);
0576 goto again;
0577 }
0578 pr_devel(" EOI on saved P...\n");
0579
0580
0581 state->old_p = false;
0582
0583 arch_spin_unlock(&sb->lock);
0584 } else {
0585 pr_devel(" EOI on source...\n");
0586
0587
0588 xive_vm_source_eoi(hw_num, xd);
0589
0590
0591 if (state->lsi && state->asserted)
0592 __raw_writeq(0, __x_trig_page(xd));
0593
0594 }
0595
/*
 * This barrier orders the above guest_priority check and
 * spin_lock/unlock with clearing in_eoi below. It also has to be
 * a full mb() as it must ensure the MMIOs done in source_eoi()
 * are completed before state->in_eoi is visible.
 */
0604 mb();
0605 state->in_eoi = false;
0606 bail:
0607
0608
0609 xive_vm_scan_interrupts(xc, xc->pending, scan_eoi);
0610 xive_vm_push_pending_to_hw(xc);
0611 pr_devel(" after scan pending=%02x\n", xc->pending);
0612
0613
0614 xc->hw_cppr = xc->cppr;
0615 __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
0616
0617 return rc;
0618 }
0619
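/* H_IPI: set the target vCPU's MFRR and shoot an IPI if needed */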
0620 static int xive_vm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
0621 unsigned long mfrr)
0622 {
0623 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0624
0625 pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
0626
0627 xc->stat_vm_h_ipi++;
0628
0629
0630 vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
0631 if (!vcpu)
0632 return H_PARAMETER;
0633 xc = vcpu->arch.xive_vcpu;
0634
0635
0636 xc->mfrr = mfrr;

/*
 * The load of xc->cppr below and the subsequent MMIO store to the
 * IPI must happen after the above store to MFRR has become
 * globally visible, so that:
 *
 * - Another CPU doing an H_EOI or H_CPPR that updates xc->cppr and
 *   then reads xc->mfrr sees our update.
 *
 * - The target of the IPI sees the xc->mfrr update.
 */
0648 mb();

/* Shoot the IPI if more favoured than the target CPPR */
0651 if (mfrr < xc->cppr)
0652 __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data));
0653
0654 return H_SUCCESS;
0655 }
0656
/*
 * We leave a gap of a couple of interrupts in the queue to account
 * for the IPI and an additional safety guard.
 */
0661 #define XIVE_Q_GAP 2
0662
0663 static bool kvmppc_xive_vcpu_has_save_restore(struct kvm_vcpu *vcpu)
0664 {
0665 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0666
0667
0668 return xc->vp_cam & TM_QW1W2_HO;
0669 }
0670
0671 bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu)
0672 {
0673 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0674 struct kvmppc_xive *xive = xc->xive;
0675
0676 if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE)
0677 return kvmppc_xive_vcpu_has_save_restore(vcpu);
0678
0679 return true;
0680 }
0681
/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on).
 */
0686 void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
0687 {
0688 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
0689 u64 pq;
0690
/*
 * Nothing to do if the platform doesn't have a XIVE or this vCPU
 * doesn't have its own XIVE context (e.g. because it's not using
 * an in-kernel interrupt controller).
 */
0696 if (!tima || !vcpu->arch.xive_cam_word)
0697 return;
0698
0699 eieio();
0700 if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
0701 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
0702 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
0703 vcpu->arch.xive_pushed = 1;
0704 eieio();
0705
/*
 * We clear the irq_pending flag. There is a small chance of a
 * race vs. the escalation interrupt happening on another
 * processor setting it again, but the only consequence is a
 * spurious wakeup on the next H_CEDE, which is not an issue.
 */
0713 vcpu->arch.irq_pending = 0;
0714
/*
 * In single escalation mode, if the escalation interrupt is
 * on, we mask it.
 */
0719 if (vcpu->arch.xive_esc_on) {
0720 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
0721 XIVE_ESB_SET_PQ_01));
0722 mb();
/*
 * There is a small race with the escalation interrupt: it may
 * have fired and be on its way to the host queue while we set PQ
 * to 01 above. If the previous P bit was seen set, the escalation
 * is in flight and we must not clear xive_esc_on here; the
 * escalation handler will clear it instead, and the cede path
 * checks the flag again before re-arming the escalation.
 */
0745 if (!(pq & XIVE_ESB_VAL_P))
/* Now P is 0, we can clear the flag */
0747 vcpu->arch.xive_esc_on = 0;
0748 }
0749 }
0750 EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
0751
/*
 * Pull a vcpu's context from the XIVE on guest exit.
 * This assumes we are in virtual mode (MMU on).
 */
0756 void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
0757 {
0758 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
0759
0760 if (!vcpu->arch.xive_pushed)
0761 return;
0762
/* Should not have been pushed if there is no TIMA */
0766 if (WARN_ON(!tima))
0767 return;
0768
0769 eieio();
0770
0771 __raw_readl(tima + TM_SPC_PULL_OS_CTX);
0772
0773 if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
0774 vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
0775
0776
0777 vcpu->arch.xive_saved_state.lsmfb = 0;
0778 vcpu->arch.xive_saved_state.ack = 0xff;
0779 vcpu->arch.xive_pushed = 0;
0780 eieio();
0781 }
0782 EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
0783
0784 bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
0785 {
0786 void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
0787 bool ret = true;
0788
0789 if (!esc_vaddr)
0790 return ret;
0791

/* We are using XIVE with single escalation */
0794 if (vcpu->arch.xive_esc_on) {
/*
 * If we still have a pending escalation, abort the cede, and we
 * must set PQ to 10 rather than 00 so that we don't potentially
 * end up with two entries for the escalation interrupt in the
 * XIVE interrupt queue. In that case we also don't want to set
 * xive_esc_on to 1 here in case we race with xive_esc_irq().
 */
0803 ret = false;
/*
 * The escalation interrupts are special as we don't EOI them.
 * There is no need to use the load-after-store ordering offset
 * to set PQ to 10 as we won't use StoreEOI.
 */
0809 __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
0810 } else {
0811 vcpu->arch.xive_esc_on = true;
0812 mb();
0813 __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
0814 }
0815 mb();
0816
0817 return ret;
0818 }
0819 EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
0820
/*
 * This is a simple trigger for a generic XIVE IRQ. This must only be
 * called for interrupts that support a trigger page.
 */
0825 static bool xive_irq_trigger(struct xive_irq_data *xd)
0826 {
0827
0828 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
0829 return false;
0830
0831
0832 if (WARN_ON(!xd->trig_mmio))
0833 return false;
0834
0835 out_be64(xd->trig_mmio, 0);
0836
0837 return true;
0838 }
0839
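/* Handler for the escalation interrupt: kick the target vCPU */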
0840 static irqreturn_t xive_esc_irq(int irq, void *data)
0841 {
0842 struct kvm_vcpu *vcpu = data;
0843
0844 vcpu->arch.irq_pending = 1;
0845 smp_mb();
0846 if (vcpu->arch.ceded || vcpu->arch.nested)
0847 kvmppc_fast_vcpu_kick(vcpu);
0848
/*
 * With the no-EOI flag, the escalation interrupt is now effectively
 * disabled. Clearing xive_esc_on means we won't bother masking it
 * again on the next guest entry, and lets the entry code know that
 * a PQ of 10 with xive_esc_on set means an unprocessed escalation
 * is sitting in the queue.
 */
0858 vcpu->arch.xive_esc_on = false;
0859
0860
0861 smp_wmb();
0862
0863 return IRQ_HANDLED;
0864 }
0865
0866 int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
0867 bool single_escalation)
0868 {
0869 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0870 struct xive_q *q = &xc->queues[prio];
0871 char *name = NULL;
0872 int rc;
0873
0874
0875 if (xc->esc_virq[prio])
0876 return 0;
0877
0878
0879 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
0880 if (!xc->esc_virq[prio]) {
0881 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
0882 prio, xc->server_num);
0883 return -EIO;
0884 }
0885
0886 if (single_escalation)
0887 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
0888 vcpu->kvm->arch.lpid, xc->server_num);
0889 else
0890 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
0891 vcpu->kvm->arch.lpid, xc->server_num, prio);
0892 if (!name) {
0893 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
0894 prio, xc->server_num);
0895 rc = -ENOMEM;
0896 goto error;
0897 }
0898
0899 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
0900
0901 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
0902 IRQF_NO_THREAD, name, vcpu);
0903 if (rc) {
0904 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
0905 prio, xc->server_num);
0906 goto error;
0907 }
0908 xc->esc_virq_names[prio] = name;
0909
/*
 * In single escalation mode, we grab the ESB MMIO of the interrupt
 * and mask it. Also populate the VCPU v/raddr of the ESB page for
 * use by the assembly entry/exit code. Finally set the
 * XIVE_IRQ_FLAG_NO_EOI flag which prevents the core code from
 * performing an EOI on the escalation interrupt, thus leaving it
 * effectively masked after it fires once.
 */
0918 if (single_escalation) {
0919 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
0920 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
0921
0922 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
0923 vcpu->arch.xive_esc_raddr = xd->eoi_page;
0924 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
0925 xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
0926 }
0927
0928 return 0;
0929 error:
0930 irq_dispose_mapping(xc->esc_virq[prio]);
0931 xc->esc_virq[prio] = 0;
0932 kfree(name);
0933 return rc;
0934 }
0935
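/* Allocate and configure an event queue for the given vCPU/priority */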
0936 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
0937 {
0938 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
0939 struct kvmppc_xive *xive = xc->xive;
0940 struct xive_q *q = &xc->queues[prio];
0941 void *qpage;
0942 int rc;
0943
0944 if (WARN_ON(q->qpage))
0945 return 0;
0946
0947
0948 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
0949 if (!qpage) {
0950 pr_err("Failed to allocate queue %d for VCPU %d\n",
0951 prio, xc->server_num);
0952 return -ENOMEM;
0953 }
0954 memset(qpage, 0, 1 << xive->q_order);
0955
/*
 * Configure the queue. This will set q->qpage only once the queue
 * is fully configured. This is a requirement for prio 0 as we will
 * stop doing EOIs for every IPI as soon as we observe qpage being
 * non-NULL.
 */
0963 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
0964 xive->q_order, true);
0965 if (rc)
0966 pr_err("Failed to configure queue %d for VCPU %d\n",
0967 prio, xc->server_num);
0968 return rc;
0969 }
0970
0971
0972 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
0973 {
0974 struct kvmppc_xive *xive = kvm->arch.xive;
0975 struct kvm_vcpu *vcpu;
0976 unsigned long i;
0977 int rc;
0978
0979 lockdep_assert_held(&xive->lock);
0980
0981
0982 if (xive->qmap & (1 << prio))
0983 return 0;
0984
0985 pr_devel("Provisioning prio... %d\n", prio);
0986
0987
0988 kvm_for_each_vcpu(i, vcpu, kvm) {
0989 if (!vcpu->arch.xive_vcpu)
0990 continue;
0991 rc = xive_provision_queue(vcpu, prio);
0992 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive))
0993 kvmppc_xive_attach_escalation(vcpu, prio,
0994 kvmppc_xive_has_single_escalation(xive));
0995 if (rc)
0996 return rc;
0997 }
0998
0999
1000 mb();
1001 xive->qmap |= (1 << prio);
1002 return 0;
1003 }
1004
1005 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
1006 {
1007 struct kvm_vcpu *vcpu;
1008 struct kvmppc_xive_vcpu *xc;
1009 struct xive_q *q;
1010
1011
1012 vcpu = kvmppc_xive_find_server(kvm, server);
1013 if (!vcpu) {
1014 pr_warn("%s: Can't find server %d\n", __func__, server);
1015 return;
1016 }
1017 xc = vcpu->arch.xive_vcpu;
1018 if (WARN_ON(!xc))
1019 return;
1020
1021 q = &xc->queues[prio];
1022 atomic_inc(&q->pending_count);
1023 }
1024
1025 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
1026 {
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1028 struct xive_q *q;
1029 u32 max;
1030
1031 if (WARN_ON(!xc))
1032 return -ENXIO;
1033 if (!xc->valid)
1034 return -ENXIO;
1035
1036 q = &xc->queues[prio];
1037 if (WARN_ON(!q->qpage))
1038 return -ENXIO;
1039
1040
1041 max = (q->msk + 1) - XIVE_Q_GAP;
1042 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
1043 }
1044
1045 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
1046 {
1047 struct kvm_vcpu *vcpu;
1048 unsigned long i;
1049 int rc;
1050
1051
1052 vcpu = kvmppc_xive_find_server(kvm, *server);
1053 if (!vcpu) {
1054 pr_devel("Can't find server %d\n", *server);
1055 return -EINVAL;
1056 }
1057
1058 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
1059
1060
1061 rc = xive_try_pick_queue(vcpu, prio);
1062 if (rc == 0)
1063 return rc;
1064
1065 pr_devel(" .. failed, looking up candidate...\n");
1066
1067
1068 kvm_for_each_vcpu(i, vcpu, kvm) {
1069 if (!vcpu->arch.xive_vcpu)
1070 continue;
1071 rc = xive_try_pick_queue(vcpu, prio);
1072 if (rc == 0) {
1073 *server = vcpu->arch.xive_vcpu->server_num;
1074 pr_devel(" found on 0x%x/%d\n", *server, prio);
1075 return rc;
1076 }
1077 }
1078 pr_devel(" no available target !\n");
1079
1080
1081 return -EBUSY;
1082 }
1083
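/*
 * Lock the source and mask it (PQ=10), saving the previous P and Q bits.
 * Returns the previous guest priority, with the lock held.
 */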
1084 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
1085 struct kvmppc_xive_src_block *sb,
1086 struct kvmppc_xive_irq_state *state)
1087 {
1088 struct xive_irq_data *xd;
1089 u32 hw_num;
1090 u8 old_prio;
1091 u64 val;

/*
 * Take the lock and set masked; re-try if we raced with an H_EOI
 * on that interrupt.
 */
1097 for (;;) {
1098 arch_spin_lock(&sb->lock);
1099 old_prio = state->guest_priority;
1100 state->guest_priority = MASKED;
1101 mb();
1102 if (!state->in_eoi)
1103 break;
1104 state->guest_priority = old_prio;
1105 arch_spin_unlock(&sb->lock);
1106 }
1107
1108
1109 if (old_prio == MASKED)
1110 return old_prio;
1111
1112
1113 kvmppc_xive_select_irq(state, &hw_num, &xd);

/* Set PQ to 10, return old P and old Q and remember them */
1116 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
1117 state->old_p = !!(val & 2);
1118 state->old_q = !!(val & 1);

/*
 * Synchronize hardware to ensure the queues are updated when
 * masking.
 */
1124 xive_native_sync_source(hw_num);
1125
1126 return old_prio;
1127 }
1128
1129 static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
1130 struct kvmppc_xive_irq_state *state)
/*
 * Take the lock, and re-try if an H_EOI is in progress on that
 * interrupt.
 */
1135 for (;;) {
1136 arch_spin_lock(&sb->lock);
1137 if (!state->in_eoi)
1138 break;
1139 arch_spin_unlock(&sb->lock);
1140 }
1141 }
1142
1143 static void xive_finish_unmask(struct kvmppc_xive *xive,
1144 struct kvmppc_xive_src_block *sb,
1145 struct kvmppc_xive_irq_state *state,
1146 u8 prio)
1147 {
1148 struct xive_irq_data *xd;
1149 u32 hw_num;
1150
1151
1152 if (state->guest_priority != MASKED)
1153 goto bail;
1154
1155
1156 kvmppc_xive_select_irq(state, &hw_num, &xd);
1157
1158
1159 if (state->old_q)
1160 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

/*
 * If old_p is clear, perform an effective EOI on the source,
 * which will unmask it and re-trigger it if Q was set.
 */
1167 if (!state->old_p)
1168 xive_vm_source_eoi(hw_num, xd);
1169
1170
1171 mb();
1172 bail:
1173 state->guest_priority = prio;
1174 }
1175
/*
 * Target an interrupt to a given server/prio. This will fall back to
 * another server if necessary and perform the HW targetting updates as
 * needed. Must be called with the source block lock held.
 */
1183 static int xive_target_interrupt(struct kvm *kvm,
1184 struct kvmppc_xive_irq_state *state,
1185 u32 server, u8 prio)
1186 {
1187 struct kvmppc_xive *xive = kvm->arch.xive;
1188 u32 hw_num;
1189 int rc;

/*
 * This will return a tentative server and actual priority. The
 * count for the new target will have already been incremented.
 */
1196 rc = kvmppc_xive_select_target(kvm, &server, prio);
1197
1198
1199
1200
1201
1202 if (rc)
1203 return rc;

/*
 * Increment the old queue's pending count if there was one so that
 * its count gets adjusted later when the queue is observed to be
 * empty.
 */
1210 if (state->act_priority != MASKED)
1211 xive_inc_q_pending(kvm,
1212 state->act_server,
1213 state->act_priority);
1214
1215
1216
1217 state->act_priority = prio;
1218 state->act_server = server;
1219
1220
1221 kvmppc_xive_select_irq(state, &hw_num, NULL);
1222
1223 return xive_native_configure_irq(hw_num,
1224 kvmppc_xive_vp(xive, server),
1225 prio, state->number);
1226 }
1227
/*
 * Interrupt targetting and masking rules:
 *
 * - Unless it was never enabled (or we run out of capacity) an
 *   interrupt is always targetted at a valid server/queue pair, even
 *   when "masked" by the guest. This separates targetting from
 *   masking: accounting is only done when (re)targetting, and a
 *   masked interrupt can keep draining into its existing target
 *   queue.
 *
 * - When masking, we set PQ to 10 and save the previous P and Q.
 *
 * - When unmasking, if the saved Q was set, we set PQ to 11,
 *   effectively OR'ing the previous Q into the HW Q. Then, if the
 *   saved P is clear, we perform an effective EOI which unmasks the
 *   interrupt and re-triggers it if Q was set. Otherwise (saved P
 *   set) we leave PQ unchanged, as a guest EOI is still expected for
 *   that interrupt.
 *
 * - If an H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing targets, we account on the new target and increment
 *   a separate "pending" counter on the old one. That pending counter
 *   is used to decrement the old target's count once its queue has
 *   been observed empty.
 */
1268 int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
1269 u32 priority)
1270 {
1271 struct kvmppc_xive *xive = kvm->arch.xive;
1272 struct kvmppc_xive_src_block *sb;
1273 struct kvmppc_xive_irq_state *state;
1274 u8 new_act_prio;
1275 int rc = 0;
1276 u16 idx;
1277
1278 if (!xive)
1279 return -ENODEV;
1280
1281 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
1282 irq, server, priority);
1283
1284
1285 if (priority != MASKED) {
1286 mutex_lock(&xive->lock);
1287 rc = xive_check_provisioning(xive->kvm,
1288 xive_prio_from_guest(priority));
1289 mutex_unlock(&xive->lock);
1290 }
1291 if (rc) {
1292 pr_devel(" provisioning failure %d !\n", rc);
1293 return rc;
1294 }
1295
1296 sb = kvmppc_xive_find_source(xive, irq, &idx);
1297 if (!sb)
1298 return -EINVAL;
1299 state = &sb->irq_state[idx];
1300
/*
 * We first handle masking/unmasking since the locking might need to
 * be retried due to EOIs; we'll handle targetting changes later.
 * These functions return with the source block lock held.
 *
 * xive_lock_and_mask() will also set state->guest_priority but won't
 * otherwise change other fields of the state.
 */
1315 if (priority == MASKED)
1316 xive_lock_and_mask(xive, sb, state);
1317 else
1318 xive_lock_for_unmask(sb, state);
1319
/*
 * Compute the new "actual" priority. If the guest is masking, we
 * keep the existing targetting so the interrupt can keep draining
 * into its current queue.
 */
1326 new_act_prio = state->act_priority;
1327 if (priority != MASKED)
1328 new_act_prio = xive_prio_from_guest(priority);
1329
1330 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
1331 new_act_prio, state->act_server, state->act_priority);
1332
/*
 * Then check if we actually need to change anything: we re-target
 * only if we have a valid new priority (not 0xff) and either the
 * server or the priority changed.
 *
 * Note: if act_priority was 0xff and the new priority is also 0xff,
 * we leave the interrupt untargetted. An attempt to do an int_on on
 * an untargetted interrupt will fail.
 */
1347 if (new_act_prio != MASKED &&
1348 (state->act_server != server ||
1349 state->act_priority != new_act_prio))
1350 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
1351
/*
 * Perform the final unmasking of the interrupt source if necessary.
 */
1356 if (priority != MASKED)
1357 xive_finish_unmask(xive, sb, state, priority);
1358
1359
1360
1361
1362
1363 state->saved_priority = priority;
1364
1365 arch_spin_unlock(&sb->lock);
1366 return rc;
1367 }
1368
1369 int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
1370 u32 *priority)
1371 {
1372 struct kvmppc_xive *xive = kvm->arch.xive;
1373 struct kvmppc_xive_src_block *sb;
1374 struct kvmppc_xive_irq_state *state;
1375 u16 idx;
1376
1377 if (!xive)
1378 return -ENODEV;
1379
1380 sb = kvmppc_xive_find_source(xive, irq, &idx);
1381 if (!sb)
1382 return -EINVAL;
1383 state = &sb->irq_state[idx];
1384 arch_spin_lock(&sb->lock);
1385 *server = state->act_server;
1386 *priority = state->guest_priority;
1387 arch_spin_unlock(&sb->lock);
1388
1389 return 0;
1390 }
1391
1392 int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
1393 {
1394 struct kvmppc_xive *xive = kvm->arch.xive;
1395 struct kvmppc_xive_src_block *sb;
1396 struct kvmppc_xive_irq_state *state;
1397 u16 idx;
1398
1399 if (!xive)
1400 return -ENODEV;
1401
1402 sb = kvmppc_xive_find_source(xive, irq, &idx);
1403 if (!sb)
1404 return -EINVAL;
1405 state = &sb->irq_state[idx];
1406
1407 pr_devel("int_on(irq=0x%x)\n", irq);
1408
1409
1410
1411
1412 if (state->act_priority == MASKED) {
1413 pr_devel("int_on on untargetted interrupt\n");
1414 return -EINVAL;
1415 }
1416
1417
1418 if (state->saved_priority == MASKED)
1419 return 0;
1420
1421
1422
1423
1424 xive_lock_for_unmask(sb, state);
1425 xive_finish_unmask(xive, sb, state, state->saved_priority);
1426 arch_spin_unlock(&sb->lock);
1427
1428 return 0;
1429 }
1430
1431 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
1432 {
1433 struct kvmppc_xive *xive = kvm->arch.xive;
1434 struct kvmppc_xive_src_block *sb;
1435 struct kvmppc_xive_irq_state *state;
1436 u16 idx;
1437
1438 if (!xive)
1439 return -ENODEV;
1440
1441 sb = kvmppc_xive_find_source(xive, irq, &idx);
1442 if (!sb)
1443 return -EINVAL;
1444 state = &sb->irq_state[idx];
1445
1446 pr_devel("int_off(irq=0x%x)\n", irq);
1447
1448
1449
1450
1451 state->saved_priority = xive_lock_and_mask(xive, sb, state);
1452 arch_spin_unlock(&sb->lock);
1453
1454 return 0;
1455 }
1456
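/* Force a re-trigger of an interrupt that was found pending in the saved XIRR */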
1457 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
1458 {
1459 struct kvmppc_xive_src_block *sb;
1460 struct kvmppc_xive_irq_state *state;
1461 u16 idx;
1462
1463 sb = kvmppc_xive_find_source(xive, irq, &idx);
1464 if (!sb)
1465 return false;
1466 state = &sb->irq_state[idx];
1467 if (!state->valid)
1468 return false;
1469
/*
 * Trigger the IPI. This assumes we never restore a pass-through
 * interrupt, which should be safe enough.
 */
1474 xive_irq_trigger(&state->ipi_data);
1475
1476 return true;
1477 }
1478
1479 u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
1480 {
1481 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1482
1483 if (!xc)
1484 return 0;
1485
1486
1487 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
1488 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
1489 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
1490 }
1491
1492 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
1493 {
1494 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1495 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1496 u8 cppr, mfrr;
1497 u32 xisr;
1498
1499 if (!xc || !xive)
1500 return -ENOENT;
1501
1502
1503 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
1504 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
1505 KVM_REG_PPC_ICP_XISR_MASK;
1506 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
1507
1508 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
1509 xc->server_num, cppr, mfrr, xisr);
1510
/*
 * We can't update the state of a "pushed" VCPU, but that shouldn't
 * happen because the vcpu->mutex makes running a vcpu mutually
 * exclusive with doing one_reg get/set on it.
 */
1516 if (WARN_ON(vcpu->arch.xive_pushed))
1517 return -EIO;
1518
1519
1520 vcpu->arch.xive_saved_state.cppr = cppr;
1521 xc->hw_cppr = xc->cppr = cppr;
1522
/*
 * Restore MFRR and trigger our IPI if it is more favoured than the
 * current CPPR; at worst the guest gets a spurious interrupt.
 */
1529 xc->mfrr = mfrr;
1530 if (mfrr < cppr)
1531 xive_irq_trigger(&xc->vp_ipi_data);
1532
/*
 * A non-IPI XISR means there is something in the legacy "one
 * element" queue that needs to be re-triggered. If the source isn't
 * set up yet, remember it in delayed_irq so that a later
 * xive_set_source() can force a resend.
 */
1542 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
1543 xc->delayed_irq = xisr;
1544 xive->delayed_irqs++;
1545 pr_devel(" xisr restore delayed\n");
1546 }
1547
1548 return 0;
1549 }
1550
1551 int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
1552 unsigned long host_irq)
1553 {
1554 struct kvmppc_xive *xive = kvm->arch.xive;
1555 struct kvmppc_xive_src_block *sb;
1556 struct kvmppc_xive_irq_state *state;
1557 struct irq_data *host_data =
1558 irq_domain_get_irq_data(irq_get_default_host(), host_irq);
1559 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
1560 u16 idx;
1561 u8 prio;
1562 int rc;
1563
1564 if (!xive)
1565 return -ENODEV;
1566
1567 pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n",
1568 __func__, guest_irq, host_irq, hw_irq);
1569
1570 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1571 if (!sb)
1572 return -EINVAL;
1573 state = &sb->irq_state[idx];
1574
/*
 * Mark the passed-through interrupt as going to a VCPU, this will
 * prevent further EOIs and similar operations from the XIVE code.
 * It will also mask the interrupt to either PQ=10 or 11 state, the
 * latter if the interrupt is pending. This will allow us to unmask
 * or re-trigger it after routing it to the guest with a simple EOI.
 *
 * The "state" argument is a "token": all it needs is to be non-NULL
 * to switch to passed-through mode.
 */
1588 rc = irq_set_vcpu_affinity(host_irq, state);
1589 if (rc) {
1590 pr_err("Failed to set VCPU affinity for host IRQ %ld\n", host_irq);
1591 return rc;
1592 }
1593
/*
 * Mask and read the state of the IPI. We need to know if its P bit
 * is set, as that means it's potentially already using a queue
 * entry in the target queue.
 */
1599 prio = xive_lock_and_mask(xive, sb, state);
1600 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
1601 state->old_p, state->old_q);
1602
1603
1604 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1605
1606
1607
1608
1609
1610 if (xive->ops && xive->ops->reset_mapped)
1611 xive->ops->reset_mapped(kvm, guest_irq);
1612
1613
1614 state->pt_number = hw_irq;
1615 state->pt_data = irq_data_get_irq_handler_data(host_data);
1616
/*
 * Configure the IRQ to match the existing configuration of the IPI
 * if it was already targetted. Otherwise this will mask the
 * interrupt in a lossy way (act_priority is 0xff) which is fine for
 * a never started interrupt.
 */
1623 xive_native_configure_irq(hw_irq,
1624 kvmppc_xive_vp(xive, state->act_server),
1625 state->act_priority, state->number);
1626
/*
 * We do an EOI to enable the interrupt (and retrigger if needed) if
 * the guest has it unmasked and the P bit was *not* set in the IPI.
 * If it was set, a slot may still be in use in the target queue, so
 * we have to wait for a guest originated EOI.
 */
1634 if (prio != MASKED && !state->old_p)
1635 xive_vm_source_eoi(hw_irq, state->pt_data);
1636
1637
1638 state->old_p = state->old_q = false;
1639
1640
1641 mb();
1642 state->guest_priority = prio;
1643 arch_spin_unlock(&sb->lock);
1644
1645 return 0;
1646 }
1647 EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
1648
1649 int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
1650 unsigned long host_irq)
1651 {
1652 struct kvmppc_xive *xive = kvm->arch.xive;
1653 struct kvmppc_xive_src_block *sb;
1654 struct kvmppc_xive_irq_state *state;
1655 u16 idx;
1656 u8 prio;
1657 int rc;
1658
1659 if (!xive)
1660 return -ENODEV;
1661
1662 pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq);
1663
1664 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1665 if (!sb)
1666 return -EINVAL;
1667 state = &sb->irq_state[idx];
1668
/*
 * Mask and read the state of the IRQ. We need to know if its P bit
 * is set, as that means it's potentially already using a queue
 * entry in the target queue.
 */
1674 prio = xive_lock_and_mask(xive, sb, state);
1675 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1676 state->old_p, state->old_q);
1677
/*
 * If old_p is set, the interrupt is pending, we switch it to PQ=11.
 * This will force a resend in the host so the interrupt isn't lost
 * to whatever host driver may pick it up.
 */
1683 if (state->old_p)
1684 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1685
1686
1687 rc = irq_set_vcpu_affinity(host_irq, NULL);
1688 if (rc) {
1689 pr_err("Failed to clr VCPU affinity for host IRQ %ld\n", host_irq);
1690 return rc;
1691 }
1692
1693
1694 state->pt_number = 0;
1695 state->pt_data = NULL;
1696
1697
1698
1699
1700
1701 if (xive->ops && xive->ops->reset_mapped) {
1702 xive->ops->reset_mapped(kvm, guest_irq);
1703 }
1704
1705
1706 xive_native_configure_irq(state->ipi_number,
1707 kvmppc_xive_vp(xive, state->act_server),
1708 state->act_priority, state->number);
1709
/*
 * If old_p is set (we have a queue entry potentially occupied) or
 * the interrupt is masked, we set the IPI to PQ=10 state. Otherwise
 * we just re-enable it (PQ=00).
 */
1715 if (prio == MASKED || state->old_p)
1716 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1717 else
1718 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1719
1720
1721 mb();
1722 state->guest_priority = prio;
1723 arch_spin_unlock(&sb->lock);
1724
1725 return 0;
1726 }
1727 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1728
1729 void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1730 {
1731 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1732 struct kvm *kvm = vcpu->kvm;
1733 struct kvmppc_xive *xive = kvm->arch.xive;
1734 int i, j;
1735
1736 for (i = 0; i <= xive->max_sbid; i++) {
1737 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1738
1739 if (!sb)
1740 continue;
1741 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1742 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1743
1744 if (!state->valid)
1745 continue;
1746 if (state->act_priority == MASKED)
1747 continue;
1748 if (state->act_server != xc->server_num)
1749 continue;
1750
1751
1752 arch_spin_lock(&sb->lock);
1753 state->act_priority = MASKED;
1754 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1755 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1756 if (state->pt_number) {
1757 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1758 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1759 }
1760 arch_spin_unlock(&sb->lock);
1761 }
1762 }
1763
1764
1765 if (vcpu->arch.xive_esc_on) {
1766 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1767 XIVE_ESB_SET_PQ_01));
1768 vcpu->arch.xive_esc_on = false;
1769 }
1770
/*
 * Clear pointers to the escalation interrupt ESB. This is safe
 * because the vcpu->mutex is held, preventing any other CPU from
 * concurrently executing a KVM_RUN ioctl.
 */
1776 vcpu->arch.xive_esc_vaddr = 0;
1777 vcpu->arch.xive_esc_raddr = 0;
1778 }
1779
/*
 * In single escalation mode, the escalation interrupt is marked so
 * that EOI doesn't re-enable it, but just sets the stale_p flag to
 * indicate that the P bit has already been dealt with. However, the
 * assembly code that enters the guest sets PQ to 00 without clearing
 * stale_p (because it has no easy way to address it). Hence we have
 * to adjust stale_p before shutting down the interrupt.
 */
1788 void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
1789 struct kvmppc_xive_vcpu *xc, int irq)
1790 {
1791 struct irq_data *d = irq_get_irq_data(irq);
1792 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1793
/*
 * This slightly odd sequence gives the right result (i.e. stale_p
 * set if xive_esc_on is false) even if we race with xive_esc_irq()
 * clearing xive_esc_on on another CPU.
 */
1799 xd->stale_p = false;
1800 smp_mb();
1801 if (!vcpu->arch.xive_esc_on)
1802 xd->stale_p = true;
1803 }
1804
1805 void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1806 {
1807 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1808 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1809 int i;
1810
1811 if (!kvmppc_xics_enabled(vcpu))
1812 return;
1813
1814 if (!xc)
1815 return;
1816
1817 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1818
1819
1820 xc->valid = false;
1821 kvmppc_xive_disable_vcpu_interrupts(vcpu);
1822
1823
1824 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1825
1826
1827 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1828 if (xc->esc_virq[i]) {
1829 if (kvmppc_xive_has_single_escalation(xc->xive))
1830 xive_cleanup_single_escalation(vcpu, xc,
1831 xc->esc_virq[i]);
1832 free_irq(xc->esc_virq[i], vcpu);
1833 irq_dispose_mapping(xc->esc_virq[i]);
1834 kfree(xc->esc_virq_names[i]);
1835 }
1836 }
1837
1838
1839 xive_native_disable_vp(xc->vp_id);
1840
1841
1842 vcpu->arch.xive_cam_word = 0;
1843
1844
1845 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1846 struct xive_q *q = &xc->queues[i];
1847
1848 xive_native_disable_queue(xc->vp_id, q, i);
1849 if (q->qpage) {
1850 free_pages((unsigned long)q->qpage,
1851 xive->q_page_order);
1852 q->qpage = NULL;
1853 }
1854 }
1855
1856
1857 if (xc->vp_ipi) {
1858 xive_cleanup_irq_data(&xc->vp_ipi_data);
1859 xive_native_free_irq(xc->vp_ipi);
1860 }
1861
1862 kfree(xc);
1863
1864
1865 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1866 vcpu->arch.xive_vcpu = NULL;
1867 }
1868
1869 static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1870 {
1871
1872
1873
1874 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
1875 }
1876
1877 int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1878 {
1879 u32 vp_id;
1880
1881 if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1882 pr_devel("Out of bounds !\n");
1883 return -EINVAL;
1884 }
1885
1886 if (xive->vp_base == XIVE_INVALID_VP) {
1887 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1888 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1889
1890 if (xive->vp_base == XIVE_INVALID_VP)
1891 return -ENOSPC;
1892 }
1893
1894 vp_id = kvmppc_xive_vp(xive, cpu);
1895 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1896 pr_devel("Duplicate !\n");
1897 return -EEXIST;
1898 }
1899
1900 *vp = vp_id;
1901
1902 return 0;
1903 }
1904
1905 int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1906 struct kvm_vcpu *vcpu, u32 cpu)
1907 {
1908 struct kvmppc_xive *xive = dev->private;
1909 struct kvmppc_xive_vcpu *xc;
1910 int i, r = -EBUSY;
1911 u32 vp_id;
1912
1913 pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1914
1915 if (dev->ops != &kvm_xive_ops) {
1916 pr_devel("Wrong ops !\n");
1917 return -EPERM;
1918 }
1919 if (xive->kvm != vcpu->kvm)
1920 return -EPERM;
1921 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1922 return -EBUSY;
1923
1924
1925 mutex_lock(&xive->lock);
1926
1927 r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1928 if (r)
1929 goto bail;
1930
1931 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1932 if (!xc) {
1933 r = -ENOMEM;
1934 goto bail;
1935 }
1936
1937 vcpu->arch.xive_vcpu = xc;
1938 xc->xive = xive;
1939 xc->vcpu = vcpu;
1940 xc->server_num = cpu;
1941 xc->vp_id = vp_id;
1942 xc->mfrr = 0xff;
1943 xc->valid = true;
1944
1945 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1946 if (r)
1947 goto bail;
1948
1949 if (!kvmppc_xive_check_save_restore(vcpu)) {
1950 pr_err("inconsistent save-restore setup for VCPU %d\n", cpu);
1951 r = -EIO;
1952 goto bail;
1953 }
1954
1955
1956 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1957 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1958
1959
1960 xc->vp_ipi = xive_native_alloc_irq();
1961 if (!xc->vp_ipi) {
1962 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1963 r = -EIO;
1964 goto bail;
1965 }
1966 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1967
1968 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1969 if (r)
1970 goto bail;
1971
/*
 * Enable the VP first as the single escalation mode will affect
 * escalation interrupt numbering.
 */
1976 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
1977 if (r) {
1978 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1979 goto bail;
1980 }
1981
/*
 * Initialize queues. Initially we set them all for no queueing and
 * we enable escalation for queue 0 only, which we'll use for our
 * MFRR change notifications. If the VCPU is hot-plugged, we handle
 * provisioning based on the existing "map" of enabled queues.
 */
1989 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1990 struct xive_q *q = &xc->queues[i];
1991
1992
1993 if (i == 7 && kvmppc_xive_has_single_escalation(xive))
1994 break;
1995
1996
1997 if (xive->qmap & (1 << i)) {
1998 r = xive_provision_queue(vcpu, i);
1999 if (r == 0 && !kvmppc_xive_has_single_escalation(xive))
2000 kvmppc_xive_attach_escalation(
2001 vcpu, i, kvmppc_xive_has_single_escalation(xive));
2002 if (r)
2003 goto bail;
2004 } else {
2005 r = xive_native_configure_queue(xc->vp_id,
2006 q, i, NULL, 0, true);
2007 if (r) {
2008 pr_err("Failed to configure queue %d for VCPU %d\n",
2009 i, cpu);
2010 goto bail;
2011 }
2012 }
2013 }
2014
2015
2016 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive));
2017 if (r)
2018 goto bail;
2019
2020
2021 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
2022 if (!r)
2023 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
2024
2025 bail:
2026 mutex_unlock(&xive->lock);
2027 if (r) {
2028 kvmppc_xive_cleanup_vcpu(vcpu);
2029 return r;
2030 }
2031
2032 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
2033 return 0;
2034 }
2035
2036
2037
2038
2039 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
2040 {
2041 struct kvmppc_xive_src_block *sb;
2042 struct kvmppc_xive_irq_state *state;
2043 u16 idx;
2044
2045 sb = kvmppc_xive_find_source(xive, irq, &idx);
2046 if (!sb)
2047 return;
2048
2049 state = &sb->irq_state[idx];
2050
2051
2052 if (!state->valid) {
2053 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
2054 return;
2055 }
2056
2057
2058
2059
2060
2061
2062 if (!state->saved_p)
2063 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
2064
2065
2066 state->in_queue = true;
2067 }
2068
2069 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
2070 struct kvmppc_xive_src_block *sb,
2071 u32 irq)
2072 {
2073 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
2074
2075 if (!state->valid)
2076 return;
2077
2078
2079 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
2080
2081
2082 state->saved_p = state->old_p;
2083 state->saved_q = state->old_q;
2084
2085
2086 arch_spin_unlock(&sb->lock);
2087 }
2088
2089 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
2090 struct kvmppc_xive_src_block *sb,
2091 u32 irq)
2092 {
2093 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
2094
2095 if (!state->valid)
2096 return;
2097
2098
2099
2100
2101
2102
2103 xive_lock_for_unmask(sb, state);
2104
2105
2106 if (state->saved_scan_prio != MASKED)
2107 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
2108
2109
2110 arch_spin_unlock(&sb->lock);
2111 }
2112
2113 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
2114 {
2115 u32 idx = q->idx;
2116 u32 toggle = q->toggle;
2117 u32 irq;
2118
2119 do {
2120 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
2121 if (irq > XICS_IPI)
2122 xive_pre_save_set_queued(xive, irq);
2123 } while(irq);
2124 }
2125
2126 static void xive_pre_save_scan(struct kvmppc_xive *xive)
2127 {
2128 struct kvm_vcpu *vcpu = NULL;
2129 unsigned long i;
2130 int j;
2131
2132
2133
2134
2135
2136 for (i = 0; i <= xive->max_sbid; i++) {
2137 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2138 if (!sb)
2139 continue;
2140 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
2141 xive_pre_save_mask_irq(xive, sb, j);
2142 }
2143
2144
2145 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
2146 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2147 if (!xc)
2148 continue;
2149 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
2150 if (xc->queues[j].qpage)
2151 xive_pre_save_queue(xive, &xc->queues[j]);
2152 }
2153 }
2154
2155
2156 for (i = 0; i <= xive->max_sbid; i++) {
2157 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2158 if (!sb)
2159 continue;
2160 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
2161 xive_pre_save_unmask_irq(xive, sb, j);
2162 }
2163 }
2164
2165 static void xive_post_save_scan(struct kvmppc_xive *xive)
2166 {
2167 u32 i, j;
2168
2169
2170 for (i = 0; i <= xive->max_sbid; i++) {
2171 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2172 if (!sb)
2173 continue;
2174 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
2175 sb->irq_state[j].in_queue = false;
2176 }
2177
2178
2179 xive->saved_src_count = 0;
2180 }
2181
2182
2183
2184
2185 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
2186 {
2187 struct kvmppc_xive_src_block *sb;
2188 struct kvmppc_xive_irq_state *state;
2189 u64 __user *ubufp = (u64 __user *) addr;
2190 u64 val, prio;
2191 u16 idx;
2192
2193 sb = kvmppc_xive_find_source(xive, irq, &idx);
2194 if (!sb)
2195 return -ENOENT;
2196
2197 state = &sb->irq_state[idx];
2198
2199 if (!state->valid)
2200 return -ENOENT;
2201
2202 pr_devel("get_source(%ld)...\n", irq);
2203
/*
 * To build something that looks like a XICS migration stream we
 * cannot treat interrupts individually. On the first get_source()
 * we mask all interrupts (saving their PQ state), sync them so
 * anything already fired reaches its queue, then scan all queues to
 * flag which interrupts are still in flight so they can be marked
 * pending and resent on restore. The rest of get_source() merely
 * converts that collected state to the userspace format.
 */
2220 if (xive->saved_src_count == 0)
2221 xive_pre_save_scan(xive);
2222 xive->saved_src_count++;
2223
2224
2225 val = state->act_server;
2226 prio = state->saved_scan_prio;
2227
2228 if (prio == MASKED) {
2229 val |= KVM_XICS_MASKED;
2230 prio = state->saved_priority;
2231 }
2232 val |= prio << KVM_XICS_PRIORITY_SHIFT;
2233 if (state->lsi) {
2234 val |= KVM_XICS_LEVEL_SENSITIVE;
2235 if (state->saved_p)
2236 val |= KVM_XICS_PENDING;
2237 } else {
2238 if (state->saved_p)
2239 val |= KVM_XICS_PRESENTED;
2240
2241 if (state->saved_q)
2242 val |= KVM_XICS_QUEUED;
2243
2244
2245
2246
2247
2248
2249
2250 if (state->in_queue || (prio == MASKED && state->saved_q))
2251 val |= KVM_XICS_PENDING;
2252 }
2253
2254
2255
2256
2257
2258 if (xive->saved_src_count == xive->src_count)
2259 xive_post_save_scan(xive);
2260
2261
2262 if (put_user(val, ubufp))
2263 return -EFAULT;
2264
2265 return 0;
2266 }
2267
2268 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
2269 struct kvmppc_xive *xive, int irq)
2270 {
2271 struct kvmppc_xive_src_block *sb;
2272 int i, bid;
2273
2274 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
2275
2276 mutex_lock(&xive->lock);
2277
2278
2279 if (xive->src_blocks[bid])
2280 goto out;
2281
2282
2283 sb = kzalloc(sizeof(*sb), GFP_KERNEL);
2284 if (!sb)
2285 goto out;
2286
2287 sb->id = bid;
2288
2289 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2290 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
2291 sb->irq_state[i].eisn = 0;
2292 sb->irq_state[i].guest_priority = MASKED;
2293 sb->irq_state[i].saved_priority = MASKED;
2294 sb->irq_state[i].act_priority = MASKED;
2295 }
2296 smp_wmb();
2297 xive->src_blocks[bid] = sb;
2298
2299 if (bid > xive->max_sbid)
2300 xive->max_sbid = bid;
2301
2302 out:
2303 mutex_unlock(&xive->lock);
2304 return xive->src_blocks[bid];
2305 }
2306
2307 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
2308 {
2309 struct kvm *kvm = xive->kvm;
2310 struct kvm_vcpu *vcpu = NULL;
2311 unsigned long i;
2312
2313 kvm_for_each_vcpu(i, vcpu, kvm) {
2314 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2315
2316 if (!xc)
2317 continue;
2318
2319 if (xc->delayed_irq == irq) {
2320 xc->delayed_irq = 0;
2321 xive->delayed_irqs--;
2322 return true;
2323 }
2324 }
2325 return false;
2326 }
2327
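/* Set/restore the state of one interrupt source from userspace */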
2328 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
2329 {
2330 struct kvmppc_xive_src_block *sb;
2331 struct kvmppc_xive_irq_state *state;
2332 u64 __user *ubufp = (u64 __user *) addr;
2333 u16 idx;
2334 u64 val;
2335 u8 act_prio, guest_prio;
2336 u32 server;
2337 int rc = 0;
2338
2339 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
2340 return -ENOENT;
2341
2342 pr_devel("set_source(irq=0x%lx)\n", irq);
2343
2344
2345 sb = kvmppc_xive_find_source(xive, irq, &idx);
2346 if (!sb) {
2347 pr_devel("No source, creating source block...\n");
2348 sb = kvmppc_xive_create_src_block(xive, irq);
2349 if (!sb) {
2350 pr_devel("Failed to create block...\n");
2351 return -ENOMEM;
2352 }
2353 }
2354 state = &sb->irq_state[idx];
2355
2356
2357 if (get_user(val, ubufp)) {
2358 pr_devel("fault getting user info !\n");
2359 return -EFAULT;
2360 }
2361
2362 server = val & KVM_XICS_DESTINATION_MASK;
2363 guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
2364
pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
2366 val, server, guest_prio);
2367
2368
2369
2370
2371
2372 if (!state->ipi_number) {
2373 state->ipi_number = xive_native_alloc_irq();
2374 if (state->ipi_number == 0) {
2375 pr_devel("Failed to allocate IPI !\n");
2376 return -ENOMEM;
2377 }
2378 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
2379 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
2380 }
2381
/*
 * We use lock_and_mask() to set us in the right masked state. The
 * saved state is overridden further down, but this handles the
 * cases of interrupts that need FW masking. We set the initial
 * guest_priority to 0 before calling it to ensure it actually
 * performs the masking.
 */
2389 state->guest_priority = 0;
2390 xive_lock_and_mask(xive, sb, state);
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400 act_prio = xive_prio_from_guest(guest_prio);
2401 state->act_priority = MASKED;
2402
2403
2404
2405
2406
2407
2408 arch_spin_unlock(&sb->lock);
2409
2410
2411 if (act_prio != MASKED) {
2412
2413 mutex_lock(&xive->lock);
2414 rc = xive_check_provisioning(xive->kvm, act_prio);
2415 mutex_unlock(&xive->lock);
2416
2417
2418 if (rc == 0)
2419 rc = xive_target_interrupt(xive->kvm, state,
2420 server, act_prio);
2421
2422
2423
2424
2425
2426 }
2427
2428
2429
2430
2431
2432 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
2433 val |= KVM_XICS_PENDING;
2434 pr_devel(" Found delayed ! forcing PENDING !\n");
2435 }
2436
2437
2438 state->old_p = false;
2439 state->old_q = false;
2440 state->lsi = false;
2441 state->asserted = false;
2442
2443
2444 if (val & KVM_XICS_LEVEL_SENSITIVE) {
2445 state->lsi = true;
2446 if (val & KVM_XICS_PENDING)
2447 state->asserted = true;
2448 pr_devel(" LSI ! Asserted=%d\n", state->asserted);
2449 }
2450
/*
 * Restore P and Q. If the interrupt was pending, we force Q and !P,
 * which will trigger a resend.
 *
 * A guest that had an interrupt both queued and Q set will be
 * restored with a single instance of it, which is fine as
 * coalescing interrupts that haven't been presented yet is always
 * allowed.
 */
2461 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
2462 state->old_p = true;
2463 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
2464 state->old_q = true;
2465
2466 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
2467
/*
 * If the interrupt was unmasked, update the guest priority and
 * perform the appropriate state transition and re-trigger if
 * necessary.
 */
2473 if (val & KVM_XICS_MASKED) {
2474 pr_devel(" masked, saving prio\n");
2475 state->guest_priority = MASKED;
2476 state->saved_priority = guest_prio;
2477 } else {
2478 pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
2479 xive_finish_unmask(xive, sb, state, guest_prio);
2480 state->saved_priority = guest_prio;
2481 }
2482
2483
2484 if (!state->valid)
2485 xive->src_count++;
2486 state->valid = true;
2487
2488 return 0;
2489 }
2490
2491 int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
2492 bool line_status)
2493 {
2494 struct kvmppc_xive *xive = kvm->arch.xive;
2495 struct kvmppc_xive_src_block *sb;
2496 struct kvmppc_xive_irq_state *state;
2497 u16 idx;
2498
2499 if (!xive)
2500 return -ENODEV;
2501
2502 sb = kvmppc_xive_find_source(xive, irq, &idx);
2503 if (!sb)
2504 return -EINVAL;
2505
2506
2507 state = &sb->irq_state[idx];
2508 if (!state->valid)
2509 return -EINVAL;
2510
2511
2512 if (state->pt_number)
2513 return -EINVAL;
2514
2515 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
2516 state->asserted = true;
2517 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
2518 state->asserted = false;
2519 return 0;
2520 }
2521
2522
2523 xive_irq_trigger(&state->ipi_data);
2524
2525 return 0;
2526 }
2527
2528 int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
2529 {
2530 u32 __user *ubufp = (u32 __user *) addr;
2531 u32 nr_servers;
2532 int rc = 0;
2533
2534 if (get_user(nr_servers, ubufp))
2535 return -EFAULT;
2536
2537 pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
2538
2539 if (!nr_servers || nr_servers > KVM_MAX_VCPU_IDS)
2540 return -EINVAL;
2541
2542 mutex_lock(&xive->lock);
2543 if (xive->vp_base != XIVE_INVALID_VP)
/*
 * The VP block is allocated once and freed when the device is
 * released. Better not allow the user to change the number of
 * servers if a VP block was already allocated.
 */
2551 rc = -EBUSY;
2552 else if (nr_servers > KVM_MAX_VCPUS)
/*
 * We don't need more servers. Higher vCPU ids get packed down
 * below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
 */
2556 xive->nr_servers = KVM_MAX_VCPUS;
2557 else
2558 xive->nr_servers = nr_servers;
2559
2560 mutex_unlock(&xive->lock);
2561
2562 return rc;
2563 }
2564
2565 static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2566 {
2567 struct kvmppc_xive *xive = dev->private;
2568
2569
2570 switch (attr->group) {
2571 case KVM_DEV_XICS_GRP_SOURCES:
2572 return xive_set_source(xive, attr->attr, attr->addr);
2573 case KVM_DEV_XICS_GRP_CTRL:
2574 switch (attr->attr) {
2575 case KVM_DEV_XICS_NR_SERVERS:
2576 return kvmppc_xive_set_nr_servers(xive, attr->addr);
2577 }
2578 }
2579 return -ENXIO;
2580 }
2581
2582 static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2583 {
2584 struct kvmppc_xive *xive = dev->private;
2585
2586
2587 switch (attr->group) {
2588 case KVM_DEV_XICS_GRP_SOURCES:
2589 return xive_get_source(xive, attr->attr, attr->addr);
2590 }
2591 return -ENXIO;
2592 }
2593
2594 static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2595 {
2596
2597 switch (attr->group) {
2598 case KVM_DEV_XICS_GRP_SOURCES:
2599 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
2600 attr->attr < KVMPPC_XICS_NR_IRQS)
2601 return 0;
2602 break;
2603 case KVM_DEV_XICS_GRP_CTRL:
2604 switch (attr->attr) {
2605 case KVM_DEV_XICS_NR_SERVERS:
2606 return 0;
2607 }
2608 }
2609 return -ENXIO;
2610 }
2611
2612 static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
2613 {
2614 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
2615 xive_native_configure_irq(hw_num, 0, MASKED, 0);
2616 }
2617
2618 void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
2619 {
2620 int i;
2621
2622 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2623 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
2624
2625 if (!state->valid)
2626 continue;
2627
2628 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
2629 xive_cleanup_irq_data(&state->ipi_data);
2630 xive_native_free_irq(state->ipi_number);
2631
2632
2633 if (state->pt_number)
2634 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
2635
2636 state->valid = false;
2637 }
2638 }
2639
/*
 * Called when the device fd is closed. kvm->lock is held.
 */
2643 static void kvmppc_xive_release(struct kvm_device *dev)
2644 {
2645 struct kvmppc_xive *xive = dev->private;
2646 struct kvm *kvm = xive->kvm;
2647 struct kvm_vcpu *vcpu;
2648 unsigned long i;
2649
2650 pr_devel("Releasing xive device\n");
2651
/*
 * Since this is the device release function, we know that userspace
 * does not have any open fd referring to the device, so no device
 * attribute set/get, connect_vcpu or set/clr_mapped calls can be
 * executing concurrently.
 */
2661 debugfs_remove(xive->dentry);
2662
2663
2664
2665
2666 kvm_for_each_vcpu(i, vcpu, kvm) {
/*
 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
 * (i.e. kvmppc_xive_[gs]et_icp) and no KVM_RUN (which pushes/pulls
 * the XIVE context) can run concurrently with the cleanup.
 */
2675 mutex_lock(&vcpu->mutex);
2676 kvmppc_xive_cleanup_vcpu(vcpu);
2677 mutex_unlock(&vcpu->mutex);
2678 }
2679
/*
 * Now that vcpu->arch.xive_vcpu is cleared and the vcpus can't be
 * running the XIVE push/pull or one_reg code, it is safe to drop
 * the back-pointer from the VM.
 */
2686 kvm->arch.xive = NULL;
2687
2688
2689 for (i = 0; i <= xive->max_sbid; i++) {
2690 if (xive->src_blocks[i])
2691 kvmppc_xive_free_sources(xive->src_blocks[i]);
2692 kfree(xive->src_blocks[i]);
2693 xive->src_blocks[i] = NULL;
2694 }
2695
2696 if (xive->vp_base != XIVE_INVALID_VP)
2697 xive_native_free_vp_block(xive->vp_base);
2698
/*
 * The kvmppc_xive struct itself is kept (see kvmppc_xive_get_device())
 * for reuse if the guest switches interrupt mode again; only the
 * kvm_device wrapper is freed here.
 */
2706 kfree(dev);
2707 }
2708
/*
 * The guest can switch interrupt mode (XICS legacy or XIVE native),
 * which means the KVM device for the previous mode is released and a
 * new one created. We keep the kvmppc_xive structure around (one per
 * mode, under kvm->arch.xive_devices) and reuse it on re-creation
 * rather than freeing it, see kvmppc_xive_release().
 */
2718 struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
2719 {
2720 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
2721 &kvm->arch.xive_devices.native :
2722 &kvm->arch.xive_devices.xics_on_xive;
2723 struct kvmppc_xive *xive = *kvm_xive_device;
2724
2725 if (!xive) {
2726 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2727 *kvm_xive_device = xive;
2728 } else {
2729 memset(xive, 0, sizeof(*xive));
2730 }
2731
2732 return xive;
2733 }
2734
/*
 * Create a XICS device with a XIVE backend. kvm->lock is held.
 */
2738 static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2739 {
2740 struct kvmppc_xive *xive;
2741 struct kvm *kvm = dev->kvm;
2742
2743 pr_devel("Creating xive for partition\n");
2744
2745
2746 if (kvm->arch.xive)
2747 return -EEXIST;
2748
2749 xive = kvmppc_xive_get_device(kvm, type);
2750 if (!xive)
2751 return -ENOMEM;
2752
2753 dev->private = xive;
2754 xive->dev = dev;
2755 xive->kvm = kvm;
2756 mutex_init(&xive->lock);
2757
2758
2759 xive->q_order = xive_native_default_eq_shift();
2760 if (xive->q_order < PAGE_SHIFT)
2761 xive->q_page_order = 0;
2762 else
2763 xive->q_page_order = xive->q_order - PAGE_SHIFT;
2764
2765
2766 xive->vp_base = XIVE_INVALID_VP;
2767
2768
2769
2770 xive->nr_servers = KVM_MAX_VCPUS;
2771
2772 if (xive_native_has_single_escalation())
2773 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
2774
2775 if (xive_native_has_save_restore())
2776 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;
2777
2778 kvm->arch.xive = xive;
2779 return 0;
2780 }
2781
2782 int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
2783 {
2784 struct kvmppc_vcore *vc = vcpu->arch.vcore;
2785
2786
2787 if (!kvmppc_xics_enabled(vcpu))
2788 return H_TOO_HARD;
2789
2790 switch (req) {
2791 case H_XIRR:
2792 return xive_vm_h_xirr(vcpu);
2793 case H_CPPR:
2794 return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
2795 case H_EOI:
2796 return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
2797 case H_IPI:
2798 return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
2799 kvmppc_get_gpr(vcpu, 5));
2800 case H_IPOLL:
2801 return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
2802 case H_XIRR_X:
2803 xive_vm_h_xirr(vcpu);
2804 kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
2805 return H_SUCCESS;
2806 }
2807
2808 return H_UNSUPPORTED;
2809 }
2810 EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
2811
2812 int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2813 {
2814 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2815 unsigned int i;
2816
2817 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2818 struct xive_q *q = &xc->queues[i];
2819 u32 i0, i1, idx;
2820
2821 if (!q->qpage && !xc->esc_virq[i])
2822 continue;
2823
2824 if (q->qpage) {
2825 seq_printf(m, " q[%d]: ", i);
2826 idx = q->idx;
2827 i0 = be32_to_cpup(q->qpage + idx);
2828 idx = (idx + 1) & q->msk;
2829 i1 = be32_to_cpup(q->qpage + idx);
2830 seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2831 i0, i1);
2832 }
2833 if (xc->esc_virq[i]) {
2834 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2835 struct xive_irq_data *xd =
2836 irq_data_get_irq_handler_data(d);
2837 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2838
2839 seq_printf(m, " ESC %d %c%c EOI @%llx",
2840 xc->esc_virq[i],
2841 (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
2842 (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
2843 xd->eoi_page);
2844 seq_puts(m, "\n");
2845 }
2846 }
2847 return 0;
2848 }
2849
2850 void kvmppc_xive_debug_show_sources(struct seq_file *m,
2851 struct kvmppc_xive_src_block *sb)
2852 {
2853 int i;
2854
2855 seq_puts(m, " LISN HW/CHIP TYPE PQ EISN CPU/PRIO\n");
2856 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
2857 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
2858 struct xive_irq_data *xd;
2859 u64 pq;
2860 u32 hw_num;
2861
2862 if (!state->valid)
2863 continue;
2864
2865 kvmppc_xive_select_irq(state, &hw_num, &xd);
2866
2867 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2868
2869 seq_printf(m, "%08x %08x/%02x", state->number, hw_num,
2870 xd->src_chip);
2871 if (state->lsi)
2872 seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
2873 else
2874 seq_puts(m, " MSI");
2875
2876 seq_printf(m, " %s %c%c %08x % 4d/%d",
2877 state->ipi_number == hw_num ? "IPI" : " PT",
2878 pq & XIVE_ESB_VAL_P ? 'P' : '-',
2879 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
2880 state->eisn, state->act_server,
2881 state->act_priority);
2882
2883 seq_puts(m, "\n");
2884 }
2885 }
2886
2887 static int xive_debug_show(struct seq_file *m, void *private)
2888 {
2889 struct kvmppc_xive *xive = m->private;
2890 struct kvm *kvm = xive->kvm;
2891 struct kvm_vcpu *vcpu;
2892 u64 t_rm_h_xirr = 0;
2893 u64 t_rm_h_ipoll = 0;
2894 u64 t_rm_h_cppr = 0;
2895 u64 t_rm_h_eoi = 0;
2896 u64 t_rm_h_ipi = 0;
2897 u64 t_vm_h_xirr = 0;
2898 u64 t_vm_h_ipoll = 0;
2899 u64 t_vm_h_cppr = 0;
2900 u64 t_vm_h_eoi = 0;
2901 u64 t_vm_h_ipi = 0;
2902 unsigned long i;
2903
2904 if (!kvm)
2905 return 0;
2906
2907 seq_puts(m, "=========\nVCPU state\n=========\n");
2908
2909 kvm_for_each_vcpu(i, vcpu, kvm) {
2910 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2911
2912 if (!xc)
2913 continue;
2914
2915 seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
2916 " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2917 xc->server_num, xc->vp_id, xc->vp_chip_id,
2918 xc->cppr, xc->hw_cppr,
2919 xc->mfrr, xc->pending,
2920 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2921
2922 kvmppc_xive_debug_show_queues(m, vcpu);
2923
2924 t_rm_h_xirr += xc->stat_rm_h_xirr;
2925 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2926 t_rm_h_cppr += xc->stat_rm_h_cppr;
2927 t_rm_h_eoi += xc->stat_rm_h_eoi;
2928 t_rm_h_ipi += xc->stat_rm_h_ipi;
2929 t_vm_h_xirr += xc->stat_vm_h_xirr;
2930 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2931 t_vm_h_cppr += xc->stat_vm_h_cppr;
2932 t_vm_h_eoi += xc->stat_vm_h_eoi;
2933 t_vm_h_ipi += xc->stat_vm_h_ipi;
2934 }
2935
2936 seq_puts(m, "Hcalls totals\n");
2937 seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2938 seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2939 seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2940 seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2941 seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2942
2943 seq_puts(m, "=========\nSources\n=========\n");
2944
2945 for (i = 0; i <= xive->max_sbid; i++) {
2946 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
2947
2948 if (sb) {
2949 arch_spin_lock(&sb->lock);
2950 kvmppc_xive_debug_show_sources(m, sb);
2951 arch_spin_unlock(&sb->lock);
2952 }
2953 }
2954
2955 return 0;
2956 }
2957
2958 DEFINE_SHOW_ATTRIBUTE(xive_debug);
2959
2960 static void xive_debugfs_init(struct kvmppc_xive *xive)
2961 {
2962 xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry,
2963 xive, &xive_debug_fops);
2964
2965 pr_debug("%s: created\n", __func__);
2966 }
2967
2968 static void kvmppc_xive_init(struct kvm_device *dev)
2969 {
2970 struct kvmppc_xive *xive = dev->private;
2971
2972
2973 xive_debugfs_init(xive);
2974 }
2975
2976 struct kvm_device_ops kvm_xive_ops = {
2977 .name = "kvm-xive",
2978 .create = kvmppc_xive_create,
2979 .init = kvmppc_xive_init,
2980 .release = kvmppc_xive_release,
2981 .set_attr = xive_set_attr,
2982 .get_attr = xive_get_attr,
2983 .has_attr = xive_has_attr,
2984 };