// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false
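
/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state).
 */
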
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);
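
/*
 * Deliver an interrupt from the ICS side: update the source's P/Q
 * state and, if the interrupt becomes newly presented (P=1, Q=0),
 * hand it to the ICP for delivery to the target server.
 */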
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/* Any other value is taken as asserted */

	/* Deasserting an edge (MSI) source is a no-op */
	if (!state->lsi && level == 0)
		return 0;

	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting an already-set LSI: no-op */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0: the only state in which we present the interrupt */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

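/*
 * Update a source's server/priority (used e.g. by the "ibm,set-xive"
 * RTAS emulation) and deliver anything that became deliverable.
 */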
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}
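
/* -- ICP routines, including hcalls -- */
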
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/*
	 * Order reading the resend_map vs. reading the resend flags in
	 * the ICS states (pairs with the smp_wmb() in icp_deliver_irq).
	 */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well.
	 */
again:
	/* Look up the ICS state for this interrupt */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * We do not differentiate normal deliveries and resends, so
	 * this implementation will differ from PAPR and not lose
	 * such interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = false;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so set the
		 * resend map bit and mark the ICS state as needing a resend.
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if the resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between the icp_try_to_deliver() atomic update and now,
		 * then we know it might have missed the resend_map bit.
		 * So we retry.
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = false;
			goto again;
		}
	}
out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with the new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify the ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * in PAPR, but checking the MFRR makes things
	 * work right anyway.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too.
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark the IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where the pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject of a pending interrupt */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resends due to MFRR becoming less favored */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself.
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if the P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move the Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, EOI is not supposed to be used for that.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update.
	 *
	 * The CPPR update (ICP State: Down_CPPR) is handled in a
	 * separate function as it is shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}
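
/*
 * Complete, in virtual mode, the actions that a real-mode hcall
 * handler deferred: vcpu kicks, resend scans and EOI notifications.
 */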
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Complete work the real-mode handler pushed up to virtual mode */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
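
/* -- Initialisation code etc. -- */
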
static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid;
	unsigned long flags, i;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);
		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xics_debug);

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	xics->dentry = debugfs_create_file("xics", 0444, xics->kvm->debugfs_dentry,
					   xics, &xics_debug_fops);

	pr_debug("%s: created\n", __func__);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
						 struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	/* Publish the ICS only once it is fully initialised */
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}
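
/*
 * Pack the ICP state into the 64-bit format used by the
 * KVM_REG_PPC_ICP_STATE one_reg register.
 */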
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected.  We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter).  We do handle resends due to MFRR becoming less
	 * favored because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}
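
/*
 * One-source save/restore used by the KVM_DEV_XICS_GRP_SOURCES device
 * attribute group (e.g. for migration): a u64 encodes the server,
 * priority and the masked/pending/presented/queued flags.
 */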
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;

	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
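
/*
 * Called when the device fd is closed.
 */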
static void kvmppc_xics_release(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	unsigned long i;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;

	pr_devel("Releasing xics device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device.  Therefore there can not be any of the device
	 * attribute set/get functions being executed concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped
	 * functions also cannot be being executed.
	 */

	debugfs_remove(xics->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also means that execution is
		 * excluded for the vcpu until the ICP is freed. When the
		 * vcpu can execute again, vcpu->arch.icp and
		 * vcpu->arch.irq_type have been cleared and the vcpu will
		 * not be going into the XICS code anymore.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xics_free_icp(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++) {
		kfree(xics->ics[i]);
		xics->ics[i] = NULL;
	}

	/*
	 * A reference of the kvmppc_xics pointer is now kept under
	 * the xics_device pointer of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}
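
/*
 * The kvmppc_xics structure is kept under kvm->arch.xics_device and
 * reused if userspace creates the device again after a release; it is
 * only freed when the VM is destroyed.
 */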
static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
{
	struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
	struct kvmppc_xics *xics = *kvm_xics_device;

	if (!xics) {
		xics = kzalloc(sizeof(*xics), GFP_KERNEL);
		*kvm_xics_device = xics;
	} else {
		memset(xics, 0, sizeof(*xics));
	}

	return xics;
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xics for partition\n");

	/* Already there ? */
	if (kvm->arch.xics)
		return -EEXIST;

	xics = kvmppc_xics_get_device(kvm);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;
	kvm->arch.xics = xics;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.release = kvmppc_xics_release,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

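/*
 * Track the host interrupt that is mapped through to a guest source,
 * used by the interrupt passthrough code.
 */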
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);