/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"

#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should be called only in branch delay slot active.
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
				  unsigned long *out)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc;
	int err;

	if (epc & 3) {
		kvm_err("%s: unaligned epc\n", __func__);
		return -EINVAL;
	}

	/* Read the instruction */
	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
	if (err)
		return err;

	switch (insn.i_format.opcode) {
	/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			fallthrough;
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		default:
			return -EINVAL;
		}
		break;
	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp) {
				kvm_err("%s: DSP branch but not DSP ASE\n",
					__func__);
				return -EINVAL;
			}

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		default:
			return -EINVAL;
		}
		break;
	/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
		fallthrough;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

	/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;
	/* These are the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		return -EINVAL;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		return -EINVAL;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Fall through - Compact branches not supported before R6 */
#endif
	default:
		return -EINVAL;
	}

	*out = nextpc;
	return 0;
}

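/*
 * Worked example for the branch arithmetic above (illustrative values, not
 * from this file): a taken "beq" at guest PC 0x80001000 with a signed
 * immediate of 0x10 yields 0x80001000 + 4 + (0x10 << 2) = 0x80001044, while
 * the not-taken case resumes after the delay slot at 0x80001000 + 8 =
 * 0x80001008.
 */
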
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	int err;

	if (cause & CAUSEF_BD) {
		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
					     &vcpu->arch.pc);
		if (err)
			return EMULATE_FAIL;
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return EMULATE_DONE;
}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:	Guest pointer to faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output where the instruction encoding is written.
 *
 * Gets the instruction encoding of the faulting instruction from the host
 * CP0_BadInstr register value saved at exit time.
 *
 * Returns:	0 on success.
 *		-EINVAL if the CPU has no BadInstr register.
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstr) {
		*out = vcpu->arch.host_cp0_badinstr;
		return 0;
	} else {
		WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
		return -EINVAL;
	}
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:	Guest pointer to prior faulting instruction.
 * @vcpu:	KVM VCPU information.
 * @out:	Output where the instruction encoding is written.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * whose delay slot faulted) from the host CP0_BadInstrP register value saved
 * at exit time.
 *
 * Returns:	0 on success.
 *		-EINVAL if the CPU has no BadInstrP register.
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	if (cpu_has_badinstrp) {
		*out = vcpu->arch.host_cp0_badinstrp;
		return 0;
	} else {
		WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
		return -EINVAL;
	}
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to scale.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The CP0_Count ticks corresponding to @now (wrapping 32 bits).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow
	 * since at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

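/*
 * Illustrative example of the scaling above (hypothetical numbers): with
 * count_hz = 100000000 (100 MHz) and a biased delta of 1000000 ns, the
 * result is 1000000 * 100000000 / NSEC_PER_SEC = 100000 CP0_Count ticks.
 */
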
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit (count_ctl.DC) is set, in
 * which case it is the time that the count was last disabled (count_resume).
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is becoming pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

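/*
 * Note on the wrap handling above (an observation, not new behaviour):
 * "(u64)(u32)(compare - count - 1) + 1" maps a distance of zero ticks to a
 * full 2^32-tick period rather than an immediate expiry; e.g. with
 * compare == count the timeout is 2^32 ticks, the time CP0_Count takes to
 * wrap back around to CP0_Compare.
 */
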
/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
 * is to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift)
{
	ktime_t now, count_time;
	u32 now_count, before_count;
	u64 delta;
	int drift, ret = 0;

	/* Calculate expected count at before */
	before_count = vcpu->arch.count_bias +
			kvm_mips_ktime_to_count(vcpu, before);

	/*
	 * Detect significantly negative drift, where count is lower than
	 * expected. Some negative drift is expected when hardware counter is
	 * set after kvm_mips_freeze_hrtimer(), and it is then potentially
	 * canceled out by a small amount of positive drift (min_drift is a
	 * conservative negative value).
	 */
	drift = count - before_count;
	if (drift < min_drift) {
		count_time = before;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Calculate expected count right now */
	now = ktime_get();
	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

	/*
	 * Detect positive drift, where count is higher than expected, and
	 * adjust the bias to avoid guest time going backwards.
	 */
	drift = count - now_count;
	if (drift > 0) {
		count_time = now;
		vcpu->arch.count_bias += drift;
		ret = drift;
		goto resume;
	}

	/* Subtract nanosecond delta to find ktime when count was read */
	delta = (u64)(u32)(now_count - count);
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	count_time = ktime_sub_ns(now, delta);

resume:
	/* Resume using the calculated ktime */
	kvm_mips_resume_hrtimer(vcpu, count_time, count);
	return ret;
}

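/*
 * Illustrative example of drift correction (hypothetical numbers): if the
 * hardware CP0_Count read back on restore is 100 ticks ahead of what
 * count_bias predicts for the current ktime, the positive-drift path above
 * adds 100 to count_bias so guest time never appears to step backwards, and
 * the return value (100) tells the caller how much correction was applied.
 */
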
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, so update the static copy of count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going
 * if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at count 0 */
	kvm_mips_write_count(vcpu, 0);
}

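/*
 * Illustrative example of count_period (hypothetical frequency): for
 * count_hz = 100000000 (100 MHz),
 * count_period = (NSEC_PER_SEC << 32) / count_hz = (10^9 * 2^32) / 10^8
 * = 42949672960 ns, i.e. CP0_Count wraps roughly every 42.9 seconds.
 */
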
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate appropriate bias based on the new frequency */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise
 * ensure any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	s32 delta = compare - old_compare;
	u32 cause;
	ktime_t now = ktime_set(0, 0);	/* silence bogus compiler warning */
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/*
	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
	 *
	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
	 * set before CP0_Compare is written. We disable preemption until
	 * CP0_Compare is written to prevent restore of a GTOffset
	 * corresponding to the old CP0_Compare value while the new one is
	 * still unwritten.
	 */
	if (delta > 0) {
		preempt_disable();
		write_c0_gtoffset(compare - read_c0_count());
		back_to_back_c0_hazard();
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
	else
		/*
		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
		 * preserve guest CP0_Cause.TI if we don't want to ack it.
		 */
		cause = kvm_read_c0_guest_cause(cop0);

	kvm_write_c0_guest_compare(cop0, compare);

	if (delta > 0)
		preempt_enable();

	back_to_back_c0_hazard();

	if (!ack && cause & CAUSEF_TI)
		kvm_write_c0_guest_cause(cop0, cause);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);

	/*
	 * If guest CP0_Compare is moving backward, we delay the GTOffset
	 * change until after the new CP0_Compare is written, otherwise the
	 * guest CP0_Count could hit the new guest CP0_Compare.
	 */
	if (delta <= 0)
		write_c0_gtoffset(compare - read_c0_count());
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Expired, so do interrupt */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

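/*
 * Note (an observation on the handler above): count_period is the time taken
 * for a full 2^32-tick wrap of CP0_Count, so pushing the expiry forward by
 * exactly one period keeps the hrtimer aligned with the next time CP0_Count
 * passes CP0_Compare.
 */
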
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vz_lose_htimer(vcpu);
		vcpu->arch.wait = 1;
		kvm_vcpu_halt(vcpu);

		/*
		 * If we are runnable again, go off to user space to check
		 * whether any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu)
{
	int r;
	enum emulation_result er;
	u32 rt;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;
	unsigned int imme;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		goto out_fail;

	switch (inst.i_format.opcode) {
#if defined(CONFIG_64BIT)
	case sd_op:
		run->mmio.len = 8;
		*(u64 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

	case sw_op:
		run->mmio.len = 4;
		*(u32 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case sh_op:
		run->mmio.len = 2;
		*(u16 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u16 *)data);
		break;

	case sb_op:
		run->mmio.len = 1;
		*(u8 *)data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u8 *)data);
		break;

	case swl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);
		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			*(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
					(vcpu->arch.gprs[rt] >> 24);
			break;
		case 1:
			*(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
					(vcpu->arch.gprs[rt] >> 16);
			break;
		case 2:
			*(u32 *)data = ((*(u32 *)data) & 0xff000000) |
					(vcpu->arch.gprs[rt] >> 8);
			break;
		case 3:
			*(u32 *)data = vcpu->arch.gprs[rt];
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

	case swr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);
		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			*(u32 *)data = vcpu->arch.gprs[rt];
			break;
		case 1:
			*(u32 *)data = ((*(u32 *)data) & 0xff) |
					(vcpu->arch.gprs[rt] << 8);
			break;
		case 2:
			*(u32 *)data = ((*(u32 *)data) & 0xffff) |
					(vcpu->arch.gprs[rt] << 16);
			break;
		case 3:
			*(u32 *)data = ((*(u32 *)data) & 0xffffff) |
					(vcpu->arch.gprs[rt] << 24);
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *)data);
		break;

#if defined(CONFIG_64BIT)
	case sdl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
					((vcpu->arch.gprs[rt] >> 56) & 0xff);
			break;
		case 1:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
					((vcpu->arch.gprs[rt] >> 48) & 0xffff);
			break;
		case 2:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
					((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
			break;
		case 3:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
					((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
			break;
		case 4:
			*(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
					((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
			break;
		case 5:
			*(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
					((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
			break;
		case 6:
			*(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
					((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
			break;
		case 7:
			*(u64 *)data = vcpu->arch.gprs[rt];
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;

	case sdr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			*(u64 *)data = vcpu->arch.gprs[rt];
			break;
		case 1:
			*(u64 *)data = ((*(u64 *)data) & 0xff) |
					(vcpu->arch.gprs[rt] << 8);
			break;
		case 2:
			*(u64 *)data = ((*(u64 *)data) & 0xffff) |
					(vcpu->arch.gprs[rt] << 16);
			break;
		case 3:
			*(u64 *)data = ((*(u64 *)data) & 0xffffff) |
					(vcpu->arch.gprs[rt] << 24);
			break;
		case 4:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
					(vcpu->arch.gprs[rt] << 32);
			break;
		case 5:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
					(vcpu->arch.gprs[rt] << 40);
			break;
		case 6:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
					(vcpu->arch.gprs[rt] << 48);
			break;
		case 7:
			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
					(vcpu->arch.gprs[rt] << 56);
			break;
		default:
			break;
		}

		kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u64 *)data);
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case sdc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden sdc2 instructions.
		 * opcode1              instruction
		 *   0x0          gssbx: store 1 byte from GPR
		 *   0x1          gsshx: store 2 bytes from GPR
		 *   0x2          gsswx: store 4 bytes from GPR
		 *   0x3          gssdx: store 8 bytes from GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			*(u8 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u8 *)data);
			break;
		case 0x1:
			run->mmio.len = 2;
			*(u16 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u16 *)data);
			break;
		case 0x2:
			run->mmio.len = 4;
			*(u32 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u32 *)data);
			break;
		case 0x3:
			run->mmio.len = 8;
			*(u64 *)data = vcpu->arch.gprs[rt];

			kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
				  vcpu->arch.gprs[rt], *(u64 *)data);
			break;
		default:
			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif
	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		goto out_fail;
	}

	vcpu->mmio_needed = 1;
	run->mmio.is_write = 1;
	vcpu->mmio_is_write = 1;

	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			     run->mmio.phys_addr, run->mmio.len, data);

	if (!r) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;

out_fail:
	/* Rollback PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	return EMULATE_FAIL;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int r;
	enum emulation_result er;
	unsigned long curr_pc;
	u32 op, rt;
	unsigned int imme;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	/*
	 * Find the resume PC now while we have safe and easy access to the
	 * prior branch instruction, and save it for
	 * kvm_mips_complete_mmio_load() to restore later.
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;
	vcpu->arch.io_pc = vcpu->arch.pc;
	vcpu->arch.pc = curr_pc;

	vcpu->arch.io_gpr = rt;

	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
						vcpu->arch.host_cp0_badvaddr);
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		return EMULATE_FAIL;

	vcpu->mmio_needed = 2;	/* signed */
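	/*
	 * Encoding of vcpu->mmio_needed, as consumed by
	 * kvm_mips_complete_mmio_load() (summarised here from the cases below
	 * and in the completion handler):
	 *   0        no MMIO load pending
	 *   1        zero-extended load (lbu, lhu, lwu)
	 *   2        sign-extended load (lb, lh, lw, ld)
	 *   3..6     lwl with address offset 0..3
	 *   7..10    lwr with address offset 0..3
	 *   11..18   ldl with address offset 0..7
	 *   19..26   ldr with address offset 0..7
	 *   27..30   Loongson gslbx/gslhx/gslwx/gsldx
	 */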
	switch (op) {
#if defined(CONFIG_64BIT)
	case ld_op:
		run->mmio.len = 8;
		break;

	case lwu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
#endif
	case lw_op:
		run->mmio.len = 4;
		break;

	case lhu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lh_op:
		run->mmio.len = 2;
		break;

	case lbu_op:
		vcpu->mmio_needed = 1;	/* unsigned */
		fallthrough;
	case lb_op:
		run->mmio.len = 1;
		break;

	case lwl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 3;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 4;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 5;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 6;	/* 4 bytes */
			break;
		default:
			break;
		}
		break;

	case lwr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x3);

		run->mmio.len = 4;
		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 7;	/* 4 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 8;	/* 3 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 9;	/* 2 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 10;	/* 1 byte */
			break;
		default:
			break;
		}
		break;

#if defined(CONFIG_64BIT)
	case ldl_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 11;	/* 1 byte */
			break;
		case 1:
			vcpu->mmio_needed = 12;	/* 2 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 13;	/* 3 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 14;	/* 4 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 15;	/* 5 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 16;	/* 6 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 17;	/* 7 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 18;	/* 8 bytes */
			break;
		default:
			break;
		}
		break;

	case ldr_op:
		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
					vcpu->arch.host_cp0_badvaddr) & (~0x7);

		run->mmio.len = 8;
		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
		switch (imme) {
		case 0:
			vcpu->mmio_needed = 19;	/* 8 bytes */
			break;
		case 1:
			vcpu->mmio_needed = 20;	/* 7 bytes */
			break;
		case 2:
			vcpu->mmio_needed = 21;	/* 6 bytes */
			break;
		case 3:
			vcpu->mmio_needed = 22;	/* 5 bytes */
			break;
		case 4:
			vcpu->mmio_needed = 23;	/* 4 bytes */
			break;
		case 5:
			vcpu->mmio_needed = 24;	/* 3 bytes */
			break;
		case 6:
			vcpu->mmio_needed = 25;	/* 2 bytes */
			break;
		case 7:
			vcpu->mmio_needed = 26;	/* 1 byte */
			break;
		default:
			break;
		}
		break;
#endif

#ifdef CONFIG_CPU_LOONGSON64
	case ldc2_op:
		rt = inst.loongson3_lsdc2_format.rt;
		switch (inst.loongson3_lsdc2_format.opcode1) {
		/*
		 * Loongson-3 overridden ldc2 instructions.
		 * opcode1              instruction
		 *   0x0          gslbx: load 1 byte to GPR
		 *   0x1          gslhx: load 2 bytes to GPR
		 *   0x2          gslwx: load 4 bytes to GPR
		 *   0x3          gsldx: load 8 bytes to GPR
		 */
		case 0x0:
			run->mmio.len = 1;
			vcpu->mmio_needed = 27;	/* signed */
			break;
		case 0x1:
			run->mmio.len = 2;
			vcpu->mmio_needed = 28;	/* signed */
			break;
		case 0x2:
			run->mmio.len = 4;
			vcpu->mmio_needed = 29;	/* signed */
			break;
		case 0x3:
			run->mmio.len = 8;
			vcpu->mmio_needed = 30;	/* signed */
			break;
		default:
			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
				inst.word);
			break;
		}
		break;
#endif

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;

	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
			    run->mmio.phys_addr, run->mmio.len, run->mmio.data);

	if (!r) {
		kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Restore saved resume PC */
	vcpu->arch.pc = vcpu->arch.io_pc;

	switch (run->mmio.len) {
	case 8:
		switch (vcpu->mmio_needed) {
		case 11:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xff) << 56);
			break;
		case 12:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffff) << 48);
			break;
		case 13:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
			break;
		case 14:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
			break;
		case 15:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
			break;
		case 16:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
			break;
		case 17:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
			break;
		case 18:
		case 19:
			*gpr = *(s64 *)run->mmio.data;
			break;
		case 20:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
				((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
			break;
		case 21:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
				((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
			break;
		case 22:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
				((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
			break;
		case 23:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
				((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
			break;
		case 24:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
				((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
			break;
		case 25:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
				((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
			break;
		case 26:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
				((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
			break;
		default:
			*gpr = *(s64 *)run->mmio.data;
		}
		break;

	case 4:
		switch (vcpu->mmio_needed) {
		case 1:
			*gpr = *(u32 *)run->mmio.data;
			break;
		case 2:
			*gpr = *(s32 *)run->mmio.data;
			break;
		case 3:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
				(((*(s32 *)run->mmio.data) & 0xff) << 24);
			break;
		case 4:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
				(((*(s32 *)run->mmio.data) & 0xffff) << 16);
			break;
		case 5:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
				(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
			break;
		case 6:
		case 7:
			*gpr = *(s32 *)run->mmio.data;
			break;
		case 8:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
			break;
		case 9:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
			break;
		case 10:
			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
			break;
		default:
			*gpr = *(s32 *)run->mmio.data;
		}
		break;

	case 2:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u16 *)run->mmio.data;
		else
			*gpr = *(s16 *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 1)
			*gpr = *(u8 *)run->mmio.data;
		else
			*gpr = *(s8 *)run->mmio.data;
		break;
	}

done:
	return er;
}