/*
 * KVM/MIPS: MIPS specific KVM APIs.
 */
0012 #include <linux/bitops.h>
0013 #include <linux/errno.h>
0014 #include <linux/err.h>
0015 #include <linux/kdebug.h>
0016 #include <linux/module.h>
0017 #include <linux/uaccess.h>
0018 #include <linux/vmalloc.h>
0019 #include <linux/sched/signal.h>
0020 #include <linux/fs.h>
0021 #include <linux/memblock.h>
0022 #include <linux/pgtable.h>
0023
0024 #include <asm/fpu.h>
0025 #include <asm/page.h>
0026 #include <asm/cacheflush.h>
0027 #include <asm/mmu_context.h>
0028 #include <asm/pgalloc.h>
0029
0030 #include <linux/kvm_host.h>
0031
0032 #include "interrupt.h"
0033
0034 #define CREATE_TRACE_POINTS
0035 #include "trace.h"
0036
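/* Spacing between vectored interrupt handler entry points (EIC/VInt mode) */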
0037 #ifndef VECTORSPACING
0038 #define VECTORSPACING 0x100
0039 #endif
0040
0041 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
0042 KVM_GENERIC_VM_STATS()
0043 };
0044
0045 const struct kvm_stats_header kvm_vm_stats_header = {
0046 .name_size = KVM_STATS_NAME_SIZE,
0047 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
0048 .id_offset = sizeof(struct kvm_stats_header),
0049 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
0050 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
0051 sizeof(kvm_vm_stats_desc),
0052 };
0053
0054 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
0055 KVM_GENERIC_VCPU_STATS(),
0056 STATS_DESC_COUNTER(VCPU, wait_exits),
0057 STATS_DESC_COUNTER(VCPU, cache_exits),
0058 STATS_DESC_COUNTER(VCPU, signal_exits),
0059 STATS_DESC_COUNTER(VCPU, int_exits),
0060 STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
0061 STATS_DESC_COUNTER(VCPU, tlbmod_exits),
0062 STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
0063 STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
0064 STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
0065 STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
0066 STATS_DESC_COUNTER(VCPU, syscall_exits),
0067 STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
0068 STATS_DESC_COUNTER(VCPU, break_inst_exits),
0069 STATS_DESC_COUNTER(VCPU, trap_inst_exits),
0070 STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
0071 STATS_DESC_COUNTER(VCPU, fpe_exits),
0072 STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
0073 STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
0074 STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
0075 STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
0076 STATS_DESC_COUNTER(VCPU, vz_hc_exits),
0077 STATS_DESC_COUNTER(VCPU, vz_grr_exits),
0078 STATS_DESC_COUNTER(VCPU, vz_gva_exits),
0079 STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
0080 STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
0081 STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
0082 #ifdef CONFIG_CPU_LOONGSON64
0083 STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
0084 #endif
0085 };
0086
0087 const struct kvm_stats_header kvm_vcpu_stats_header = {
0088 .name_size = KVM_STATS_NAME_SIZE,
0089 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
0090 .id_offset = sizeof(struct kvm_stats_header),
0091 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
0092 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
0093 sizeof(kvm_vcpu_stats_desc),
0094 };
0095
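/* Set while the guest mode change tracepoint has a registered consumer */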
0096 bool kvm_trace_guest_mode_change;
0097
0098 int kvm_guest_mode_change_trace_reg(void)
0099 {
0100 kvm_trace_guest_mode_change = true;
0101 return 0;
0102 }
0103
0104 void kvm_guest_mode_change_trace_unreg(void)
0105 {
0106 kvm_trace_guest_mode_change = false;
0107 }
0108
/*
 * A vCPU is considered runnable whenever it has pending exceptions
 * (i.e. interrupts) left to deliver.
 */
0113 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
0114 {
0115 return !!(vcpu->arch.pending_exceptions);
0116 }
0117
0118 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
0119 {
0120 return false;
0121 }
0122
0123 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
0124 {
0125 return 1;
0126 }
0127
0128 int kvm_arch_hardware_enable(void)
0129 {
0130 return kvm_mips_callbacks->hardware_enable();
0131 }
0132
0133 void kvm_arch_hardware_disable(void)
0134 {
0135 kvm_mips_callbacks->hardware_disable();
0136 }
0137
0138 int kvm_arch_hardware_setup(void *opaque)
0139 {
0140 return 0;
0141 }
0142
0143 int kvm_arch_check_processor_compat(void *opaque)
0144 {
0145 return 0;
0146 }
0147
0148 extern void kvm_init_loongson_ipi(struct kvm *kvm);
0149
0150 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
0151 {
0152 switch (type) {
0153 case KVM_VM_MIPS_AUTO:
0154 break;
0155 case KVM_VM_MIPS_VZ:
0156 break;
0157 default:
		/* Unsupported KVM type */
0159 return -EINVAL;
0160 }
0161
	/* Allocate the page table used to map GPA -> host physical addresses */
0163 kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
0164 if (!kvm->arch.gpa_mm.pgd)
0165 return -ENOMEM;
0166
0167 #ifdef CONFIG_CPU_LOONGSON64
0168 kvm_init_loongson_ipi(kvm);
0169 #endif
0170
0171 return 0;
0172 }
0173
0174 static void kvm_mips_free_gpa_pt(struct kvm *kvm)
0175 {
	/* Flushing the whole GPA range should leave nothing behind but the PGD */
0177 WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
0178 pgd_free(NULL, kvm->arch.gpa_mm.pgd);
0179 }
0180
0181 void kvm_arch_destroy_vm(struct kvm *kvm)
0182 {
0183 kvm_destroy_vcpus(kvm);
0184 kvm_mips_free_gpa_pt(kvm);
0185 }
0186
0187 long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
0188 unsigned long arg)
0189 {
0190 return -ENOIOCTLCMD;
0191 }
0192
0193 void kvm_arch_flush_shadow_all(struct kvm *kvm)
0194 {
	/* Flush the whole GPA space */
0196 kvm_mips_flush_gpa_pt(kvm, 0, ~0);
0197 kvm_flush_remote_tlbs(kvm);
0198 }
0199
0200 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
0201 struct kvm_memory_slot *slot)
0202 {
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest vCPUs.
	 */

0208 spin_lock(&kvm->mmu_lock);
	/* Flush the slot's range from the GPA page tables */
0210 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
0211 slot->base_gfn + slot->npages - 1);
0212 kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
0213 spin_unlock(&kvm->mmu_lock);
0214 }
0215
0216 int kvm_arch_prepare_memory_region(struct kvm *kvm,
0217 const struct kvm_memory_slot *old,
0218 struct kvm_memory_slot *new,
0219 enum kvm_mr_change change)
0220 {
0221 return 0;
0222 }
0223
0224 void kvm_arch_commit_memory_region(struct kvm *kvm,
0225 struct kvm_memory_slot *old,
0226 const struct kvm_memory_slot *new,
0227 enum kvm_mr_change change)
0228 {
0229 int needs_flush;

	/*
	 * If dirty page logging has just been enabled for this slot, write
	 * protect all of its pages so that subsequent writes fault and can be
	 * marked dirty.
	 *
	 * Nothing needs to be done for slot creation, move or deletion, or when
	 * dirty logging is being disabled.
	 */
0240 if (change == KVM_MR_FLAGS_ONLY &&
0241 (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
0242 new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
0243 spin_lock(&kvm->mmu_lock);
		/* Write protect the slot's GPA page table entries */
0245 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
0246 new->base_gfn + new->npages - 1);
0247 if (needs_flush)
0248 kvm_arch_flush_remote_tlbs_memslot(kvm, new);
0249 spin_unlock(&kvm->mmu_lock);
0250 }
0251 }
0252
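/* Dump a generated handler as a pseudo-assembly listing via pr_debug() */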
0253 static inline void dump_handler(const char *symbol, void *start, void *end)
0254 {
0255 u32 *p;
0256
0257 pr_debug("LEAF(%s)\n", symbol);
0258
0259 pr_debug("\t.set push\n");
0260 pr_debug("\t.set noreorder\n");
0261
0262 for (p = start; p < (u32 *)end; ++p)
0263 pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
0264
0265 pr_debug("\t.set\tpop\n");
0266
0267 pr_debug("\tEND(%s)\n", symbol);
0268 }
0269
/* Low-level hrtimer wake routine */
0271 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
0272 {
0273 struct kvm_vcpu *vcpu;
0274
0275 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
0276
0277 kvm_mips_callbacks->queue_timer_int(vcpu);
0278
0279 vcpu->arch.wait = 0;
0280 rcuwait_wake_up(&vcpu->wait);
0281
0282 return kvm_mips_count_timeout(vcpu);
0283 }
0284
0285 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
0286 {
0287 return 0;
0288 }
0289
0290 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
0291 {
0292 int err, size;
0293 void *gebase, *p, *handler, *refill_start, *refill_end;
0294 int i;
0295
0296 kvm_debug("kvm @ %p: create cpu %d at %p\n",
0297 vcpu->kvm, vcpu->vcpu_id, vcpu);
0298
0299 err = kvm_mips_callbacks->vcpu_init(vcpu);
0300 if (err)
0301 return err;
0302
0303 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
0304 HRTIMER_MODE_REL);
0305 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for the host-mode exception handlers that handle
	 * guest-mode exits.
	 */
0311 if (cpu_has_veic || cpu_has_vint)
0312 size = 0x200 + VECTORSPACING * 64;
0313 else
0314 size = 0x4000;
0315
0316 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
0317
0318 if (!gebase) {
0319 err = -ENOMEM;
0320 goto out_uninit_vcpu;
0321 }
0322 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
0323 ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check that the new exception base actually fits in CP0_EBase. The
	 * lack of a write gate (WG) bit limits us to the low 512MB of physical
	 * address space; if the allocation is out of range, give up now.
	 */
0330 if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
0331 kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
0332 gebase);
0333 err = -ENOMEM;
0334 goto out_free_gebase;
0335 }
0336
	/* Save the new exception base */
0338 vcpu->arch.guest_ebase = gebase;
0339
	/* Build guest exception vectors dynamically in unmapped memory */
0341 handler = gebase + 0x2000;
0342
	/* TLB refill handler (XTLB refill on 64-bit, hence the 0x080 offset) */
0344 refill_start = gebase;
0345 if (IS_ENABLED(CONFIG_64BIT))
0346 refill_start += 0x080;
0347 refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
0348
	/* General exception entry point */
0350 kvm_mips_build_exception(gebase + 0x180, handler);
0351
	/* For vectored interrupts, install the handler at all eight vector offsets */
0353 for (i = 0; i < 8; i++) {
0354 kvm_debug("L1 Vectored handler @ %p\n",
0355 gebase + 0x200 + (i * VECTORSPACING));
0356 kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
0357 handler);
0358 }
0359
	/* General exit handler */
0361 p = handler;
0362 p = kvm_mips_build_exit(p);
0363
	/* Guest entry routine */
0365 vcpu->arch.vcpu_run = p;
0366 p = kvm_mips_build_vcpu_run(p);
0367
	/* Dump the generated handlers for debugging */
0369 pr_debug("#include <asm/asm.h>\n");
0370 pr_debug("#include <asm/regdef.h>\n");
0371 pr_debug("\n");
0372 dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
0373 dump_handler("kvm_tlb_refill", refill_start, refill_end);
0374 dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
0375 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
0376
	/* Invalidate the icache for the newly written handlers */
0378 flush_icache_range((unsigned long)gebase,
0379 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
0380
0381
0382 vcpu->arch.last_sched_cpu = -1;
0383 vcpu->arch.last_exec_cpu = -1;
0384
	/* Set up the initial guest state */
0386 err = kvm_mips_callbacks->vcpu_setup(vcpu);
0387 if (err)
0388 goto out_free_gebase;
0389
0390 return 0;
0391
0392 out_free_gebase:
0393 kfree(gebase);
0394 out_uninit_vcpu:
0395 kvm_mips_callbacks->vcpu_uninit(vcpu);
0396 return err;
0397 }
0398
0399 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
0400 {
0401 hrtimer_cancel(&vcpu->arch.comparecount_timer);
0402
0403 kvm_mips_dump_stats(vcpu);
0404
0405 kvm_mmu_free_memory_caches(vcpu);
0406 kfree(vcpu->arch.guest_ebase);
0407
0408 kvm_mips_callbacks->vcpu_uninit(vcpu);
0409 }
0410
0411 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
0412 struct kvm_guest_debug *dbg)
0413 {
0414 return -ENOIOCTLCMD;
0415 }
0416
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * Must be called with preemption disabled, and with IRQs disabled.
 */
0424 static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
0425 {
0426 int ret;
0427
0428 guest_state_enter_irqoff();
0429 ret = kvm_mips_callbacks->vcpu_run(vcpu);
0430 guest_state_exit_irqoff();
0431
0432 return ret;
0433 }
0434
0435 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
0436 {
0437 int r = -EINTR;
0438
0439 vcpu_load(vcpu);
0440
0441 kvm_sigset_activate(vcpu);
0442
0443 if (vcpu->mmio_needed) {
0444 if (!vcpu->mmio_is_write)
0445 kvm_mips_complete_mmio_load(vcpu);
0446 vcpu->mmio_needed = 0;
0447 }
0448
0449 if (vcpu->run->immediate_exit)
0450 goto out;
0451
0452 lose_fpu(1);
0453
0454 local_irq_disable();
0455 guest_timing_enter_irqoff();
0456 trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of vCPU requests in the vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the vCPU as outside of guest
	 * mode and not needing an IPI.
	 */
0464 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
0465
0466 r = kvm_mips_vcpu_enter_exit(vcpu);

	/*
	 * We must ensure that any pending interrupts are taken before we exit
	 * guest timing so that timer ticks are accounted as guest time.
	 * Transiently unmask interrupts so that any pending interrupts are
	 * taken.
	 */
0478 local_irq_enable();
0479 local_irq_disable();
0480
0481 trace_kvm_out(vcpu);
0482 guest_timing_exit_irqoff();
0483 local_irq_enable();
0484
0485 out:
0486 kvm_sigset_deactivate(vcpu);
0487
0488 vcpu_put(vcpu);
0489 return r;
0490 }
0491
0492 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
0493 struct kvm_mips_interrupt *irq)
0494 {
0495 int intr = (int)irq->irq;
0496 struct kvm_vcpu *dvcpu = NULL;
0497
0498 if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
0499 intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
0500 intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
0501 intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
0502 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
0503 (int)intr);
0504
0505 if (irq->cpu == -1)
0506 dvcpu = vcpu;
0507 else
0508 dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
0509
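	/* Positive interrupt numbers raise an interrupt line, negative numbers lower it */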
0510 if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
0511 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
0512
0513 } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
0514 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
0515 } else {
0516 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
0517 irq->cpu, irq->irq);
0518 return -EINVAL;
0519 }
0520
0521 dvcpu->arch.wait = 0;
0522
0523 rcuwait_wake_up(&dvcpu->wait);
0524
0525 return 0;
0526 }
0527
0528 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
0529 struct kvm_mp_state *mp_state)
0530 {
0531 return -ENOIOCTLCMD;
0532 }
0533
0534 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
0535 struct kvm_mp_state *mp_state)
0536 {
0537 return -ENOIOCTLCMD;
0538 }
0539
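/* One-reg indices that are always exposed via KVM_GET_REG_LIST */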
0540 static u64 kvm_mips_get_one_regs[] = {
0541 KVM_REG_MIPS_R0,
0542 KVM_REG_MIPS_R1,
0543 KVM_REG_MIPS_R2,
0544 KVM_REG_MIPS_R3,
0545 KVM_REG_MIPS_R4,
0546 KVM_REG_MIPS_R5,
0547 KVM_REG_MIPS_R6,
0548 KVM_REG_MIPS_R7,
0549 KVM_REG_MIPS_R8,
0550 KVM_REG_MIPS_R9,
0551 KVM_REG_MIPS_R10,
0552 KVM_REG_MIPS_R11,
0553 KVM_REG_MIPS_R12,
0554 KVM_REG_MIPS_R13,
0555 KVM_REG_MIPS_R14,
0556 KVM_REG_MIPS_R15,
0557 KVM_REG_MIPS_R16,
0558 KVM_REG_MIPS_R17,
0559 KVM_REG_MIPS_R18,
0560 KVM_REG_MIPS_R19,
0561 KVM_REG_MIPS_R20,
0562 KVM_REG_MIPS_R21,
0563 KVM_REG_MIPS_R22,
0564 KVM_REG_MIPS_R23,
0565 KVM_REG_MIPS_R24,
0566 KVM_REG_MIPS_R25,
0567 KVM_REG_MIPS_R26,
0568 KVM_REG_MIPS_R27,
0569 KVM_REG_MIPS_R28,
0570 KVM_REG_MIPS_R29,
0571 KVM_REG_MIPS_R30,
0572 KVM_REG_MIPS_R31,
0573
0574 #ifndef CONFIG_CPU_MIPSR6
0575 KVM_REG_MIPS_HI,
0576 KVM_REG_MIPS_LO,
0577 #endif
0578 KVM_REG_MIPS_PC,
0579 };
0580
0581 static u64 kvm_mips_get_one_regs_fpu[] = {
0582 KVM_REG_MIPS_FCR_IR,
0583 KVM_REG_MIPS_FCR_CSR,
0584 };
0585
0586 static u64 kvm_mips_get_one_regs_msa[] = {
0587 KVM_REG_MIPS_MSA_IR,
0588 KVM_REG_MIPS_MSA_CSR,
0589 };
0590
0591 static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
0592 {
0593 unsigned long ret;
0594
0595 ret = ARRAY_SIZE(kvm_mips_get_one_regs);
0596 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
0597 ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
0598
0599 if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
0600 ret += 16;
0601 }
0602 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
0603 ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
0604 ret += kvm_mips_callbacks->num_regs(vcpu);
0605
0606 return ret;
0607 }
0608
0609 static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
0610 {
0611 u64 index;
0612 unsigned int i;
0613
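	/* The order below must match the count returned by kvm_mips_num_regs() */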
0614 if (copy_to_user(indices, kvm_mips_get_one_regs,
0615 sizeof(kvm_mips_get_one_regs)))
0616 return -EFAULT;
0617 indices += ARRAY_SIZE(kvm_mips_get_one_regs);
0618
0619 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
0620 if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
0621 sizeof(kvm_mips_get_one_regs_fpu)))
0622 return -EFAULT;
0623 indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
0624
0625 for (i = 0; i < 32; ++i) {
0626 index = KVM_REG_MIPS_FPR_32(i);
0627 if (copy_to_user(indices, &index, sizeof(index)))
0628 return -EFAULT;
0629 ++indices;
0630
			/* Skip odd doubles if the FPU has no 64-bit register support */
0632 if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
0633 continue;
0634
0635 index = KVM_REG_MIPS_FPR_64(i);
0636 if (copy_to_user(indices, &index, sizeof(index)))
0637 return -EFAULT;
0638 ++indices;
0639 }
0640 }
0641
0642 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
0643 if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
0644 sizeof(kvm_mips_get_one_regs_msa)))
0645 return -EFAULT;
0646 indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
0647
0648 for (i = 0; i < 32; ++i) {
0649 index = KVM_REG_MIPS_VEC_128(i);
0650 if (copy_to_user(indices, &index, sizeof(index)))
0651 return -EFAULT;
0652 ++indices;
0653 }
0654 }
0655
0656 return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
0657 }
0658
0659 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
0660 const struct kvm_one_reg *reg)
0661 {
0662 struct mips_coproc *cop0 = vcpu->arch.cop0;
0663 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
0664 int ret;
0665 s64 v;
0666 s64 vs[2];
0667 unsigned int idx;
0668
0669 switch (reg->id) {
	/* General purpose registers */
0671 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
0672 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
0673 break;
0674 #ifndef CONFIG_CPU_MIPSR6
0675 case KVM_REG_MIPS_HI:
0676 v = (long)vcpu->arch.hi;
0677 break;
0678 case KVM_REG_MIPS_LO:
0679 v = (long)vcpu->arch.lo;
0680 break;
0681 #endif
0682 case KVM_REG_MIPS_PC:
0683 v = (long)vcpu->arch.pc;
0684 break;
0685
	/* Floating point registers */
0687 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
0688 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0689 return -EINVAL;
0690 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles live in the top half of the even double when FR=0 */
0692 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
0693 v = get_fpr32(&fpu->fpr[idx], 0);
0694 else
0695 v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
0696 break;
0697 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
0698 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0699 return -EINVAL;
0700 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Odd doubles can't be accessed in FR=0 mode */
0702 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
0703 return -EINVAL;
0704 v = get_fpr64(&fpu->fpr[idx], 0);
0705 break;
0706 case KVM_REG_MIPS_FCR_IR:
0707 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0708 return -EINVAL;
0709 v = boot_cpu_data.fpu_id;
0710 break;
0711 case KVM_REG_MIPS_FCR_CSR:
0712 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0713 return -EINVAL;
0714 v = fpu->fcr31;
0715 break;
0716
	/* MIPS SIMD Architecture (MSA) registers */
0718 case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
0719 if (!kvm_mips_guest_has_msa(&vcpu->arch))
0720 return -EINVAL;
		/* MSA registers can't be accessed in FR=0 mode */
0722 if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
0723 return -EINVAL;
0724 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
0725 #ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* Least-significant 64 bits first */
0727 vs[0] = get_fpr64(&fpu->fpr[idx], 0);
0728 vs[1] = get_fpr64(&fpu->fpr[idx], 1);
0729 #else
		/* Most-significant 64 bits first */
0731 vs[0] = get_fpr64(&fpu->fpr[idx], 1);
0732 vs[1] = get_fpr64(&fpu->fpr[idx], 0);
0733 #endif
0734 break;
0735 case KVM_REG_MIPS_MSA_IR:
0736 if (!kvm_mips_guest_has_msa(&vcpu->arch))
0737 return -EINVAL;
0738 v = boot_cpu_data.msa_id;
0739 break;
0740 case KVM_REG_MIPS_MSA_CSR:
0741 if (!kvm_mips_guest_has_msa(&vcpu->arch))
0742 return -EINVAL;
0743 v = fpu->msacsr;
0744 break;
0745
0746
0747 default:
0748 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
0749 if (ret)
0750 return ret;
0751 break;
0752 }
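	/* Copy the value out to userspace at the width encoded in the register id */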
0753 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
0754 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
0755
0756 return put_user(v, uaddr64);
0757 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
0758 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
0759 u32 v32 = (u32)v;
0760
0761 return put_user(v32, uaddr32);
0762 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
0763 void __user *uaddr = (void __user *)(long)reg->addr;
0764
0765 return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
0766 } else {
0767 return -EINVAL;
0768 }
0769 }
0770
0771 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
0772 const struct kvm_one_reg *reg)
0773 {
0774 struct mips_coproc *cop0 = vcpu->arch.cop0;
0775 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
0776 s64 v;
0777 s64 vs[2];
0778 unsigned int idx;
0779
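	/* Read the new value from userspace at the width encoded in the register id */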
0780 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
0781 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
0782
0783 if (get_user(v, uaddr64) != 0)
0784 return -EFAULT;
0785 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
0786 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
0787 s32 v32;
0788
0789 if (get_user(v32, uaddr32) != 0)
0790 return -EFAULT;
0791 v = (s64)v32;
0792 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
0793 void __user *uaddr = (void __user *)(long)reg->addr;
0794
		if (copy_from_user(vs, uaddr, 16) != 0)
			return -EFAULT;
0796 } else {
0797 return -EINVAL;
0798 }
0799
0800 switch (reg->id) {
	/* General purpose registers */
0802 case KVM_REG_MIPS_R0:
		/* Silently ignore attempts to set $zero */
0804 break;
0805 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
0806 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
0807 break;
0808 #ifndef CONFIG_CPU_MIPSR6
0809 case KVM_REG_MIPS_HI:
0810 vcpu->arch.hi = v;
0811 break;
0812 case KVM_REG_MIPS_LO:
0813 vcpu->arch.lo = v;
0814 break;
0815 #endif
0816 case KVM_REG_MIPS_PC:
0817 vcpu->arch.pc = v;
0818 break;
0819
	/* Floating point registers */
0821 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
0822 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0823 return -EINVAL;
0824 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles live in the top half of the even double when FR=0 */
0826 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
0827 set_fpr32(&fpu->fpr[idx], 0, v);
0828 else
0829 set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
0830 break;
0831 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
0832 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0833 return -EINVAL;
0834 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Odd doubles can't be accessed in FR=0 mode */
0836 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
0837 return -EINVAL;
0838 set_fpr64(&fpu->fpr[idx], 0, v);
0839 break;
0840 case KVM_REG_MIPS_FCR_IR:
0841 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0842 return -EINVAL;
		/* FIR is read-only; silently ignore the write */
0844 break;
0845 case KVM_REG_MIPS_FCR_CSR:
0846 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
0847 return -EINVAL;
0848 fpu->fcr31 = v;
0849 break;
0850
	/* MIPS SIMD Architecture (MSA) registers */
0852 case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
0853 if (!kvm_mips_guest_has_msa(&vcpu->arch))
0854 return -EINVAL;
0855 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
0856 #ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* Least-significant 64 bits first */
0858 set_fpr64(&fpu->fpr[idx], 0, vs[0]);
0859 set_fpr64(&fpu->fpr[idx], 1, vs[1]);
0860 #else
		/* Most-significant 64 bits first */
0862 set_fpr64(&fpu->fpr[idx], 1, vs[0]);
0863 set_fpr64(&fpu->fpr[idx], 0, vs[1]);
0864 #endif
0865 break;
0866 case KVM_REG_MIPS_MSA_IR:
0867 if (!kvm_mips_guest_has_msa(&vcpu->arch))
0868 return -EINVAL;
		/* MSAIR is read-only; silently ignore the write */
0870 break;
0871 case KVM_REG_MIPS_MSA_CSR:
0872 if (!kvm_mips_guest_has_msa(&vcpu->arch))
0873 return -EINVAL;
0874 fpu->msacsr = v;
0875 break;
0876
0877
0878 default:
0879 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
0880 }
0881 return 0;
0882 }
0883
0884 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
0885 struct kvm_enable_cap *cap)
0886 {
0887 int r = 0;
0888
0889 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
0890 return -EINVAL;
0891 if (cap->flags)
0892 return -EINVAL;
0893 if (cap->args[0])
0894 return -EINVAL;
0895
0896 switch (cap->cap) {
0897 case KVM_CAP_MIPS_FPU:
0898 vcpu->arch.fpu_enabled = true;
0899 break;
0900 case KVM_CAP_MIPS_MSA:
0901 vcpu->arch.msa_enabled = true;
0902 break;
0903 default:
0904 r = -EINVAL;
0905 break;
0906 }
0907
0908 return r;
0909 }
0910
0911 long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
0912 unsigned long arg)
0913 {
0914 struct kvm_vcpu *vcpu = filp->private_data;
0915 void __user *argp = (void __user *)arg;
0916
0917 if (ioctl == KVM_INTERRUPT) {
0918 struct kvm_mips_interrupt irq;
0919
0920 if (copy_from_user(&irq, argp, sizeof(irq)))
0921 return -EFAULT;
0922 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
0923 irq.irq);
0924
0925 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
0926 }
0927
0928 return -ENOIOCTLCMD;
0929 }
0930
0931 long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
0932 unsigned long arg)
0933 {
0934 struct kvm_vcpu *vcpu = filp->private_data;
0935 void __user *argp = (void __user *)arg;
0936 long r;
0937
0938 vcpu_load(vcpu);
0939
0940 switch (ioctl) {
0941 case KVM_SET_ONE_REG:
0942 case KVM_GET_ONE_REG: {
0943 struct kvm_one_reg reg;
0944
0945 r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
0952 break;
0953 }
0954 case KVM_GET_REG_LIST: {
0955 struct kvm_reg_list __user *user_list = argp;
0956 struct kvm_reg_list reg_list;
0957 unsigned n;
0958
0959 r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
0961 break;
0962 n = reg_list.n;
0963 reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
0965 break;
0966 r = -E2BIG;
0967 if (n < reg_list.n)
0968 break;
0969 r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
0970 break;
0971 }
0972 case KVM_ENABLE_CAP: {
0973 struct kvm_enable_cap cap;
0974
0975 r = -EFAULT;
0976 if (copy_from_user(&cap, argp, sizeof(cap)))
0977 break;
0978 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
0979 break;
0980 }
0981 default:
0982 r = -ENOIOCTLCMD;
0983 }
0984
0985 vcpu_put(vcpu);
0986 return r;
0987 }
0988
0989 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
0990 {
0991
0992 }
0993
0994 int kvm_arch_flush_remote_tlb(struct kvm *kvm)
0995 {
0996 kvm_mips_callbacks->prepare_flush_shadow(kvm);
0997 return 1;
0998 }
0999
1000 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1001 const struct kvm_memory_slot *memslot)
1002 {
1003 kvm_flush_remote_tlbs(kvm);
1004 }
1005
1006 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1007 {
1008 long r;
1009
1010 switch (ioctl) {
1011 default:
1012 r = -ENOIOCTLCMD;
1013 }
1014
1015 return r;
1016 }
1017
1018 int kvm_arch_init(void *opaque)
1019 {
1020 if (kvm_mips_callbacks) {
1021 kvm_err("kvm: module already exists\n");
1022 return -EEXIST;
1023 }
1024
1025 return kvm_mips_emulation_init(&kvm_mips_callbacks);
1026 }
1027
1028 void kvm_arch_exit(void)
1029 {
1030 kvm_mips_callbacks = NULL;
1031 }
1032
1033 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1034 struct kvm_sregs *sregs)
1035 {
1036 return -ENOIOCTLCMD;
1037 }
1038
1039 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1040 struct kvm_sregs *sregs)
1041 {
1042 return -ENOIOCTLCMD;
1043 }
1044
1045 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1046 {
1047 }
1048
1049 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1050 {
1051 return -ENOIOCTLCMD;
1052 }
1053
1054 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1055 {
1056 return -ENOIOCTLCMD;
1057 }
1058
1059 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1060 {
1061 return VM_FAULT_SIGBUS;
1062 }
1063
1064 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1065 {
1066 int r;
1067
1068 switch (ext) {
1069 case KVM_CAP_ONE_REG:
1070 case KVM_CAP_ENABLE_CAP:
1071 case KVM_CAP_READONLY_MEM:
1072 case KVM_CAP_SYNC_MMU:
1073 case KVM_CAP_IMMEDIATE_EXIT:
1074 r = 1;
1075 break;
1076 case KVM_CAP_NR_VCPUS:
1077 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
1078 break;
1079 case KVM_CAP_MAX_VCPUS:
1080 r = KVM_MAX_VCPUS;
1081 break;
1082 case KVM_CAP_MAX_VCPU_ID:
1083 r = KVM_MAX_VCPU_IDS;
1084 break;
1085 case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
1087 r = !!raw_cpu_has_fpu;
1088 break;
1089 case KVM_CAP_MIPS_MSA:
		/*
		 * MSA vector partitioning is not supported: it would need
		 * explicit support that can't be tested on current hardware,
		 * and it would extend the state that has to be saved and
		 * restored (e.g. by QEMU for migration). If such hardware
		 * appears, support could be added behind a flag on this
		 * capability so userspace knows to handle the extra state.
		 */
1102 r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
1103 break;
1104 default:
1105 r = kvm_mips_callbacks->check_extension(kvm, ext);
1106 break;
1107 }
1108 return r;
1109 }
1110
1111 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1112 {
1113 return kvm_mips_pending_timer(vcpu) ||
1114 kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
1115 }
1116
1117 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1118 {
1119 int i;
1120 struct mips_coproc *cop0;
1121
1122 if (!vcpu)
1123 return -1;
1124
1125 kvm_debug("VCPU Register Dump:\n");
1126 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1127 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
1128
1129 for (i = 0; i < 32; i += 4) {
1130 kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1131 vcpu->arch.gprs[i],
1132 vcpu->arch.gprs[i + 1],
1133 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1134 }
1135 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
1136 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
1137
1138 cop0 = vcpu->arch.cop0;
1139 kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
1140 kvm_read_c0_guest_status(cop0),
1141 kvm_read_c0_guest_cause(cop0));
1142
1143 kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
1144
1145 return 0;
1146 }
1147
1148 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1149 {
1150 int i;
1151
1152 vcpu_load(vcpu);
1153
1154 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1155 vcpu->arch.gprs[i] = regs->gpr[i];
1156 vcpu->arch.gprs[0] = 0;
1157 vcpu->arch.hi = regs->hi;
1158 vcpu->arch.lo = regs->lo;
1159 vcpu->arch.pc = regs->pc;
1160
1161 vcpu_put(vcpu);
1162 return 0;
1163 }
1164
1165 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1166 {
1167 int i;
1168
1169 vcpu_load(vcpu);
1170
1171 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1172 regs->gpr[i] = vcpu->arch.gprs[i];
1173
1174 regs->hi = vcpu->arch.hi;
1175 regs->lo = vcpu->arch.lo;
1176 regs->pc = vcpu->arch.pc;
1177
1178 vcpu_put(vcpu);
1179 return 0;
1180 }
1181
1182 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1183 struct kvm_translation *tr)
1184 {
1185 return 0;
1186 }
1187
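/* Set host Status bits based on CPU features (currently just enables DSP access) */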
1188 static void kvm_mips_set_c0_status(void)
1189 {
1190 u32 status = read_c0_status();
1191
1192 if (cpu_has_dsp)
1193 status |= (ST0_MX);
1194
1195 write_c0_status(status);
1196 ehb();
1197 }
1198
/*
 * The return value is RESUME_GUEST or RESUME_HOST, optionally combined with an
 * error code shifted left by two bits.
 */
1202 static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
1203 {
1204 struct kvm_run *run = vcpu->run;
1205 u32 cause = vcpu->arch.host_cp0_cause;
1206 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1207 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
1208 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1209 enum emulation_result er = EMULATE_DONE;
1210 u32 inst;
1211 int ret = RESUME_GUEST;
1212
1213 vcpu->mode = OUTSIDE_GUEST_MODE;
1214
	/* Set a default exit reason */
1216 run->exit_reason = KVM_EXIT_UNKNOWN;
1217 run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler.
	 */
1223 kvm_mips_set_c0_status();
1224
1225 local_irq_enable();
1226
1227 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1228 cause, opc, run, vcpu);
1229 trace_kvm_exit(vcpu, exccode);
1230
1231 switch (exccode) {
1232 case EXCCODE_INT:
1233 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
1234
1235 ++vcpu->stat.int_exits;
1236
1237 if (need_resched())
1238 cond_resched();
1239
1240 ret = RESUME_GUEST;
1241 break;
1242
1243 case EXCCODE_CPU:
1244 kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
1245
1246 ++vcpu->stat.cop_unusable_exits;
1247 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1248
1249 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1250 ret = RESUME_HOST;
1251 break;
1252
1253 case EXCCODE_MOD:
1254 ++vcpu->stat.tlbmod_exits;
1255 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1256 break;
1257
1258 case EXCCODE_TLBS:
1259 kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
1260 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1261 badvaddr);
1262
1263 ++vcpu->stat.tlbmiss_st_exits;
1264 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1265 break;
1266
1267 case EXCCODE_TLBL:
1268 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1269 cause, opc, badvaddr);
1270
1271 ++vcpu->stat.tlbmiss_ld_exits;
1272 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1273 break;
1274
1275 case EXCCODE_ADES:
1276 ++vcpu->stat.addrerr_st_exits;
1277 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1278 break;
1279
1280 case EXCCODE_ADEL:
1281 ++vcpu->stat.addrerr_ld_exits;
1282 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1283 break;
1284
1285 case EXCCODE_SYS:
1286 ++vcpu->stat.syscall_exits;
1287 ret = kvm_mips_callbacks->handle_syscall(vcpu);
1288 break;
1289
1290 case EXCCODE_RI:
1291 ++vcpu->stat.resvd_inst_exits;
1292 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1293 break;
1294
1295 case EXCCODE_BP:
1296 ++vcpu->stat.break_inst_exits;
1297 ret = kvm_mips_callbacks->handle_break(vcpu);
1298 break;
1299
1300 case EXCCODE_TR:
1301 ++vcpu->stat.trap_inst_exits;
1302 ret = kvm_mips_callbacks->handle_trap(vcpu);
1303 break;
1304
1305 case EXCCODE_MSAFPE:
1306 ++vcpu->stat.msa_fpe_exits;
1307 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
1308 break;
1309
1310 case EXCCODE_FPE:
1311 ++vcpu->stat.fpe_exits;
1312 ret = kvm_mips_callbacks->handle_fpe(vcpu);
1313 break;
1314
1315 case EXCCODE_MSADIS:
1316 ++vcpu->stat.msa_disabled_exits;
1317 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1318 break;
1319
1320 case EXCCODE_GE:
1321
1322 ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
1323 break;
1324
1325 default:
1326 if (cause & CAUSEF_BD)
1327 opc += 1;
1328 inst = 0;
1329 kvm_get_badinstr(opc, vcpu, &inst);
1330 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
1331 exccode, opc, inst, badvaddr,
1332 kvm_read_c0_guest_status(vcpu->arch.cop0));
1333 kvm_arch_vcpu_dump_regs(vcpu);
1334 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1335 ret = RESUME_HOST;
1336 break;
1337
1338 }
1339
1340 local_irq_disable();
1341
1342 if (ret == RESUME_GUEST)
1343 kvm_vz_acquire_htimer(vcpu);
1344
1345 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1346 kvm_mips_deliver_interrupts(vcpu, cause);
1347
1348 if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
1350 if (signal_pending(current)) {
1351 run->exit_reason = KVM_EXIT_INTR;
1352 ret = (-EINTR << 2) | RESUME_HOST;
1353 ++vcpu->stat.signal_exits;
1354 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
1355 }
1356 }
1357
1358 if (ret == RESUME_GUEST) {
1359 trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of vCPU requests in the vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the vCPU as outside of guest mode and not needing an IPI.
		 */
1367 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1368
1369 kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This must happen before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked (see
		 * kvm_mips_csr_die_notify() for how that is handled).
		 */
1380 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1381 read_c0_status() & ST0_CU1)
1382 __kvm_restore_fcsr(&vcpu->arch);
1383
1384 if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1385 read_c0_config5() & MIPS_CONF5_MSAEN)
1386 __kvm_restore_msacsr(&vcpu->arch);
1387 }
1388 return ret;
1389 }
1390
1391 int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
1392 {
1393 int ret;
1394
1395 guest_state_exit_irqoff();
1396 ret = __kvm_mips_handle_exit(vcpu);
1397 guest_state_enter_irqoff();
1398
1399 return ret;
1400 }
1401
/* Enable FPU for the guest and restore its context */
1403 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1404 {
1405 struct mips_coproc *cop0 = vcpu->arch.cop0;
1406 unsigned int sr, cfg5;
1407
1408 preempt_disable();
1409
1410 sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
1418 if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1419 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1420 kvm_lose_fpu(vcpu);

	/*
	 * Enable the FPU for the guest, setting FR and FRE according to the
	 * guest context.
	 */
1426 change_c0_status(ST0_CU1 | ST0_FR, sr);
1427 if (cpu_has_fre) {
1428 cfg5 = kvm_read_c0_guest_config5(cop0);
1429 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1430 }
1431 enable_fpu_hazard();
1432
	/* If the guest FPU state isn't already live in hardware, restore it now */
1434 if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1435 __kvm_restore_fpu(&vcpu->arch);
1436 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1437 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1438 } else {
1439 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
1440 }
1441
1442 preempt_enable();
1443 }
1444
1445 #ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for the guest and restore its context */
1447 void kvm_own_msa(struct kvm_vcpu *vcpu)
1448 {
1449 struct mips_coproc *cop0 = vcpu->arch.cop0;
1450 unsigned int sr, cfg5;
1451
1452 preempt_disable();

	/*
	 * Enable the FPU if it is enabled in the guest, since we're restoring
	 * FPU context anyway. FR and FRE are set according to the guest
	 * context.
	 */
1458 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1459 sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
1465 if (!(sr & ST0_FR) &&
1466 (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
1467 KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
1468 kvm_lose_fpu(vcpu);
1469
1470 change_c0_status(ST0_CU1 | ST0_FR, sr);
1471 if (sr & ST0_CU1 && cpu_has_fre) {
1472 cfg5 = kvm_read_c0_guest_config5(cop0);
1473 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1474 }
1475 }
1476
	/* Enable MSA for the guest */
1478 set_c0_config5(MIPS_CONF5_MSAEN);
1479 enable_fpu_hazard();
1480
1481 switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
1482 case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state is already loaded, so only the upper MSA
		 * state needs restoring.
		 */
1486 __kvm_restore_msa_upper(&vcpu->arch);
1487 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1488 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
1489 break;
1490 case 0:
		/* Neither FPU nor MSA is live yet, so restore the full MSA state */
1492 __kvm_restore_msa(&vcpu->arch);
1493 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1494 if (kvm_mips_guest_has_fpu(&vcpu->arch))
1495 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1496 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
1497 KVM_TRACE_AUX_FPU_MSA);
1498 break;
1499 default:
1500 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
1501 break;
1502 }
1503
1504 preempt_enable();
1505 }
1506 #endif
1507
/* Drop FPU & MSA state without saving it */
1509 void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1510 {
1511 preempt_disable();
1512 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1513 disable_msa();
1514 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
1515 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
1516 }
1517 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1518 clear_c0_status(ST0_CU1 | ST0_FR);
1519 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
1520 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1521 }
1522 preempt_enable();
1523 }
1524
/* Save and disable FPU & MSA */
1526 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1527 {
	/*
	 * The guest's FPU/MSA context may still be live in the hardware even
	 * though the guest has architecturally disabled it, so save whatever is
	 * live before disabling the hardware units.
	 */

1535 preempt_disable();
1536 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1537 __kvm_save_msa(&vcpu->arch);
1538 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
1539
		/* Disable MSA & FPU */
1541 disable_msa();
1542 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1543 clear_c0_status(ST0_CU1 | ST0_FR);
1544 disable_fpu_hazard();
1545 }
1546 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
1547 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1548 __kvm_save_fpu(&vcpu->arch);
1549 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1550 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1551
		/* Disable FPU */
1553 clear_c0_status(ST0_CU1 | ST0_FR);
1554 disable_fpu_hazard();
1555 }
1556 preempt_enable();
1557 }
1558
/*
 * If restoring a guest FCSR or MSACSR value raises an FP exception (because
 * the guest left cause bits set that are unmasked by the enable bits), step
 * over the offending instruction instead of letting it take down the host.
 */
1564 static int kvm_mips_csr_die_notify(struct notifier_block *self,
1565 unsigned long cmd, void *ptr)
1566 {
1567 struct die_args *args = (struct die_args *)ptr;
1568 struct pt_regs *regs = args->regs;
1569 unsigned long pc;
1570
	/* Only interested in FPE and MSA FPE */
1572 if (cmd != DIE_FP && cmd != DIE_MSAFP)
1573 return NOTIFY_DONE;
1574
	/* Return immediately if guest context isn't active */
1576 if (!(current->flags & PF_VCPU))
1577 return NOTIFY_DONE;
1578
	/* Should never get here from user mode */
1580 BUG_ON(user_mode(regs));
1581
1582 pc = instruction_pointer(regs);
1583 switch (cmd) {
1584 case DIE_FP:
		/* Match the second instruction in __kvm_restore_fcsr */
1586 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1587 return NOTIFY_DONE;
1588 break;
1589 case DIE_MSAFP:
		/* Match the second/third instruction in __kvm_restore_msacsr */
1591 if (!cpu_has_msa ||
1592 pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1593 pc > (unsigned long)&__kvm_restore_msacsr + 8)
1594 return NOTIFY_DONE;
1595 break;
1596 }
1597
	/* Step over the faulting instruction and continue executing */
1599 instruction_pointer(regs) += 4;
1600
1601 return NOTIFY_STOP;
1602 }
1603
1604 static struct notifier_block kvm_mips_csr_die_notifier = {
1605 .notifier_call = kvm_mips_csr_die_notify,
1606 };
1607
1608 static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
1609 [MIPS_EXC_INT_TIMER] = C_IRQ5,
1610 [MIPS_EXC_INT_IO_1] = C_IRQ0,
1611 [MIPS_EXC_INT_IPI_1] = C_IRQ1,
1612 [MIPS_EXC_INT_IPI_2] = C_IRQ2,
1613 };
1614
1615 static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
1616 [MIPS_EXC_INT_TIMER] = C_IRQ5,
1617 [MIPS_EXC_INT_IO_1] = C_IRQ0,
1618 [MIPS_EXC_INT_IO_2] = C_IRQ1,
1619 [MIPS_EXC_INT_IPI_1] = C_IRQ4,
1620 };
1621
1622 u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
1623
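/* Translate an IRQ number (0-7) back to its internal exception priority index */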
1624 u32 kvm_irq_to_priority(u32 irq)
1625 {
1626 int i;
1627
1628 for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
1629 if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
1630 return i;
1631 }
1632
1633 return MIPS_EXC_MAX;
1634 }
1635
1636 static int __init kvm_mips_init(void)
1637 {
1638 int ret;
1639
1640 if (cpu_has_mmid) {
1641 pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
1642 return -EOPNOTSUPP;
1643 }
1644
1645 ret = kvm_mips_entry_setup();
1646 if (ret)
1647 return ret;
1648
1649 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1650
1651 if (ret)
1652 return ret;
1653
1654 if (boot_cpu_type() == CPU_LOONGSON64)
1655 kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
1656
1657 register_die_notifier(&kvm_mips_csr_die_notifier);
1658
1659 return 0;
1660 }
1661
1662 static void __exit kvm_mips_exit(void)
1663 {
1664 kvm_exit();
1665
1666 unregister_die_notifier(&kvm_mips_csr_die_notifier);
1667 }
1668
1669 module_init(kvm_mips_init);
1670 module_exit(kvm_mips_exit);
1671
1672 EXPORT_TRACEPOINT_SYMBOL(kvm_exit);