0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/errno.h>
0014 #include <linux/err.h>
0015 #include <linux/kvm_host.h>
0016 #include <linux/gfp.h>
0017 #include <linux/module.h>
0018 #include <linux/vmalloc.h>
0019 #include <linux/fs.h>
0020
0021 #include <asm/cputable.h>
0022 #include <linux/uaccess.h>
0023 #include <asm/interrupt.h>
0024 #include <asm/kvm_ppc.h>
0025 #include <asm/cacheflush.h>
0026 #include <asm/dbell.h>
0027 #include <asm/hw_irq.h>
0028 #include <asm/irq.h>
0029 #include <asm/time.h>
0030
0031 #include "timing.h"
0032 #include "booke.h"
0033
0034 #define CREATE_TRACE_POINTS
0035 #include "trace_booke.h"
0036
0037 unsigned long kvmppc_booke_handlers;
0038
0039 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
0040 KVM_GENERIC_VM_STATS(),
0041 STATS_DESC_ICOUNTER(VM, num_2M_pages),
0042 STATS_DESC_ICOUNTER(VM, num_1G_pages)
0043 };
0044
0045 const struct kvm_stats_header kvm_vm_stats_header = {
0046 .name_size = KVM_STATS_NAME_SIZE,
0047 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
0048 .id_offset = sizeof(struct kvm_stats_header),
0049 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
0050 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
0051 sizeof(kvm_vm_stats_desc),
0052 };
0053
0054 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
0055 KVM_GENERIC_VCPU_STATS(),
0056 STATS_DESC_COUNTER(VCPU, sum_exits),
0057 STATS_DESC_COUNTER(VCPU, mmio_exits),
0058 STATS_DESC_COUNTER(VCPU, signal_exits),
0059 STATS_DESC_COUNTER(VCPU, light_exits),
0060 STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
0061 STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
0062 STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
0063 STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
0064 STATS_DESC_COUNTER(VCPU, syscall_exits),
0065 STATS_DESC_COUNTER(VCPU, isi_exits),
0066 STATS_DESC_COUNTER(VCPU, dsi_exits),
0067 STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
0068 STATS_DESC_COUNTER(VCPU, dec_exits),
0069 STATS_DESC_COUNTER(VCPU, ext_intr_exits),
0070 STATS_DESC_COUNTER(VCPU, halt_successful_wait),
0071 STATS_DESC_COUNTER(VCPU, dbell_exits),
0072 STATS_DESC_COUNTER(VCPU, gdbell_exits),
0073 STATS_DESC_COUNTER(VCPU, ld),
0074 STATS_DESC_COUNTER(VCPU, st),
0075 STATS_DESC_COUNTER(VCPU, pthru_all),
0076 STATS_DESC_COUNTER(VCPU, pthru_host),
0077 STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
0078 };
0079
0080 const struct kvm_stats_header kvm_vcpu_stats_header = {
0081 .name_size = KVM_STATS_NAME_SIZE,
0082 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
0083 .id_offset = sizeof(struct kvm_stats_header),
0084 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
0085 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
0086 sizeof(kvm_vcpu_stats_desc),
0087 };
0088
0089
0090 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
0091 {
0092 int i;
0093
0094 printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
0095 vcpu->arch.shared->msr);
0096 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
0097 vcpu->arch.regs.ctr);
0098 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
0099 vcpu->arch.shared->srr1);
0100
0101 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
0102
0103 for (i = 0; i < 32; i += 4) {
0104 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
0105 kvmppc_get_gpr(vcpu, i),
0106 kvmppc_get_gpr(vcpu, i+1),
0107 kvmppc_get_gpr(vcpu, i+2),
0108 kvmppc_get_gpr(vcpu, i+3));
0109 }
0110 }
0111
0112 #ifdef CONFIG_SPE
0113 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
0114 {
0115 preempt_disable();
0116 enable_kernel_spe();
0117 kvmppc_save_guest_spe(vcpu);
0118 disable_kernel_spe();
0119 vcpu->arch.shadow_msr &= ~MSR_SPE;
0120 preempt_enable();
0121 }
0122
0123 static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
0124 {
0125 preempt_disable();
0126 enable_kernel_spe();
0127 kvmppc_load_guest_spe(vcpu);
0128 disable_kernel_spe();
0129 vcpu->arch.shadow_msr |= MSR_SPE;
0130 preempt_enable();
0131 }
0132
0133 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
0134 {
0135 if (vcpu->arch.shared->msr & MSR_SPE) {
0136 if (!(vcpu->arch.shadow_msr & MSR_SPE))
0137 kvmppc_vcpu_enable_spe(vcpu);
0138 } else if (vcpu->arch.shadow_msr & MSR_SPE) {
0139 kvmppc_vcpu_disable_spe(vcpu);
0140 }
0141 }
0142 #else
0143 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
0144 {
0145 }
0146 #endif
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
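/*
 * Load the guest's FP state into the FPU and set MSR_FP in the host
 * thread's regs so the host knows we are holding the FPU on the guest's
 * behalf.  Must be called with preemption disabled.
 */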
0157 static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
0158 {
0159 #ifdef CONFIG_PPC_FPU
0160 if (!(current->thread.regs->msr & MSR_FP)) {
0161 enable_kernel_fp();
0162 load_fp_state(&vcpu->arch.fp);
0163 disable_kernel_fp();
0164 current->thread.fp_save_area = &vcpu->arch.fp;
0165 current->thread.regs->msr |= MSR_FP;
0166 }
0167 #endif
0168 }
0169
0170
0171
0172
0173
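/*
 * Hand the FPU back to the host thread; giveup_fpu() saves the current
 * register contents into vcpu->arch.fp (the active fp_save_area).
 * Must be called with preemption disabled.
 */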
0174 static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
0175 {
0176 #ifdef CONFIG_PPC_FPU
0177 if (current->thread.regs->msr & MSR_FP)
0178 giveup_fpu(current);
0179 current->thread.fp_save_area = NULL;
0180 #endif
0181 }
0182
0183 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
0184 {
0185 #if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
0186
0187
0188 vcpu->arch.shadow_msr &= ~MSR_FP;
0189 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
0190 #endif
0191 }
0192
0193
0194
0195
0196
0197
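/*
 * Load the guest's AltiVec state into the vector unit and set MSR_VEC in
 * the host thread's regs, mirroring kvmppc_load_guest_fp().  Must be
 * called with preemption disabled.
 */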
0198 static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
0199 {
0200 #ifdef CONFIG_ALTIVEC
0201 if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
0202 if (!(current->thread.regs->msr & MSR_VEC)) {
0203 enable_kernel_altivec();
0204 load_vr_state(&vcpu->arch.vr);
0205 disable_kernel_altivec();
0206 current->thread.vr_save_area = &vcpu->arch.vr;
0207 current->thread.regs->msr |= MSR_VEC;
0208 }
0209 }
0210 #endif
0211 }
0212
0213
0214
0215
0216
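/*
 * Save the guest's AltiVec state back into vcpu->arch.vr via
 * giveup_altivec().  Must be called with preemption disabled.
 */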
0217 static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
0218 {
0219 #ifdef CONFIG_ALTIVEC
0220 if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
0221 if (current->thread.regs->msr & MSR_VEC)
0222 giveup_altivec(current);
0223 current->thread.vr_save_area = NULL;
0224 }
0225 #endif
0226 }
0227
0228 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
0229 {
0230
0231 #ifndef CONFIG_KVM_BOOKE_HV
0232 vcpu->arch.shadow_msr &= ~MSR_DE;
0233 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
0234 #endif
0235
0236
0237 if (vcpu->guest_debug) {
0238 #ifdef CONFIG_KVM_BOOKE_HV
0239
0240
0241
0242
0243 vcpu->arch.shared->msr |= MSR_DE;
0244 #else
0245 vcpu->arch.shadow_msr |= MSR_DE;
0246 vcpu->arch.shared->msr &= ~MSR_DE;
0247 #endif
0248 }
0249 }
0250
0251
0252
0253
0254
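/*
 * Helper for "full" guest MSR writes: store the new value, notify the MMU,
 * and resync the SPE, FP and debug state that depends on it.
 */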
0255 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
0256 {
0257 u32 old_msr = vcpu->arch.shared->msr;
0258
0259 #ifdef CONFIG_KVM_BOOKE_HV
0260 new_msr |= MSR_GS;
0261 #endif
0262
0263 vcpu->arch.shared->msr = new_msr;
0264
0265 kvmppc_mmu_msr_notify(vcpu, old_msr);
0266 kvmppc_vcpu_sync_spe(vcpu);
0267 kvmppc_vcpu_sync_fpu(vcpu);
0268 kvmppc_vcpu_sync_debug(vcpu);
0269 }
0270
0271 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
0272 unsigned int priority)
0273 {
0274 trace_kvm_booke_queue_irqprio(vcpu, priority);
0275 set_bit(priority, &vcpu->arch.pending_exceptions);
0276 }
0277
0278 void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
0279 ulong dear_flags, ulong esr_flags)
0280 {
0281 vcpu->arch.queued_dear = dear_flags;
0282 vcpu->arch.queued_esr = esr_flags;
0283 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
0284 }
0285
0286 void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
0287 ulong dear_flags, ulong esr_flags)
0288 {
0289 vcpu->arch.queued_dear = dear_flags;
0290 vcpu->arch.queued_esr = esr_flags;
0291 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
0292 }
0293
0294 void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
0295 {
0296 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
0297 }
0298
0299 void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
0300 {
0301 vcpu->arch.queued_esr = esr_flags;
0302 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
0303 }
0304
0305 static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
0306 ulong esr_flags)
0307 {
0308 vcpu->arch.queued_dear = dear_flags;
0309 vcpu->arch.queued_esr = esr_flags;
0310 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
0311 }
0312
0313 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
0314 {
0315 vcpu->arch.queued_esr = esr_flags;
0316 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
0317 }
0318
0319 void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
0320 {
0321 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
0322 }
0323
0324 #ifdef CONFIG_ALTIVEC
0325 void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
0326 {
0327 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
0328 }
0329 #endif
0330
0331 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
0332 {
0333 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
0334 }
0335
0336 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
0337 {
0338 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
0339 }
0340
0341 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
0342 {
0343 clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
0344 }
0345
0346 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
0347 struct kvm_interrupt *irq)
0348 {
0349 unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;
0350
0351 if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
0352 prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;
0353
0354 kvmppc_booke_queue_irqprio(vcpu, prio);
0355 }
0356
0357 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
0358 {
0359 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
0360 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
0361 }
0362
0363 static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
0364 {
0365 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
0366 }
0367
0368 static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
0369 {
0370 clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
0371 }
0372
0373 void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
0374 {
0375 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
0376 }
0377
0378 void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
0379 {
0380 clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
0381 }
0382
0383 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
0384 {
0385 kvmppc_set_srr0(vcpu, srr0);
0386 kvmppc_set_srr1(vcpu, srr1);
0387 }
0388
0389 static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
0390 {
0391 vcpu->arch.csrr0 = srr0;
0392 vcpu->arch.csrr1 = srr1;
0393 }
0394
0395 static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
0396 {
0397 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
0398 vcpu->arch.dsrr0 = srr0;
0399 vcpu->arch.dsrr1 = srr1;
0400 } else {
0401 set_guest_csrr(vcpu, srr0, srr1);
0402 }
0403 }
0404
0405 static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
0406 {
0407 vcpu->arch.mcsrr0 = srr0;
0408 vcpu->arch.mcsrr1 = srr1;
0409 }
0410
0411
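/*
 * Deliver the interrupt of the given priority if the guest's MSR and
 * critical-section state allow it.  Returns nonzero if it was delivered.
 */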
0412 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
0413 unsigned int priority)
0414 {
0415 int allowed = 0;
0416 ulong msr_mask = 0;
0417 bool update_esr = false, update_dear = false, update_epr = false;
0418 ulong crit_raw = vcpu->arch.shared->critical;
0419 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
0420 bool crit;
0421 bool keep_irq = false;
0422 enum int_class int_class;
0423 ulong new_msr = vcpu->arch.shared->msr;
0424
0425
0426 if (!(vcpu->arch.shared->msr & MSR_SF)) {
0427 crit_raw &= 0xffffffff;
0428 crit_r1 &= 0xffffffff;
0429 }
0430
0431
0432 crit = (crit_raw == crit_r1);
0433
0434 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
0435
0436 if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
0437 priority = BOOKE_IRQPRIO_EXTERNAL;
0438 keep_irq = true;
0439 }
0440
0441 if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
0442 update_epr = true;
0443
0444 switch (priority) {
0445 case BOOKE_IRQPRIO_DTLB_MISS:
0446 case BOOKE_IRQPRIO_DATA_STORAGE:
0447 case BOOKE_IRQPRIO_ALIGNMENT:
0448 update_dear = true;
0449 fallthrough;
0450 case BOOKE_IRQPRIO_INST_STORAGE:
0451 case BOOKE_IRQPRIO_PROGRAM:
0452 update_esr = true;
0453 fallthrough;
0454 case BOOKE_IRQPRIO_ITLB_MISS:
0455 case BOOKE_IRQPRIO_SYSCALL:
0456 case BOOKE_IRQPRIO_FP_UNAVAIL:
0457 #ifdef CONFIG_SPE_POSSIBLE
0458 case BOOKE_IRQPRIO_SPE_UNAVAIL:
0459 case BOOKE_IRQPRIO_SPE_FP_DATA:
0460 case BOOKE_IRQPRIO_SPE_FP_ROUND:
0461 #endif
0462 #ifdef CONFIG_ALTIVEC
0463 case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
0464 case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
0465 #endif
0466 case BOOKE_IRQPRIO_AP_UNAVAIL:
0467 allowed = 1;
0468 msr_mask = MSR_CE | MSR_ME | MSR_DE;
0469 int_class = INT_CLASS_NONCRIT;
0470 break;
0471 case BOOKE_IRQPRIO_WATCHDOG:
0472 case BOOKE_IRQPRIO_CRITICAL:
0473 case BOOKE_IRQPRIO_DBELL_CRIT:
0474 allowed = vcpu->arch.shared->msr & MSR_CE;
0475 allowed = allowed && !crit;
0476 msr_mask = MSR_ME;
0477 int_class = INT_CLASS_CRIT;
0478 break;
0479 case BOOKE_IRQPRIO_MACHINE_CHECK:
0480 allowed = vcpu->arch.shared->msr & MSR_ME;
0481 allowed = allowed && !crit;
0482 int_class = INT_CLASS_MC;
0483 break;
0484 case BOOKE_IRQPRIO_DECREMENTER:
0485 case BOOKE_IRQPRIO_FIT:
0486 keep_irq = true;
0487 fallthrough;
0488 case BOOKE_IRQPRIO_EXTERNAL:
0489 case BOOKE_IRQPRIO_DBELL:
0490 allowed = vcpu->arch.shared->msr & MSR_EE;
0491 allowed = allowed && !crit;
0492 msr_mask = MSR_CE | MSR_ME | MSR_DE;
0493 int_class = INT_CLASS_NONCRIT;
0494 break;
0495 case BOOKE_IRQPRIO_DEBUG:
0496 allowed = vcpu->arch.shared->msr & MSR_DE;
0497 allowed = allowed && !crit;
0498 msr_mask = MSR_ME;
0499 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
0500 int_class = INT_CLASS_DBG;
0501 else
0502 int_class = INT_CLASS_CRIT;
0503
0504 break;
0505 }
0506
0507 if (allowed) {
0508 switch (int_class) {
0509 case INT_CLASS_NONCRIT:
0510 set_guest_srr(vcpu, vcpu->arch.regs.nip,
0511 vcpu->arch.shared->msr);
0512 break;
0513 case INT_CLASS_CRIT:
0514 set_guest_csrr(vcpu, vcpu->arch.regs.nip,
0515 vcpu->arch.shared->msr);
0516 break;
0517 case INT_CLASS_DBG:
0518 set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
0519 vcpu->arch.shared->msr);
0520 break;
0521 case INT_CLASS_MC:
0522 set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
0523 vcpu->arch.shared->msr);
0524 break;
0525 }
0526
0527 vcpu->arch.regs.nip = vcpu->arch.ivpr |
0528 vcpu->arch.ivor[priority];
0529 if (update_esr)
0530 kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
0531 if (update_dear)
0532 kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
0533 if (update_epr) {
0534 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
0535 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
0536 else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
0537 BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
0538 kvmppc_mpic_set_epr(vcpu);
0539 }
0540 }
0541
0542 new_msr &= msr_mask;
0543 #if defined(CONFIG_64BIT)
0544 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
0545 new_msr |= MSR_CM;
0546 #endif
0547 kvmppc_set_msr(vcpu, new_msr);
0548
0549 if (!keep_irq)
0550 clear_bit(priority, &vcpu->arch.pending_exceptions);
0551 }
0552
0553 #ifdef CONFIG_KVM_BOOKE_HV
0554
0555
0556
0557
0558
0559 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
0560 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
0561 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
0562 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
0563 if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
0564 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
0565 #endif
0566
0567 return allowed;
0568 }
0569
0570
0571
0572
0573
0574
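/*
 * Return the number of jiffies until the next watchdog tick, capped at
 * NEXT_TIMER_MAX_DELTA so the result stays safe to pass to the timer API.
 */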
0575 static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
0576 {
0577 u64 tb, wdt_tb, wdt_ticks = 0;
0578 u64 nr_jiffies = 0;
0579 u32 period = TCR_GET_WP(vcpu->arch.tcr);
0580
0581 wdt_tb = 1ULL << (63 - period);
0582 tb = get_tb();
0583
0584
0585
0586
0587 if (tb & wdt_tb)
0588 wdt_ticks = wdt_tb;
0589
0590 wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
0591
0592
0593 nr_jiffies = wdt_ticks;
0594
0595 if (do_div(nr_jiffies, tb_ticks_per_jiffy))
0596 nr_jiffies++;
0597
0598 return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
0599 }
0600
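/*
 * (Re)arm the vcpu's watchdog timer from the current TCR/TSR state; a
 * pending watchdog request is dropped if the watchdog is no longer on its
 * final expiry stage.
 */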
0601 static void arm_next_watchdog(struct kvm_vcpu *vcpu)
0602 {
0603 unsigned long nr_jiffies;
0604 unsigned long flags;
0605
0606
0607
0608
0609
0610 if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
0611 kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
0612
0613 spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
0614 nr_jiffies = watchdog_next_timeout(vcpu);
0615
0616
0617
0618
0619 if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
0620 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
0621 else
0622 del_timer(&vcpu->arch.wdt_timer);
0623 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
0624 }
0625
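/*
 * Watchdog timer callback: step the TSR_ENW/TSR_WIS state machine, raise a
 * watchdog interrupt or (on final expiry, if enabled by userspace) a
 * KVM_REQ_WATCHDOG exit, and rearm the timer unless it has finally expired.
 */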
0626 void kvmppc_watchdog_func(struct timer_list *t)
0627 {
0628 struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
0629 u32 tsr, new_tsr;
0630 int final;
0631
0632 do {
0633 new_tsr = tsr = vcpu->arch.tsr;
0634 final = 0;
0635
0636
0637 if (tsr & TSR_ENW) {
0638 if (tsr & TSR_WIS)
0639 final = 1;
0640 else
0641 new_tsr = tsr | TSR_WIS;
0642 } else {
0643 new_tsr = tsr | TSR_ENW;
0644 }
0645 } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
0646
0647 if (new_tsr & TSR_WIS) {
0648 smp_wmb();
0649 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
0650 kvm_vcpu_kick(vcpu);
0651 }
0652
0653
0654
0655
0656
0657 if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
0658 vcpu->arch.watchdog_enabled) {
0659 smp_wmb();
0660 kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
0661 kvm_vcpu_kick(vcpu);
0662 }
0663
0664
0665
0666
0667
0668
0669
0670 if (!final)
0671 arm_next_watchdog(vcpu);
0672 }
0673
0674 static void update_timer_ints(struct kvm_vcpu *vcpu)
0675 {
0676 if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
0677 kvmppc_core_queue_dec(vcpu);
0678 else
0679 kvmppc_core_dequeue_dec(vcpu);
0680
0681 if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
0682 kvmppc_core_queue_watchdog(vcpu);
0683 else
0684 kvmppc_core_dequeue_watchdog(vcpu);
0685 }
0686
0687 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
0688 {
0689 unsigned long *pending = &vcpu->arch.pending_exceptions;
0690 unsigned int priority;
0691
0692 priority = __ffs(*pending);
0693 while (priority < BOOKE_IRQPRIO_MAX) {
0694 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
0695 break;
0696
0697 priority = find_next_bit(pending,
0698 BITS_PER_BYTE * sizeof(*pending),
0699 priority + 1);
0700 }
0701
0702
0703 vcpu->arch.shared->int_pending = !!*pending;
0704 }
0705
0706
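/*
 * Deliver whatever pending exceptions we can before entering the guest.
 * Called with interrupts disabled; returns nonzero if the caller needs to
 * redo its entry checks (a request is pending or we halted on MSR_WE).
 */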
0707 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
0708 {
0709 int r = 0;
0710 WARN_ON_ONCE(!irqs_disabled());
0711
0712 kvmppc_core_check_exceptions(vcpu);
0713
0714 if (kvm_request_pending(vcpu)) {
0715
0716 return 1;
0717 }
0718
0719 if (vcpu->arch.shared->msr & MSR_WE) {
0720 local_irq_enable();
0721 kvm_vcpu_halt(vcpu);
0722 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
0723 hard_irq_disable();
0724
0725 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
0726 r = 1;
0727 }
0728
0729 return r;
0730 }
0731
0732 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
0733 {
0734 int r = 1;
0735
0736 if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
0737 update_timer_ints(vcpu);
0738 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
0739 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
0740 kvmppc_core_flush_tlb(vcpu);
0741 #endif
0742
0743 if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
0744 vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
0745 r = 0;
0746 }
0747
0748 if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
0749 vcpu->run->epr.epr = 0;
0750 vcpu->arch.epr_needed = true;
0751 vcpu->run->exit_reason = KVM_EXIT_EPR;
0752 r = 0;
0753 }
0754
0755 return r;
0756 }
0757
0758 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
0759 {
0760 int ret, s;
0761 struct debug_reg debug;
0762
0763 if (!vcpu->arch.sane) {
0764 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
0765 return -EINVAL;
0766 }
0767
0768 s = kvmppc_prepare_to_enter(vcpu);
0769 if (s <= 0) {
0770 ret = s;
0771 goto out;
0772 }
0773
0774
0775 #ifdef CONFIG_PPC_FPU
0776
0777 enable_kernel_fp();
0778
0779
0780
0781
0782
0783 kvmppc_load_guest_fp(vcpu);
0784 #endif
0785
0786 #ifdef CONFIG_ALTIVEC
0787
0788 if (cpu_has_feature(CPU_FTR_ALTIVEC))
0789 enable_kernel_altivec();
0790
0791
0792
0793
0794 kvmppc_load_guest_altivec(vcpu);
0795 #endif
0796
0797
0798 debug = vcpu->arch.dbg_reg;
0799 switch_booke_debug_regs(&debug);
0800 debug = current->thread.debug;
0801 current->thread.debug = vcpu->arch.dbg_reg;
0802
0803 vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
0804 kvmppc_fix_ee_before_entry();
0805
0806 ret = __kvmppc_vcpu_run(vcpu);
0807
0808
0809
0810
0811
0812 switch_booke_debug_regs(&debug);
0813 current->thread.debug = debug;
0814
0815 #ifdef CONFIG_PPC_FPU
0816 kvmppc_save_guest_fp(vcpu);
0817 #endif
0818
0819 #ifdef CONFIG_ALTIVEC
0820 kvmppc_save_guest_altivec(vcpu);
0821 #endif
0822
0823 out:
0824 vcpu->mode = OUTSIDE_GUEST_MODE;
0825 return ret;
0826 }
0827
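/* Handle the outcome of emulating a guest instruction that trapped. */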
0828 static int emulation_exit(struct kvm_vcpu *vcpu)
0829 {
0830 enum emulation_result er;
0831
0832 er = kvmppc_emulate_instruction(vcpu);
0833 switch (er) {
0834 case EMULATE_DONE:
0835
0836 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
0837
0838
0839 return RESUME_GUEST_NV;
0840
0841 case EMULATE_AGAIN:
0842 return RESUME_GUEST;
0843
0844 case EMULATE_FAIL:
0845 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
0846 __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
0847
0848
0849 vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
0850 vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
0851 kvmppc_core_queue_program(vcpu, ESR_PIL);
0852 return RESUME_HOST;
0853
0854 case EMULATE_EXIT_USER:
0855 return RESUME_HOST;
0856
0857 default:
0858 BUG();
0859 }
0860 }
0861
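/*
 * Handle a debug exit: reflect the event into the guest when userspace is
 * not debugging it, otherwise report the breakpoint/watchpoint details to
 * userspace.
 */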
0862 static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
0863 {
0864 struct kvm_run *run = vcpu->run;
0865 struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
0866 u32 dbsr = vcpu->arch.dbsr;
0867
0868 if (vcpu->guest_debug == 0) {
0869
0870
0871
0872
0873 if (dbsr & DBSR_IDE) {
0874 dbsr &= ~DBSR_IDE;
0875 if (!dbsr)
0876 return RESUME_GUEST;
0877 }
0878
0879 if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
0880 (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
0881 kvmppc_core_queue_debug(vcpu);
0882
0883
0884 if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
0885 kvmppc_core_queue_program(vcpu, ESR_PTR);
0886
0887 return RESUME_GUEST;
0888 }
0889
0890
0891
0892
0893
0894 vcpu->arch.dbsr = 0;
0895 run->debug.arch.status = 0;
0896 run->debug.arch.address = vcpu->arch.regs.nip;
0897
0898 if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
0899 run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
0900 } else {
0901 if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
0902 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
0903 else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
0904 run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
0905 if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
0906 run->debug.arch.address = dbg_reg->dac1;
0907 else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
0908 run->debug.arch.address = dbg_reg->dac2;
0909 }
0910
0911 return RESUME_HOST;
0912 }
0913
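/* Build a minimal pt_regs (r1, nip, msr, lr) describing the current host context. */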
0914 static void kvmppc_fill_pt_regs(struct pt_regs *regs)
0915 {
0916 ulong r1, ip, msr, lr;
0917
0918 asm("mr %0, 1" : "=r"(r1));
0919 asm("mflr %0" : "=r"(lr));
0920 asm("mfmsr %0" : "=r"(msr));
0921 asm("bl 1f; 1: mflr %0" : "=r"(ip));
0922
0923 memset(regs, 0, sizeof(*regs));
0924 regs->gpr[1] = r1;
0925 regs->nip = ip;
0926 regs->msr = msr;
0927 regs->link = lr;
0928 }
0929
0930
0931
0932
0933
0934
0935
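/*
 * Re-deliver, on the host, interrupts taken while running the guest that
 * the host's own handlers need to see (external, decrementer, doorbell,
 * etc.), using a locally constructed pt_regs.
 */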
0936 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
0937 unsigned int exit_nr)
0938 {
0939 struct pt_regs regs;
0940
0941 switch (exit_nr) {
0942 case BOOKE_INTERRUPT_EXTERNAL:
0943 kvmppc_fill_pt_regs(&regs);
0944 do_IRQ(&regs);
0945 break;
0946 case BOOKE_INTERRUPT_DECREMENTER:
0947 kvmppc_fill_pt_regs(&regs);
0948 timer_interrupt(&regs);
0949 break;
0950 #if defined(CONFIG_PPC_DOORBELL)
0951 case BOOKE_INTERRUPT_DOORBELL:
0952 kvmppc_fill_pt_regs(&regs);
0953 doorbell_exception(&regs);
0954 break;
0955 #endif
0956 case BOOKE_INTERRUPT_MACHINE_CHECK:
0957
0958 break;
0959 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
0960 kvmppc_fill_pt_regs(&regs);
0961 performance_monitor_exception(&regs);
0962 break;
0963 case BOOKE_INTERRUPT_WATCHDOG:
0964 kvmppc_fill_pt_regs(&regs);
0965 #ifdef CONFIG_BOOKE_WDT
0966 WatchdogException(&regs);
0967 #else
0968 unknown_exception(&regs);
0969 #endif
0970 break;
0971 case BOOKE_INTERRUPT_CRITICAL:
0972 kvmppc_fill_pt_regs(&regs);
0973 unknown_exception(&regs);
0974 break;
0975 case BOOKE_INTERRUPT_DEBUG:
0976
0977 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
0978 kvmppc_clear_dbsr();
0979 break;
0980 }
0981 }
0982
0983 static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
0984 enum emulation_result emulated, u32 last_inst)
0985 {
0986 switch (emulated) {
0987 case EMULATE_AGAIN:
0988 return RESUME_GUEST;
0989
0990 case EMULATE_FAIL:
0991 pr_debug("%s: load instruction from guest address %lx failed\n",
0992 __func__, vcpu->arch.regs.nip);
0993
0994
0995 vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
0996 vcpu->run->hw.hardware_exit_reason |= last_inst;
0997 kvmppc_core_queue_program(vcpu, ESR_PIL);
0998 return RESUME_HOST;
0999
1000 default:
1001 BUG();
1002 }
1003 }
1004
1005
1006
1007
1008
1009
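/*
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV).
 */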
1010 int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1011 {
1012 struct kvm_run *run = vcpu->run;
1013 int r = RESUME_HOST;
1014 int s;
1015 int idx;
1016 u32 last_inst = KVM_INST_FETCH_FAILED;
1017 enum emulation_result emulated = EMULATE_DONE;
1018
1019
1020 kvmppc_update_timing_stats(vcpu);
1021
1022
1023 kvmppc_restart_interrupt(vcpu, exit_nr);
1024
1025
1026
1027
1028
1029 switch (exit_nr) {
1030 case BOOKE_INTERRUPT_DATA_STORAGE:
1031 case BOOKE_INTERRUPT_DTLB_MISS:
1032 case BOOKE_INTERRUPT_HV_PRIV:
1033 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1034 break;
1035 case BOOKE_INTERRUPT_PROGRAM:
1036
1037 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1038 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1039 break;
1040 default:
1041 break;
1042 }
1043
1044 trace_kvm_exit(exit_nr, vcpu);
1045
1046 context_tracking_guest_exit();
1047 if (!vtime_accounting_enabled_this_cpu()) {
1048 local_irq_enable();
1049
1050
1051
1052
1053
1054
1055
1056
1057 local_irq_disable();
1058 }
1059 vtime_account_guest_exit();
1060
1061 local_irq_enable();
1062
1063 run->exit_reason = KVM_EXIT_UNKNOWN;
1064 run->ready_for_interrupt_injection = 1;
1065
1066 if (emulated != EMULATE_DONE) {
1067 r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
1068 goto out;
1069 }
1070
1071 switch (exit_nr) {
1072 case BOOKE_INTERRUPT_MACHINE_CHECK:
1073 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
1074 kvmppc_dump_vcpu(vcpu);
1075
1076 run->hw.hardware_exit_reason = ~1ULL << 32;
1077 run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
1078 r = RESUME_HOST;
1079 break;
1080
1081 case BOOKE_INTERRUPT_EXTERNAL:
1082 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
1083 r = RESUME_GUEST;
1084 break;
1085
1086 case BOOKE_INTERRUPT_DECREMENTER:
1087 kvmppc_account_exit(vcpu, DEC_EXITS);
1088 r = RESUME_GUEST;
1089 break;
1090
1091 case BOOKE_INTERRUPT_WATCHDOG:
1092 r = RESUME_GUEST;
1093 break;
1094
1095 case BOOKE_INTERRUPT_DOORBELL:
1096 kvmppc_account_exit(vcpu, DBELL_EXITS);
1097 r = RESUME_GUEST;
1098 break;
1099
1100 case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
1101 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1102
1103
1104
1105
1106
1107
1108 r = RESUME_GUEST;
1109 break;
1110
1111 case BOOKE_INTERRUPT_GUEST_DBELL:
1112 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1113
1114
1115
1116
1117
1118
1119 r = RESUME_GUEST;
1120 break;
1121
1122 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
1123 r = RESUME_GUEST;
1124 break;
1125
1126 case BOOKE_INTERRUPT_HV_PRIV:
1127 r = emulation_exit(vcpu);
1128 break;
1129
1130 case BOOKE_INTERRUPT_PROGRAM:
1131 if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
1132 (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
1133
1134
1135
1136
1137 r = kvmppc_handle_debug(vcpu);
1138 run->exit_reason = KVM_EXIT_DEBUG;
1139 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1140 break;
1141 }
1142
1143 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
1144
1145
1146
1147
1148
1149
1150
1151
1152 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
1153 r = RESUME_GUEST;
1154 kvmppc_account_exit(vcpu, USR_PR_INST);
1155 break;
1156 }
1157
1158 r = emulation_exit(vcpu);
1159 break;
1160
1161 case BOOKE_INTERRUPT_FP_UNAVAIL:
1162 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
1163 kvmppc_account_exit(vcpu, FP_UNAVAIL);
1164 r = RESUME_GUEST;
1165 break;
1166
1167 #ifdef CONFIG_SPE
1168 case BOOKE_INTERRUPT_SPE_UNAVAIL: {
1169 if (vcpu->arch.shared->msr & MSR_SPE)
1170 kvmppc_vcpu_enable_spe(vcpu);
1171 else
1172 kvmppc_booke_queue_irqprio(vcpu,
1173 BOOKE_IRQPRIO_SPE_UNAVAIL);
1174 r = RESUME_GUEST;
1175 break;
1176 }
1177
1178 case BOOKE_INTERRUPT_SPE_FP_DATA:
1179 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
1180 r = RESUME_GUEST;
1181 break;
1182
1183 case BOOKE_INTERRUPT_SPE_FP_ROUND:
1184 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
1185 r = RESUME_GUEST;
1186 break;
1187 #elif defined(CONFIG_SPE_POSSIBLE)
1188 case BOOKE_INTERRUPT_SPE_UNAVAIL:
1189
1190
1191
1192
1193 kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
1194 r = RESUME_GUEST;
1195 break;
1196
1197
1198
1199
1200
1201 case BOOKE_INTERRUPT_SPE_FP_DATA:
1202 case BOOKE_INTERRUPT_SPE_FP_ROUND:
1203 printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
1204 __func__, exit_nr, vcpu->arch.regs.nip);
1205 run->hw.hardware_exit_reason = exit_nr;
1206 r = RESUME_HOST;
1207 break;
1208 #endif
1209
1210
1211
1212
1213
1214 #ifdef CONFIG_ALTIVEC
1215 case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
1216 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
1217 r = RESUME_GUEST;
1218 break;
1219
1220 case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
1221 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
1222 r = RESUME_GUEST;
1223 break;
1224 #endif
1225
1226 case BOOKE_INTERRUPT_DATA_STORAGE:
1227 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
1228 vcpu->arch.fault_esr);
1229 kvmppc_account_exit(vcpu, DSI_EXITS);
1230 r = RESUME_GUEST;
1231 break;
1232
1233 case BOOKE_INTERRUPT_INST_STORAGE:
1234 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
1235 kvmppc_account_exit(vcpu, ISI_EXITS);
1236 r = RESUME_GUEST;
1237 break;
1238
1239 case BOOKE_INTERRUPT_ALIGNMENT:
1240 kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
1241 vcpu->arch.fault_esr);
1242 r = RESUME_GUEST;
1243 break;
1244
1245 #ifdef CONFIG_KVM_BOOKE_HV
1246 case BOOKE_INTERRUPT_HV_SYSCALL:
1247 if (!(vcpu->arch.shared->msr & MSR_PR)) {
1248 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1249 } else {
1250
1251
1252
1253
1254 kvmppc_core_queue_program(vcpu, ESR_PPR);
1255 }
1256
1257 r = RESUME_GUEST;
1258 break;
1259 #else
1260 case BOOKE_INTERRUPT_SYSCALL:
1261 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1262 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1263
1264 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1265 r = RESUME_GUEST;
1266 } else {
1267
1268 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
1269 }
1270 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
1271 r = RESUME_GUEST;
1272 break;
1273 #endif
1274
1275 case BOOKE_INTERRUPT_DTLB_MISS: {
1276 unsigned long eaddr = vcpu->arch.fault_dear;
1277 int gtlb_index;
1278 gpa_t gpaddr;
1279 gfn_t gfn;
1280
1281 #ifdef CONFIG_KVM_E500V2
1282 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1283 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1284 kvmppc_map_magic(vcpu);
1285 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1286 r = RESUME_GUEST;
1287
1288 break;
1289 }
1290 #endif
1291
1292
1293 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1294 if (gtlb_index < 0) {
1295
1296 kvmppc_core_queue_dtlb_miss(vcpu,
1297 vcpu->arch.fault_dear,
1298 vcpu->arch.fault_esr);
1299 kvmppc_mmu_dtlb_miss(vcpu);
1300 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
1301 r = RESUME_GUEST;
1302 break;
1303 }
1304
1305 idx = srcu_read_lock(&vcpu->kvm->srcu);
1306
1307 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1308 gfn = gpaddr >> PAGE_SHIFT;
1309
1310 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1311
1312
1313
1314
1315
1316
1317 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1318 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1319 r = RESUME_GUEST;
1320 } else {
1321
1322
1323 vcpu->arch.paddr_accessed = gpaddr;
1324 vcpu->arch.vaddr_accessed = eaddr;
1325 r = kvmppc_emulate_mmio(vcpu);
1326 kvmppc_account_exit(vcpu, MMIO_EXITS);
1327 }
1328
1329 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1330 break;
1331 }
1332
1333 case BOOKE_INTERRUPT_ITLB_MISS: {
1334 unsigned long eaddr = vcpu->arch.regs.nip;
1335 gpa_t gpaddr;
1336 gfn_t gfn;
1337 int gtlb_index;
1338
1339 r = RESUME_GUEST;
1340
1341
1342 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1343 if (gtlb_index < 0) {
1344
1345 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
1346 kvmppc_mmu_itlb_miss(vcpu);
1347 kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
1348 break;
1349 }
1350
1351 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
1352
1353 idx = srcu_read_lock(&vcpu->kvm->srcu);
1354
1355 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1356 gfn = gpaddr >> PAGE_SHIFT;
1357
1358 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1359
1360
1361
1362
1363
1364
1365 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1366 } else {
1367
1368 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
1369 }
1370
1371 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1372 break;
1373 }
1374
1375 case BOOKE_INTERRUPT_DEBUG: {
1376 r = kvmppc_handle_debug(vcpu);
1377 if (r == RESUME_HOST)
1378 run->exit_reason = KVM_EXIT_DEBUG;
1379 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1380 break;
1381 }
1382
1383 default:
1384 printk(KERN_EMERG "exit_nr %d\n", exit_nr);
1385 BUG();
1386 }
1387
1388 out:
1389
1390
1391
1392
1393 if (!(r & RESUME_HOST)) {
1394 s = kvmppc_prepare_to_enter(vcpu);
1395 if (s <= 0)
1396 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
1397 else {
1398
1399 kvmppc_fix_ee_before_entry();
1400 kvmppc_load_guest_fp(vcpu);
1401 kvmppc_load_guest_altivec(vcpu);
1402 }
1403 }
1404
1405 return r;
1406 }
1407
1408 static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1409 {
1410 u32 old_tsr = vcpu->arch.tsr;
1411
1412 vcpu->arch.tsr = new_tsr;
1413
1414 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1415 arm_next_watchdog(vcpu);
1416
1417 update_timer_ints(vcpu);
1418 }
1419
1420 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1421 {
1422
1423 spin_lock_init(&vcpu->arch.wdt_lock);
1424 timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
1425
1426
1427
1428
1429
1430 mtspr(SPRN_DBSR, DBSR_MRR);
1431 return 0;
1432 }
1433
1434 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1435 {
1436 del_timer_sync(&vcpu->arch.wdt_timer);
1437 }
1438
1439 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1440 {
1441 int i;
1442
1443 vcpu_load(vcpu);
1444
1445 regs->pc = vcpu->arch.regs.nip;
1446 regs->cr = kvmppc_get_cr(vcpu);
1447 regs->ctr = vcpu->arch.regs.ctr;
1448 regs->lr = vcpu->arch.regs.link;
1449 regs->xer = kvmppc_get_xer(vcpu);
1450 regs->msr = vcpu->arch.shared->msr;
1451 regs->srr0 = kvmppc_get_srr0(vcpu);
1452 regs->srr1 = kvmppc_get_srr1(vcpu);
1453 regs->pid = vcpu->arch.pid;
1454 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1455 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1456 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1457 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1458 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1459 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1460 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1461 regs->sprg7 = kvmppc_get_sprg7(vcpu);
1462
1463 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1464 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1465
1466 vcpu_put(vcpu);
1467 return 0;
1468 }
1469
1470 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1471 {
1472 int i;
1473
1474 vcpu_load(vcpu);
1475
1476 vcpu->arch.regs.nip = regs->pc;
1477 kvmppc_set_cr(vcpu, regs->cr);
1478 vcpu->arch.regs.ctr = regs->ctr;
1479 vcpu->arch.regs.link = regs->lr;
1480 kvmppc_set_xer(vcpu, regs->xer);
1481 kvmppc_set_msr(vcpu, regs->msr);
1482 kvmppc_set_srr0(vcpu, regs->srr0);
1483 kvmppc_set_srr1(vcpu, regs->srr1);
1484 kvmppc_set_pid(vcpu, regs->pid);
1485 kvmppc_set_sprg0(vcpu, regs->sprg0);
1486 kvmppc_set_sprg1(vcpu, regs->sprg1);
1487 kvmppc_set_sprg2(vcpu, regs->sprg2);
1488 kvmppc_set_sprg3(vcpu, regs->sprg3);
1489 kvmppc_set_sprg4(vcpu, regs->sprg4);
1490 kvmppc_set_sprg5(vcpu, regs->sprg5);
1491 kvmppc_set_sprg6(vcpu, regs->sprg6);
1492 kvmppc_set_sprg7(vcpu, regs->sprg7);
1493
1494 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1495 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1496
1497 vcpu_put(vcpu);
1498 return 0;
1499 }
1500
1501 static void get_sregs_base(struct kvm_vcpu *vcpu,
1502 struct kvm_sregs *sregs)
1503 {
1504 u64 tb = get_tb();
1505
1506 sregs->u.e.features |= KVM_SREGS_E_BASE;
1507
1508 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1509 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1510 sregs->u.e.mcsr = vcpu->arch.mcsr;
1511 sregs->u.e.esr = kvmppc_get_esr(vcpu);
1512 sregs->u.e.dear = kvmppc_get_dar(vcpu);
1513 sregs->u.e.tsr = vcpu->arch.tsr;
1514 sregs->u.e.tcr = vcpu->arch.tcr;
1515 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1516 sregs->u.e.tb = tb;
1517 sregs->u.e.vrsave = vcpu->arch.vrsave;
1518 }
1519
1520 static int set_sregs_base(struct kvm_vcpu *vcpu,
1521 struct kvm_sregs *sregs)
1522 {
1523 if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1524 return 0;
1525
1526 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1527 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1528 vcpu->arch.mcsr = sregs->u.e.mcsr;
1529 kvmppc_set_esr(vcpu, sregs->u.e.esr);
1530 kvmppc_set_dar(vcpu, sregs->u.e.dear);
1531 vcpu->arch.vrsave = sregs->u.e.vrsave;
1532 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1533
1534 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
1535 vcpu->arch.dec = sregs->u.e.dec;
1536 kvmppc_emulate_dec(vcpu);
1537 }
1538
1539 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
1540 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
1541
1542 return 0;
1543 }
1544
1545 static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1546 struct kvm_sregs *sregs)
1547 {
1548 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1549
1550 sregs->u.e.pir = vcpu->vcpu_id;
1551 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1552 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1553 sregs->u.e.decar = vcpu->arch.decar;
1554 sregs->u.e.ivpr = vcpu->arch.ivpr;
1555 }
1556
1557 static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1558 struct kvm_sregs *sregs)
1559 {
1560 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1561 return 0;
1562
1563 if (sregs->u.e.pir != vcpu->vcpu_id)
1564 return -EINVAL;
1565
1566 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1567 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1568 vcpu->arch.decar = sregs->u.e.decar;
1569 vcpu->arch.ivpr = sregs->u.e.ivpr;
1570
1571 return 0;
1572 }
1573
1574 int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1575 {
1576 sregs->u.e.features |= KVM_SREGS_E_IVOR;
1577
1578 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1579 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1580 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1581 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1582 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1583 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1584 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1585 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1586 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1587 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1588 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1589 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1590 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1591 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1592 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1593 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1594 return 0;
1595 }
1596
1597 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1598 {
1599 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1600 return 0;
1601
1602 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1603 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1604 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1605 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1606 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1607 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1608 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1609 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1610 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1611 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1612 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1613 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1614 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1615 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1616 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1617 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1618
1619 return 0;
1620 }
1621
1622 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1623 struct kvm_sregs *sregs)
1624 {
1625 int ret;
1626
1627 vcpu_load(vcpu);
1628
1629 sregs->pvr = vcpu->arch.pvr;
1630
1631 get_sregs_base(vcpu, sregs);
1632 get_sregs_arch206(vcpu, sregs);
1633 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1634
1635 vcpu_put(vcpu);
1636 return ret;
1637 }
1638
1639 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1640 struct kvm_sregs *sregs)
1641 {
1642 int ret = -EINVAL;
1643
1644 vcpu_load(vcpu);
1645 if (vcpu->arch.pvr != sregs->pvr)
1646 goto out;
1647
1648 ret = set_sregs_base(vcpu, sregs);
1649 if (ret < 0)
1650 goto out;
1651
1652 ret = set_sregs_arch206(vcpu, sregs);
1653 if (ret < 0)
1654 goto out;
1655
1656 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1657
1658 out:
1659 vcpu_put(vcpu);
1660 return ret;
1661 }
1662
1663 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1664 union kvmppc_one_reg *val)
1665 {
1666 int r = 0;
1667
1668 switch (id) {
1669 case KVM_REG_PPC_IAC1:
1670 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
1671 break;
1672 case KVM_REG_PPC_IAC2:
1673 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
1674 break;
1675 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1676 case KVM_REG_PPC_IAC3:
1677 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
1678 break;
1679 case KVM_REG_PPC_IAC4:
1680 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
1681 break;
1682 #endif
1683 case KVM_REG_PPC_DAC1:
1684 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
1685 break;
1686 case KVM_REG_PPC_DAC2:
1687 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
1688 break;
1689 case KVM_REG_PPC_EPR: {
1690 u32 epr = kvmppc_get_epr(vcpu);
1691 *val = get_reg_val(id, epr);
1692 break;
1693 }
1694 #if defined(CONFIG_64BIT)
1695 case KVM_REG_PPC_EPCR:
1696 *val = get_reg_val(id, vcpu->arch.epcr);
1697 break;
1698 #endif
1699 case KVM_REG_PPC_TCR:
1700 *val = get_reg_val(id, vcpu->arch.tcr);
1701 break;
1702 case KVM_REG_PPC_TSR:
1703 *val = get_reg_val(id, vcpu->arch.tsr);
1704 break;
1705 case KVM_REG_PPC_DEBUG_INST:
1706 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1707 break;
1708 case KVM_REG_PPC_VRSAVE:
1709 *val = get_reg_val(id, vcpu->arch.vrsave);
1710 break;
1711 default:
1712 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
1713 break;
1714 }
1715
1716 return r;
1717 }
1718
1719 int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1720 union kvmppc_one_reg *val)
1721 {
1722 int r = 0;
1723
1724 switch (id) {
1725 case KVM_REG_PPC_IAC1:
1726 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
1727 break;
1728 case KVM_REG_PPC_IAC2:
1729 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
1730 break;
1731 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1732 case KVM_REG_PPC_IAC3:
1733 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
1734 break;
1735 case KVM_REG_PPC_IAC4:
1736 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
1737 break;
1738 #endif
1739 case KVM_REG_PPC_DAC1:
1740 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
1741 break;
1742 case KVM_REG_PPC_DAC2:
1743 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
1744 break;
1745 case KVM_REG_PPC_EPR: {
1746 u32 new_epr = set_reg_val(id, *val);
1747 kvmppc_set_epr(vcpu, new_epr);
1748 break;
1749 }
1750 #if defined(CONFIG_64BIT)
1751 case KVM_REG_PPC_EPCR: {
1752 u32 new_epcr = set_reg_val(id, *val);
1753 kvmppc_set_epcr(vcpu, new_epcr);
1754 break;
1755 }
1756 #endif
1757 case KVM_REG_PPC_OR_TSR: {
1758 u32 tsr_bits = set_reg_val(id, *val);
1759 kvmppc_set_tsr_bits(vcpu, tsr_bits);
1760 break;
1761 }
1762 case KVM_REG_PPC_CLEAR_TSR: {
1763 u32 tsr_bits = set_reg_val(id, *val);
1764 kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1765 break;
1766 }
1767 case KVM_REG_PPC_TSR: {
1768 u32 tsr = set_reg_val(id, *val);
1769 kvmppc_set_tsr(vcpu, tsr);
1770 break;
1771 }
1772 case KVM_REG_PPC_TCR: {
1773 u32 tcr = set_reg_val(id, *val);
1774 kvmppc_set_tcr(vcpu, tcr);
1775 break;
1776 }
1777 case KVM_REG_PPC_VRSAVE:
1778 vcpu->arch.vrsave = set_reg_val(id, *val);
1779 break;
1780 default:
1781 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
1782 break;
1783 }
1784
1785 return r;
1786 }
1787
1788 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1789 {
1790 return -EOPNOTSUPP;
1791 }
1792
1793 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1794 {
1795 return -EOPNOTSUPP;
1796 }
1797
1798 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1799 struct kvm_translation *tr)
1800 {
1801 int r;
1802
1803 vcpu_load(vcpu);
1804 r = kvmppc_core_vcpu_translate(vcpu, tr);
1805 vcpu_put(vcpu);
1806 return r;
1807 }
1808
1809 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1810 {
1811
1812 }
1813
1814 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1815 {
1816 return -EOPNOTSUPP;
1817 }
1818
1819 void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
1820 {
1821 }
1822
1823 int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1824 const struct kvm_memory_slot *old,
1825 struct kvm_memory_slot *new,
1826 enum kvm_mr_change change)
1827 {
1828 return 0;
1829 }
1830
1831 void kvmppc_core_commit_memory_region(struct kvm *kvm,
1832 struct kvm_memory_slot *old,
1833 const struct kvm_memory_slot *new,
1834 enum kvm_mr_change change)
1835 {
1836 }
1837
1838 void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
1839 {
1840 }
1841
1842 void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1843 {
1844 #if defined(CONFIG_64BIT)
1845 vcpu->arch.epcr = new_epcr;
1846 #ifdef CONFIG_KVM_BOOKE_HV
1847 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1848 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
1849 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1850 #endif
1851 #endif
1852 }
1853
1854 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1855 {
1856 vcpu->arch.tcr = new_tcr;
1857 arm_next_watchdog(vcpu);
1858 update_timer_ints(vcpu);
1859 }
1860
1861 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1862 {
1863 set_bits(tsr_bits, &vcpu->arch.tsr);
1864 smp_wmb();
1865 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1866 kvm_vcpu_kick(vcpu);
1867 }
1868
1869 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1870 {
1871 clear_bits(tsr_bits, &vcpu->arch.tsr);
1872
1873
1874
1875
1876
1877 if (tsr_bits & (TSR_ENW | TSR_WIS))
1878 arm_next_watchdog(vcpu);
1879
1880 update_timer_ints(vcpu);
1881 }
1882
1883 void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
1884 {
1885 if (vcpu->arch.tcr & TCR_ARE) {
1886 vcpu->arch.dec = vcpu->arch.decar;
1887 kvmppc_emulate_dec(vcpu);
1888 }
1889
1890 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1891 }
1892
1893 static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1894 uint64_t addr, int index)
1895 {
1896 switch (index) {
1897 case 0:
1898 dbg_reg->dbcr0 |= DBCR0_IAC1;
1899 dbg_reg->iac1 = addr;
1900 break;
1901 case 1:
1902 dbg_reg->dbcr0 |= DBCR0_IAC2;
1903 dbg_reg->iac2 = addr;
1904 break;
1905 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1906 case 2:
1907 dbg_reg->dbcr0 |= DBCR0_IAC3;
1908 dbg_reg->iac3 = addr;
1909 break;
1910 case 3:
1911 dbg_reg->dbcr0 |= DBCR0_IAC4;
1912 dbg_reg->iac4 = addr;
1913 break;
1914 #endif
1915 default:
1916 return -EINVAL;
1917 }
1918
1919 dbg_reg->dbcr0 |= DBCR0_IDM;
1920 return 0;
1921 }
1922
1923 static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1924 int type, int index)
1925 {
1926 switch (index) {
1927 case 0:
1928 if (type & KVMPPC_DEBUG_WATCH_READ)
1929 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1930 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1931 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1932 dbg_reg->dac1 = addr;
1933 break;
1934 case 1:
1935 if (type & KVMPPC_DEBUG_WATCH_READ)
1936 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1937 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1938 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1939 dbg_reg->dac2 = addr;
1940 break;
1941 default:
1942 return -EINVAL;
1943 }
1944
1945 dbg_reg->dbcr0 |= DBCR0_IDM;
1946 return 0;
1947 }
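
/*
 * On BookE-HV, control which MSR bits the guest may change itself: set or
 * clear the corresponding shadow MSRP protection bits.
 */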
1948 void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1949 {
1950
1951 #ifdef CONFIG_KVM_BOOKE_HV
1952 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1953 if (set) {
1954 if (prot_bitmap & MSR_UCLE)
1955 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1956 if (prot_bitmap & MSR_DE)
1957 vcpu->arch.shadow_msrp |= MSRP_DEP;
1958 if (prot_bitmap & MSR_PMM)
1959 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1960 } else {
1961 if (prot_bitmap & MSR_UCLE)
1962 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1963 if (prot_bitmap & MSR_DE)
1964 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1965 if (prot_bitmap & MSR_PMM)
1966 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1967 }
1968 #endif
1969 }
1970
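/*
 * Translate a guest effective address through the guest TLB (or the e500v2
 * magic page) and fill *pte with the guest-physical address; returns
 * -ENOENT if there is no matching TLB entry.
 */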
1971 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1972 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1973 {
1974 int gtlb_index;
1975 gpa_t gpaddr;
1976
1977 #ifdef CONFIG_KVM_E500V2
1978 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1979 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1980 pte->eaddr = eaddr;
1981 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1982 (eaddr & ~PAGE_MASK);
1983 pte->vpage = eaddr >> PAGE_SHIFT;
1984 pte->may_read = true;
1985 pte->may_write = true;
1986 pte->may_execute = true;
1987
1988 return 0;
1989 }
1990 #endif
1991
1992
1993 switch (xlid) {
1994 case XLATE_INST:
1995 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1996 break;
1997 case XLATE_DATA:
1998 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1999 break;
2000 default:
2001 BUG();
2002 }
2003
2004
2005 if (gtlb_index < 0)
2006 return -ENOENT;
2007
2008 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
2009
2010 pte->eaddr = eaddr;
2011 pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
2012 pte->vpage = eaddr >> PAGE_SHIFT;
2013
2014
2015 pte->may_read = true;
2016 pte->may_write = true;
2017 pte->may_execute = true;
2018
2019 return 0;
2020 }
2021
2022 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2023 struct kvm_guest_debug *dbg)
2024 {
2025 struct debug_reg *dbg_reg;
2026 int n, b = 0, w = 0;
2027 int ret = 0;
2028
2029 vcpu_load(vcpu);
2030
2031 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
2032 vcpu->arch.dbg_reg.dbcr0 = 0;
2033 vcpu->guest_debug = 0;
2034 kvm_guest_protect_msr(vcpu, MSR_DE, false);
2035 goto out;
2036 }
2037
2038 kvm_guest_protect_msr(vcpu, MSR_DE, true);
2039 vcpu->guest_debug = dbg->control;
2040 vcpu->arch.dbg_reg.dbcr0 = 0;
2041
2042 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2043 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2044
2045
2046 dbg_reg = &(vcpu->arch.dbg_reg);
2047
2048 #ifdef CONFIG_KVM_BOOKE_HV
2049
2050
2051
2052
2053 dbg_reg->dbcr1 = 0;
2054 dbg_reg->dbcr2 = 0;
2055 #else
2056
2057
2058
2059
2060
2061 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
2062 DBCR1_IAC4US;
2063 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
2064 #endif
2065
2066 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2067 goto out;
2068
2069 ret = -EINVAL;
2070 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
2071 uint64_t addr = dbg->arch.bp[n].addr;
2072 uint32_t type = dbg->arch.bp[n].type;
2073
2074 if (type == KVMPPC_DEBUG_NONE)
2075 continue;
2076
2077 if (type & ~(KVMPPC_DEBUG_WATCH_READ |
2078 KVMPPC_DEBUG_WATCH_WRITE |
2079 KVMPPC_DEBUG_BREAKPOINT))
2080 goto out;
2081
2082 if (type & KVMPPC_DEBUG_BREAKPOINT) {
2083
2084 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
2085 goto out;
2086 } else {
2087
2088 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
2089 type, w++))
2090 goto out;
2091 }
2092 }
2093
2094 ret = 0;
2095 out:
2096 vcpu_put(vcpu);
2097 return ret;
2098 }
2099
2100 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2101 {
2102 vcpu->cpu = smp_processor_id();
2103 current->thread.kvm_vcpu = vcpu;
2104 }
2105
2106 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2107 {
2108 current->thread.kvm_vcpu = NULL;
2109 vcpu->cpu = -1;
2110
2111
2112 kvmppc_clear_dbsr();
2113 }
2114
2115 int kvmppc_core_init_vm(struct kvm *kvm)
2116 {
2117 return kvm->arch.kvm_ops->init_vm(kvm);
2118 }
2119
2120 int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
2121 {
2122 int i;
2123 int r;
2124
2125 r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
2126 if (r)
2127 return r;
2128
2129
2130 vcpu->arch.regs.nip = 0;
2131 vcpu->arch.shared->pir = vcpu->vcpu_id;
2132 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8);
2133 kvmppc_set_msr(vcpu, 0);
2134
2135 #ifndef CONFIG_KVM_BOOKE_HV
2136 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
2137 vcpu->arch.shadow_pid = 1;
2138 vcpu->arch.shared->msr = 0;
2139 #endif
2140
2141
2142
2143 vcpu->arch.ivpr = 0x55550000;
2144 for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
2145 vcpu->arch.ivor[i] = 0x7700 | i * 4;
2146
2147 kvmppc_init_timing_stats(vcpu);
2148
2149 r = kvmppc_core_vcpu_setup(vcpu);
2150 if (r)
2151 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2152 kvmppc_sanity_check(vcpu);
2153 return r;
2154 }
2155
2156 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2157 {
2158 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2159 }
2160
2161 void kvmppc_core_destroy_vm(struct kvm *kvm)
2162 {
2163 kvm->arch.kvm_ops->destroy_vm(kvm);
2164 }
2165
2166 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2167 {
2168 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
2169 }
2170
2171 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2172 {
2173 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
2174 }
2175
2176 int __init kvmppc_booke_init(void)
2177 {
2178 #ifndef CONFIG_KVM_BOOKE_HV
2179 unsigned long ivor[16];
2180 unsigned long *handler = kvmppc_booke_handler_addr;
2181 unsigned long max_ivor = 0;
2182 unsigned long handler_len;
2183 int i;
2184
2185
2186
2187 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
2188 VCPU_SIZE_ORDER);
2189 if (!kvmppc_booke_handlers)
2190 return -ENOMEM;
2191
2192
2193
2194
2195
2196 ivor[0] = mfspr(SPRN_IVOR0);
2197 ivor[1] = mfspr(SPRN_IVOR1);
2198 ivor[2] = mfspr(SPRN_IVOR2);
2199 ivor[3] = mfspr(SPRN_IVOR3);
2200 ivor[4] = mfspr(SPRN_IVOR4);
2201 ivor[5] = mfspr(SPRN_IVOR5);
2202 ivor[6] = mfspr(SPRN_IVOR6);
2203 ivor[7] = mfspr(SPRN_IVOR7);
2204 ivor[8] = mfspr(SPRN_IVOR8);
2205 ivor[9] = mfspr(SPRN_IVOR9);
2206 ivor[10] = mfspr(SPRN_IVOR10);
2207 ivor[11] = mfspr(SPRN_IVOR11);
2208 ivor[12] = mfspr(SPRN_IVOR12);
2209 ivor[13] = mfspr(SPRN_IVOR13);
2210 ivor[14] = mfspr(SPRN_IVOR14);
2211 ivor[15] = mfspr(SPRN_IVOR15);
2212
2213 for (i = 0; i < 16; i++) {
2214 if (ivor[i] > max_ivor)
2215 max_ivor = i;
2216
2217 handler_len = handler[i + 1] - handler[i];
2218 memcpy((void *)kvmppc_booke_handlers + ivor[i],
2219 (void *)handler[i], handler_len);
2220 }
2221
2222 handler_len = handler[max_ivor + 1] - handler[max_ivor];
2223 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
2224 ivor[max_ivor] + handler_len);
2225 #endif
2226 return 0;
2227 }
2228
2229 void __exit kvmppc_booke_exit(void)
2230 {
2231 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
2232 kvm_exit();
2233 }