#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_SLBFEE	979

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

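/*
 * Check whether the guest may access an SPR of the given privilege level,
 * based on PAPR mode and the guest's current MSR[PR].
 */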
static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
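/* Save the guest's live register state into its TM checkpointed register area. */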
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

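/* Restore the guest's live register state from its TM checkpointed register area. */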
static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

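/*
 * Emulate treclaim.: reclaim the checkpointed register state into the live
 * registers and record the failure cause in TEXASR/TFIAR if the failure
 * summary was not already set.
 */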
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to quit to non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

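/*
 * Emulate trechkpt.: reload the vcpu's checkpointed register state into the
 * TM checkpoint and leave the guest in transaction-suspended state.
 */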
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * need to flush FP/VEC/VSX to vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * as a result of trecheckpoint, set TS to suspended state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

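/* emulate tabort. at guest privilege state */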
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* currently we only emulate tabort. but no emulation of other
	 * tabort variants since there is no kernel usage of them at
	 * present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/*
	 * TEXASR was updated in tm_abort. set TEXASR PR/HV/FS
	 * according to guest MSR if FS was not set before.
	 */
	if (!(org_texasr & TEXASR_FS) &&
	    MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif

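/*
 * Emulate an instruction that trapped into PR KVM. Returns EMULATE_DONE,
 * EMULATE_AGAIN, EMULATE_EXIT_USER or EMULATE_FAIL; anything we cannot
 * handle here falls through to the paired-single emulator at the bottom.
 */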
int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * add rules to fit in ISA specification regarding TM
			 * state transition in TM disable/Suspended state,
			 * and target TM state is TM inactive(00) state. (the
			 * change should be suppressed).
			 */
			if (((cur_msr & MSR_TM) == 0) &&
			    ((srr1 & MSR_TM) == 0) &&
			    MSR_TM_SUSPENDED(cur_msr) &&
			    !MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			vcpu->run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				vcpu->run->papr_hcall.args[i] = gpr;
			}

			vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBFEE:
			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
				return EMULATE_FAIL;
			} else {
				ulong b, t;
				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;

				b = kvmppc_get_gpr(vcpu, rb);
				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
					cr |= 2 << CR0_SHIFT;
				kvmppc_set_gpr(vcpu, rt, t);
				/* copy XER[SO] bit to CR0[SO] */
				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
					(31 - CR0_SHIFT);
				kvmppc_set_cr(vcpu, cr);
			}
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* only emulate for privilege guest, since problem state
			 * guest can run with TM enabled and we don't expect to
			 * trap at TM instructions in problem state.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
			    !(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(vcpu);

	return emulated;
}

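/* Decode a guest BAT register write into the corresponding shadow BAT fields. */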
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

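/* Look up the shadow BAT entry that corresponds to a BAT SPR number. */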
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

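/* Emulate mtspr for PR KVM guests; SPRs with no guest-visible effect are nops. */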
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		/* PVRs of paired-single capable CPUs (Gekko/Broadway family) */
		case 0x00080200:
		case 0x00088202:
		case 0x70000100:
		case 0x00080100:
		case 0x00083203:
		case 0x00083213:
		case 0x00083204:
		case 0x00083214:
		case 0x00087200:
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
		    !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
			    (sprn == SPRN_TFHAR))) {
			/* it is illegal to mtspr() TM regs in
			 * other than non-transactional state, with
			 * the exception of TFHAR in suspend state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

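/* Emulate mfspr for PR KVM guests; unimplemented performance SPRs read as 0. */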
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

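/* Build the DSISR value reported for an alignment interrupt. */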
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

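/* Compute the data address (DAR) reported for an alignment interrupt. */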
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so can we
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}