0001
0002
0003
0004
0005
0006
0007 #include <linux/types.h>
0008 #include <linux/string.h>
0009 #include <linux/kvm.h>
0010 #include <linux/kvm_host.h>
0011 #include <linux/kernel.h>
0012 #include <asm/opal.h>
0013 #include <asm/mce.h>
0014 #include <asm/machdep.h>
0015 #include <asm/cputhreads.h>
0016 #include <asm/hmi.h>
0017 #include <asm/kvm_ppc.h>
0018
0019
0020 #define SRR1_MC_LDSTERR (1ul << (63-42))
0021 #define SRR1_MC_IFETCH_SH (63-45)
0022 #define SRR1_MC_IFETCH_MASK 0x7
0023 #define SRR1_MC_IFETCH_SLBPAR 2
0024 #define SRR1_MC_IFETCH_SLBMULTI 3
0025 #define SRR1_MC_IFETCH_SLBPARMULTI 4
0026 #define SRR1_MC_IFETCH_TLBMULTI 5
0027
0028
0029 #define DSISR_MC_DERAT_MULTI 0x800
0030 #define DSISR_MC_TLB_MULTI 0x400
0031 #define DSISR_MC_SLB_PARITY 0x100
0032 #define DSISR_MC_SLB_MULTI 0x080
0033 #define DSISR_MC_SLB_PARMULTI 0x040
0034
0035
/*
 * Flush the hardware SLB and reload it from the guest's registered SLB
 * shadow buffer.  Called in hypervisor real mode after a machine check
 * that indicates SLB parity/multi-hit corruption.
 */
static void reload_slb(struct kvm_vcpu *vcpu)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* First clear out the SLB */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

	/* Do they have an SLB shadow buffer registered? */
	slb = vcpu->arch.slb_shadow.pinned_addr;
	if (!slb)
		return;

	/*
	 * Sanity check: clamp the guest-supplied entry count and make sure
	 * the entries we will read lie within the pinned buffer.
	 */
	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
		return;

	/* Load up the SLB from the shadow buffer */
	for (i = 0; i < n; ++i) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

		/* Insert the SLB index into the low bits of RB */
		rb = (rb & ~0xFFFul) | i;
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}
0063
0064
0065
0066
0067
0068 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
0069 {
0070 unsigned long srr1 = vcpu->arch.shregs.msr;
0071 long handled = 1;
0072
0073 if (srr1 & SRR1_MC_LDSTERR) {
0074
0075 unsigned long dsisr = vcpu->arch.shregs.dsisr;
0076
0077 if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
0078 DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
0079
0080 reload_slb(vcpu);
0081 dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
0082 DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
0083 }
0084 if (dsisr & DSISR_MC_TLB_MULTI) {
0085 tlbiel_all_lpid(vcpu->kvm->arch.radix);
0086 dsisr &= ~DSISR_MC_TLB_MULTI;
0087 }
0088
0089 if (dsisr & 0xffffffffUL)
0090 handled = 0;
0091 }
0092
0093 switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
0094 case 0:
0095 break;
0096 case SRR1_MC_IFETCH_SLBPAR:
0097 case SRR1_MC_IFETCH_SLBMULTI:
0098 case SRR1_MC_IFETCH_SLBPARMULTI:
0099 reload_slb(vcpu);
0100 break;
0101 case SRR1_MC_IFETCH_TLBMULTI:
0102 tlbiel_all_lpid(vcpu->kvm->arch.radix);
0103 break;
0104 default:
0105 handled = 0;
0106 }
0107
0108 return handled;
0109 }
0110
0111 void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
0112 {
0113 struct machine_check_event mce_evt;
0114 long handled;
0115
0116 if (vcpu->kvm->arch.fwnmi_enabled) {
0117
0118 handled = 0;
0119 } else {
0120 handled = kvmppc_realmode_mc_power7(vcpu);
0121 }
0122
0123
0124
0125
0126
0127
0128
0129 if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
0130 if (handled && mce_evt.version == MCE_V1)
0131 mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
0132 } else {
0133 memset(&mce_evt, 0, sizeof(mce_evt));
0134 }
0135
0136 vcpu->arch.mce_evt = mce_evt;
0137 }
0138
0139
/*
 * Real-mode HMI handler for P9-style entry, where each thread handles
 * its own vcpu.  Returns 1 if the HMI was fully handled via the
 * debug-trigger fast path, 0 if the host must do further processing.
 */
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	long ret = 0;

	/*
	 * Unapply and clear the guest timebase offset first.  That way, if
	 * the timebase gets resynced during HMI handling below, the TB ends
	 * up at the host value either way, and the guest offset is simply
	 * re-applied at "out:" before returning.  This avoids having to
	 * know whether firmware actually resynced the timebase.
	 */
	if (vc->tb_offset_applied) {
		u64 new_tb = mftb() - vc->tb_offset_applied;
		mtspr(SPRN_TBU40, new_tb);
		/*
		 * TBU40 writes only the upper 40 bits; if the low 24 bits
		 * of the running TB fell behind the intended value, bump
		 * the upper bits so the timebase never goes backwards.
		 */
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		vc->tb_offset_applied = 0;
	}

	local_paca->hmi_irqs++;

	/* Debug-trigger HMIs can be handled entirely here. */
	if (hmi_handle_debugtrig(NULL) >= 0) {
		ret = 1;
		goto out;
	}

	/* Give the platform a chance at early (real-mode) HMI recovery. */
	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(NULL);

out:
	/* Re-apply the guest timebase offset removed above. */
	if (vc->tb_offset) {
		u64 new_tb = mftb() + vc->tb_offset;
		mtspr(SPRN_TBU40, new_tb);
		/* Same low-24-bit carry fixup as above. */
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		vc->tb_offset_applied = vc->tb_offset;
	}

	return ret;
}
0188
0189
0190
0191
0192
0193
0194 static inline int kvmppc_cur_subcore_size(void)
0195 {
0196 if (local_paca->kvm_hstate.kvm_split_mode)
0197 return local_paca->kvm_hstate.kvm_split_mode->subcore_size;
0198
0199 return threads_per_subcore;
0200 }
0201
0202 void kvmppc_subcore_enter_guest(void)
0203 {
0204 int thread_id, subcore_id;
0205
0206 thread_id = cpu_thread_in_core(local_paca->paca_index);
0207 subcore_id = thread_id / kvmppc_cur_subcore_size();
0208
0209 local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
0210 }
0211 EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);
0212
0213 void kvmppc_subcore_exit_guest(void)
0214 {
0215 int thread_id, subcore_id;
0216
0217 thread_id = cpu_thread_in_core(local_paca->paca_index);
0218 subcore_id = thread_id / kvmppc_cur_subcore_size();
0219
0220 local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
0221 }
0222 EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);
0223
0224 static bool kvmppc_tb_resync_required(void)
0225 {
0226 if (test_and_set_bit(CORE_TB_RESYNC_REQ_BIT,
0227 &local_paca->sibling_subcore_state->flags))
0228 return false;
0229
0230 return true;
0231 }
0232
0233 static void kvmppc_tb_resync_done(void)
0234 {
0235 clear_bit(CORE_TB_RESYNC_REQ_BIT,
0236 &local_paca->sibling_subcore_state->flags);
0237 }
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
/*
 * Real-mode HMI handler for core-wide (P7/P8-style) handling, where the
 * timebase must be resynced once per core while no thread on the core
 * is running a guest.  The sibling threads use the shared
 * sibling_subcore_state to elect one resyncing thread and to wait for
 * every subcore to leave its guest first.
 *
 * Returns 1 if the HMI was fully handled by the debug-trigger fast
 * path, 0 otherwise.
 */
long kvmppc_realmode_hmi_handler(void)
{
	bool resync_req;

	local_paca->hmi_irqs++;

	/* Debug-trigger HMIs can be handled entirely here. */
	if (hmi_handle_debugtrig(NULL) >= 0)
		return 1;

	/*
	 * Elect exactly one thread per core to perform the timebase
	 * resync below; everyone else will wait in wait_for_tb_resync().
	 */
	resync_req = kvmppc_tb_resync_required();

	/* Mark our subcore as having exited guest context. */
	kvmppc_subcore_exit_guest();

	/*
	 * Wait until all sibling subcores have left their guests, so the
	 * timebase can be resynced with no guest running on this core.
	 */
	wait_for_subcore_guest_exit();

	/* Give the platform a chance at early (real-mode) HMI recovery. */
	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(NULL);

	if (resync_req) {
		/* We won the election: resync the timebase via OPAL... */
		opal_resync_timebase();

		/* ...then release the waiting sibling threads. */
		kvmppc_tb_resync_done();
	} else {
		/* Another thread is resyncing; wait for it to finish. */
		wait_for_tb_resync();
	}

	/*
	 * The resync left the timebase at the host value, so any guest
	 * offset previously applied is gone; record that.
	 */
	if (local_paca->kvm_hstate.kvm_vcore)
		local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;

	return 0;
}