0001
0002
0003 #include <asm/kvm_ppc.h>
0004 #include <asm/pmc.h>
0005
0006 #include "book3s_hv.h"
0007
0008 static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
0009 {
0010 if (!(mmcr0 & MMCR0_FC))
0011 goto do_freeze;
0012 if (mmcra & MMCRA_SAMPLE_ENABLE)
0013 goto do_freeze;
0014 if (cpu_has_feature(CPU_FTR_ARCH_31)) {
0015 if (!(mmcr0 & MMCR0_PMCCEXT))
0016 goto do_freeze;
0017 if (!(mmcra & MMCRA_BHRB_DISABLE))
0018 goto do_freeze;
0019 }
0020 return;
0021
0022 do_freeze:
0023 mmcr0 = MMCR0_FC;
0024 mmcra = 0;
0025 if (cpu_has_feature(CPU_FTR_ARCH_31)) {
0026 mmcr0 |= MMCR0_PMCCEXT;
0027 mmcra = MMCRA_BHRB_DISABLE;
0028 }
0029
0030 mtspr(SPRN_MMCR0, mmcr0);
0031 mtspr(SPRN_MMCRA, mmcra);
0032 isync();
0033 }
0034
/*
 * Switch the PMU from host to guest context: save the host PMU state if
 * the host has the PMU in use, publish the guest's pmcregs_in_use flag
 * when running under a hypervisor, then load the guest PMU state.
 */
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			 struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int load_pmu = 1;

	/* The guest's VPA (if pinned) says whether its PMU regs are live. */
	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		load_pmu = lp->pmcregs_in_use;

	/* Save host PMU state. */
	if (ppc_get_pmu_inuse()) {
		/*
		 * Read MMCR0/MMCRA first and freeze the PMU before saving
		 * the remaining registers, so the counters and sampling
		 * registers stop moving while we read them.
		 */
		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
		host_os_sprs->sdar = mfspr(SPRN_SDAR);
		host_os_sprs->siar = mfspr(SPRN_SIAR);
		host_os_sprs->sier1 = mfspr(SPRN_SIER);

		/* MMCR3/SIER2/SIER3 only exist from ISA v3.1 onward. */
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
		}
	}

#ifdef CONFIG_PPC_PSERIES
	/*
	 * After saving the host PMU and before loading the guest's,
	 * reflect the guest's pmcregs_in_use into our own lppaca so the
	 * hypervisor above us preserves the right register set. The
	 * barrier()s keep the flag update ordered against the PMU
	 * register accesses on either side.
	 */
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = load_pmu;
		barrier();
	}
#endif

	/*
	 * Load guest PMU state. Even when the VPA says the PMCs are not
	 * in use, load them if HFSCR[PM] is set — the guest has touched
	 * PMU SPRs since the flag was last cleared (see the demand-fault
	 * handling in switch_pmu_to_host()).
	 */
	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
		mtspr(SPRN_SDAR, vcpu->arch.sdar);
		mtspr(SPRN_SIAR, vcpu->arch.siar);
		mtspr(SPRN_SIER, vcpu->arch.sier[0]);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
		}

		/* Set MMCRA then MMCR0 last, which may unfreeze counters. */
		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
		/* No isync necessary because we're starting counters. */

		/*
		 * Re-grant the guest direct PMU access when permitted, so
		 * it does not take HFAC faults on PMU SPRs next entry.
		 */
		if (!vcpu->arch.nested &&
		    (vcpu->arch.hfscr_permitted & HFSCR_PM))
			vcpu->arch.hfscr |= HFSCR_PM;
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
0117
/*
 * Switch the PMU from guest to host context: save (or just freeze) the
 * guest PMU state, update pmcregs_in_use in the lppaca when running
 * under a hypervisor, then restore the host PMU state if it was in use.
 */
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int save_pmu = 1;

	/* The guest's VPA (if pinned) says whether its PMU regs are live. */
	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		save_pmu = lp->pmcregs_in_use;
	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
		/*
		 * Also save the PMU if this guest is capable of running
		 * nested guests. This works around old L1s that do not
		 * set lppaca->pmcregs_in_use properly when entering
		 * their L2.
		 */
		save_pmu |= nesting_enabled(vcpu->kvm);
	}

	if (save_pmu) {
		/*
		 * As on the guest-entry side: read and freeze MMCR0/MMCRA
		 * first so the remaining registers are stable to save.
		 */
		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);

		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
		vcpu->arch.sdar = mfspr(SPRN_SDAR);
		vcpu->arch.siar = mfspr(SPRN_SIAR);
		vcpu->arch.sier[0] = mfspr(SPRN_SIER);

		/* MMCR3/SIER2/SIER3 only exist from ISA v3.1 onward. */
		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
		}

	} else if (vcpu->arch.hfscr & HFSCR_PM) {
		/*
		 * The guest accessed PMU SPRs without saying they should
		 * be preserved (pmcregs_in_use clear), so there is no
		 * state to save — just make sure the PMU is frozen so
		 * nothing keeps counting into the host.
		 */
		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));

		/*
		 * Demand-fault PMU register access in the guest: clear
		 * HFSCR[PM] here on exit so a later HFAC fault on a PMU
		 * SPR can set it again, and only then does the entry path
		 * need to load/store guest PMU state. This avoids the
		 * cost of the full PMU switch for guests that do not use
		 * the PMU. Never clear it for a nested vcpu, whose HFSCR
		 * is controlled by the L1.
		 */
		if (!vcpu->arch.nested)
			vcpu->arch.hfscr &= ~HFSCR_PM;
	} /* otherwise the PMU should already be frozen */

#ifdef CONFIG_PPC_PSERIES
	/*
	 * Reflect the host's own PMU-in-use state back into our lppaca
	 * for the hypervisor above us; barrier()s order the flag update
	 * against the surrounding PMU register accesses.
	 */
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif

	/* Restore host PMU state saved in switch_pmu_to_guest(). */
	if (ppc_get_pmu_inuse()) {
		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
		mtspr(SPRN_SDAR, host_os_sprs->sdar);
		mtspr(SPRN_SIAR, host_os_sprs->siar);
		mtspr(SPRN_SIER, host_os_sprs->sier1);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
			mtspr(SPRN_SIER2, host_os_sprs->sier2);
			mtspr(SPRN_SIER3, host_os_sprs->sier3);
		}

		/* Set MMCRA then MMCR0 last, which may unfreeze counters. */
		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
		isync();
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);