// SPDX-License-Identifier: GPL-2.0-only

#include <asm/kvm_ppc.h>
#include <asm/pmc.h>

#include "book3s_hv.h"

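/*
 * If the mmcr0/mmcra values just read do not already describe a fully
 * frozen PMU (counters frozen, sampling disabled and, on ISA v3.1,
 * PMCC extension set and BHRB disabled), write a frozen configuration
 * to the PMU registers.
 */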
static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
{
    if (!(mmcr0 & MMCR0_FC))
        goto do_freeze;
    if (mmcra & MMCRA_SAMPLE_ENABLE)
        goto do_freeze;
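    /*
     * ISA v3.1 adds further controls that must be set before the PMU
     * counts as quiesced: the PMCC extension and BHRB disable bits.
     */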
    if (cpu_has_feature(CPU_FTR_ARCH_31)) {
        if (!(mmcr0 & MMCR0_PMCCEXT))
            goto do_freeze;
        if (!(mmcra & MMCRA_BHRB_DISABLE))
            goto do_freeze;
    }
    return;

do_freeze:
    mmcr0 = MMCR0_FC;
    mmcra = 0;
    if (cpu_has_feature(CPU_FTR_ARCH_31)) {
        mmcr0 |= MMCR0_PMCCEXT;
        mmcra = MMCRA_BHRB_DISABLE;
    }

    mtspr(SPRN_MMCR0, mmcr0);
    mtspr(SPRN_MMCRA, mmcra);
    isync();
}

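/*
 * Save the host PMU state (if the host has the PMU in use), advertise
 * the guest's pmcregs_in_use value to the real hypervisor when running
 * as a nested HV, then load the guest PMU state if the guest has it in
 * use or has demand-faulted PMU access via HFSCR[PM].
 */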
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
             struct p9_host_os_sprs *host_os_sprs)
{
    struct lppaca *lp;
    int load_pmu = 1;

    lp = vcpu->arch.vpa.pinned_addr;
    if (lp)
        load_pmu = lp->pmcregs_in_use;

    /* Save host */
    if (ppc_get_pmu_inuse()) {
        /* POWER9, POWER10 do not implement HPMC or SPMC */

        host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
        host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

        freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

        host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
        host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
        host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
        host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
        host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
        host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
        host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
        host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
        host_os_sprs->sdar = mfspr(SPRN_SDAR);
        host_os_sprs->siar = mfspr(SPRN_SIAR);
        host_os_sprs->sier1 = mfspr(SPRN_SIER);

        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
            host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
            host_os_sprs->sier2 = mfspr(SPRN_SIER2);
            host_os_sprs->sier3 = mfspr(SPRN_SIER3);
        }
    }

#ifdef CONFIG_PPC_PSERIES
    /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
    if (kvmhv_on_pseries()) {
        barrier();
        get_lppaca()->pmcregs_in_use = load_pmu;
        barrier();
    }
#endif

    /*
     * Load guest. If the VPA said the PMCs are not in use but the guest
     * tried to access them anyway, HFSCR[PM] will be set by the HFAC
     * fault so we can make forward progress.
     */
    if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
        mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
        mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
        mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
        mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
        mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
        mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
        mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
        mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
        mtspr(SPRN_SDAR, vcpu->arch.sdar);
        mtspr(SPRN_SIAR, vcpu->arch.siar);
        mtspr(SPRN_SIER, vcpu->arch.sier[0]);

        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
            mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
            mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
            mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
        }

        /* Set MMCRA then MMCR0 last */
        mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
        mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
        /* No isync necessary because we're starting counters */

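        /*
         * Re-grant the PM facility to the guest (if permitted) so its
         * next PMU access will not fault now that the state is loaded.
         */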
        if (!vcpu->arch.nested &&
            (vcpu->arch.hfscr_permitted & HFSCR_PM))
            vcpu->arch.hfscr |= HFSCR_PM;
    }
}
EXPORT_SYMBOL_GPL(switch_pmu_to_guest);

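/*
 * Save the guest PMU state back into the vcpu (freezing the PMU first),
 * or just freeze the PMU if the guest touched it without marking it in
 * use, then restore the host PMU state saved by switch_pmu_to_guest().
 */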
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
            struct p9_host_os_sprs *host_os_sprs)
{
    struct lppaca *lp;
    int save_pmu = 1;

    lp = vcpu->arch.vpa.pinned_addr;
    if (lp)
        save_pmu = lp->pmcregs_in_use;
    if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
        /*
         * Save the PMU if this guest is capable of running nested
         * guests. This option is for old L1s that do not set their
         * lppaca->pmcregs_in_use properly when entering their L2.
         */
        save_pmu |= nesting_enabled(vcpu->kvm);
    }

    if (save_pmu) {
        vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
        vcpu->arch.mmcra = mfspr(SPRN_MMCRA);

        freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);

        vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
        vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
        vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
        vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
        vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
        vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
        vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
        vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
        vcpu->arch.sdar = mfspr(SPRN_SDAR);
        vcpu->arch.siar = mfspr(SPRN_SIAR);
        vcpu->arch.sier[0] = mfspr(SPRN_SIER);

        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
            vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
            vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
            vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
        }

    } else if (vcpu->arch.hfscr & HFSCR_PM) {
        /*
         * The guest accessed PMC SPRs without specifying they should
         * be preserved, or it cleared pmcregs_in_use after the last
         * access. Just ensure they are frozen.
         */
        freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));

        /*
         * Demand-fault PMU register access in the guest.
         *
         * This is used to grab the guest's VPA pmcregs_in_use value
         * and reflect it into the host's VPA in the case of a nested
         * hypervisor.
         *
         * It also avoids having to zero-out SPRs after each guest
         * exit to avoid side-channels.
         *
         * This is cleared here when we exit the guest, so later HFSCR
         * interrupt handling can add it back to run the guest with
         * PM enabled next time.
         */
        if (!vcpu->arch.nested)
            vcpu->arch.hfscr &= ~HFSCR_PM;
    } /* otherwise the PMU should still be frozen */

#ifdef CONFIG_PPC_PSERIES
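    /* Tell the real hypervisor whether the host still has the PMU in use */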
    if (kvmhv_on_pseries()) {
        barrier();
        get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
        barrier();
    }
#endif

    if (ppc_get_pmu_inuse()) {
        mtspr(SPRN_PMC1, host_os_sprs->pmc1);
        mtspr(SPRN_PMC2, host_os_sprs->pmc2);
        mtspr(SPRN_PMC3, host_os_sprs->pmc3);
        mtspr(SPRN_PMC4, host_os_sprs->pmc4);
        mtspr(SPRN_PMC5, host_os_sprs->pmc5);
        mtspr(SPRN_PMC6, host_os_sprs->pmc6);
        mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
        mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
        mtspr(SPRN_SDAR, host_os_sprs->sdar);
        mtspr(SPRN_SIAR, host_os_sprs->siar);
        mtspr(SPRN_SIER, host_os_sprs->sier1);

        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
            mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
            mtspr(SPRN_SIER2, host_os_sprs->sier2);
            mtspr(SPRN_SIER3, host_os_sprs->sier3);
        }

        /* Set MMCRA then MMCR0 last */
        mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
        mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
        isync();
    }
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);