// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on the pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
    PMU_TYPE_COUNTER = 0,
    PMU_TYPE_EVNTSEL,
};

enum index {
    INDEX_ZERO = 0,
    INDEX_ONE,
    INDEX_TWO,
    INDEX_THREE,
    INDEX_FOUR,
    INDEX_FIVE,
    INDEX_ERROR,
};

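/*
 * Return the base MSR of the requested register bank: the extended
 * MSR_F15H_PERF_* range when the guest has PERFCTR_CORE, otherwise the
 * legacy MSR_K7_* range.
 */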
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
    struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

    if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
        if (type == PMU_TYPE_COUNTER)
            return MSR_F15H_PERF_CTR;
        else
            return MSR_F15H_PERF_CTL;
    } else {
        if (type == PMU_TYPE_COUNTER)
            return MSR_K7_PERFCTR0;
        else
            return MSR_K7_EVNTSEL0;
    }
}

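/* Map a counter or event-select MSR to its zero-based counter index. */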
static enum index msr_to_index(u32 msr)
{
    switch (msr) {
    case MSR_F15H_PERF_CTL0:
    case MSR_F15H_PERF_CTR0:
    case MSR_K7_EVNTSEL0:
    case MSR_K7_PERFCTR0:
        return INDEX_ZERO;
    case MSR_F15H_PERF_CTL1:
    case MSR_F15H_PERF_CTR1:
    case MSR_K7_EVNTSEL1:
    case MSR_K7_PERFCTR1:
        return INDEX_ONE;
    case MSR_F15H_PERF_CTL2:
    case MSR_F15H_PERF_CTR2:
    case MSR_K7_EVNTSEL2:
    case MSR_K7_PERFCTR2:
        return INDEX_TWO;
    case MSR_F15H_PERF_CTL3:
    case MSR_F15H_PERF_CTR3:
    case MSR_K7_EVNTSEL3:
    case MSR_K7_PERFCTR3:
        return INDEX_THREE;
    case MSR_F15H_PERF_CTL4:
    case MSR_F15H_PERF_CTR4:
        return INDEX_FOUR;
    case MSR_F15H_PERF_CTL5:
    case MSR_F15H_PERF_CTR5:
        return INDEX_FIVE;
    default:
        return INDEX_ERROR;
    }
}

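/*
 * Translate an MSR of the given type into its general-purpose PMC.
 * Reject MSRs of the wrong type, the extended MSRs when the guest lacks
 * PERFCTR_CORE, and all MSRs when the PMU is disabled for the VM.
 */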
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                         enum pmu_type type)
{
    struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

    if (!vcpu->kvm->arch.enable_pmu)
        return NULL;

    switch (msr) {
    case MSR_F15H_PERF_CTL0:
    case MSR_F15H_PERF_CTL1:
    case MSR_F15H_PERF_CTL2:
    case MSR_F15H_PERF_CTL3:
    case MSR_F15H_PERF_CTL4:
    case MSR_F15H_PERF_CTL5:
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
            return NULL;
        fallthrough;
    case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
        if (type != PMU_TYPE_EVNTSEL)
            return NULL;
        break;
    case MSR_F15H_PERF_CTR0:
    case MSR_F15H_PERF_CTR1:
    case MSR_F15H_PERF_CTR2:
    case MSR_F15H_PERF_CTR3:
    case MSR_F15H_PERF_CTR4:
    case MSR_F15H_PERF_CTR5:
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
            return NULL;
        fallthrough;
    case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
        if (type != PMU_TYPE_COUNTER)
            return NULL;
        break;
    default:
        return NULL;
    }

    return &pmu->gp_counters[msr_to_index(msr)];
}

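/*
 * AMD CPUID does not enumerate unavailable events, so every hardware
 * event is treated as available.
 */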
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
    return true;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
    return true;
}

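/* Translate a contiguous counter index into its PMC via the counter MSRs. */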
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
    unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
    struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

    if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
        /*
         * The idx is contiguous. The MSRs are not. The counter MSRs
         * are interleaved with the event select MSRs.
         */
        pmc_idx *= 2;
    }

    return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

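/*
 * Validate the counter index in RDPMC's ECX; the top two bits of ECX
 * are ignored.
 */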
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

    idx &= ~(3u << 30);

    return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
    unsigned int idx, u64 *mask)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    struct kvm_pmc *counters;

    idx &= ~(3u << 30);
    if (idx >= pmu->nr_arch_gp_counters)
        return NULL;
    counters = pmu->gp_counters;

    return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
    /* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
    return false;
}

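/*
 * Resolve an MSR to its PMC, trying the counter bank first and then the
 * event selects.
 */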
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    struct kvm_pmc *pmc;

    pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
    pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

    return pmc;
}

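/* Read a counter or event-select MSR; returns 1 for an unhandled MSR. */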
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    struct kvm_pmc *pmc;
    u32 msr = msr_info->index;

    /* MSR_PERFCTRn */
    pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
    if (pmc) {
        msr_info->data = pmc_read_counter(pmc);
        return 0;
    }
    /* MSR_EVNTSELn */
    pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
    if (pmc) {
        msr_info->data = pmc->eventsel;
        return 0;
    }

    return 1;
}

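/*
 * Write a counter or event-select MSR, reprogramming the backing perf
 * event only when the event selection actually changes.
 */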
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    struct kvm_pmc *pmc;
    u32 msr = msr_info->index;
    u64 data = msr_info->data;

    /* MSR_PERFCTRn */
    pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
    if (pmc) {
        pmc->counter += data - pmc_read_counter(pmc);
        pmc_update_sample_period(pmc);
        return 0;
    }
    /* MSR_EVNTSELn */
    pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
    if (pmc) {
        data &= ~pmu->reserved_bits;
        if (data != pmc->eventsel) {
            pmc->eventsel = data;
            reprogram_counter(pmc);
        }
        return 0;
    }

    return 1;
}

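/*
 * (Re)compute PMU state from guest CPUID: PERFCTR_CORE widens the bank
 * from four to six general-purpose counters, each 48 bits wide.
 */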
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

    if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
        pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
    else
        pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

    pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
    pmu->reserved_bits = 0xfffffff000280000ull;
    pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
    pmu->version = 1;
    /* not applicable to AMD, but clear them to prevent any fallout */
    pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
    pmu->nr_arch_fixed_counters = 0;
    pmu->global_status = 0;
    bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

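/* Per-vCPU setup of the general-purpose counter array at vCPU creation. */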
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    int i;

    BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

    for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
        pmu->gp_counters[i].type = KVM_PMC_GP;
        pmu->gp_counters[i].vcpu = vcpu;
        pmu->gp_counters[i].idx = i;
        pmu->gp_counters[i].current_config = 0;
    }
}

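/* Stop each counter and clear its count and event selection on vCPU reset. */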
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
    struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    int i;

    for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
        struct kvm_pmc *pmc = &pmu->gp_counters[i];

        pmc_stop_counter(pmc);
        pmc->counter = pmc->eventsel = 0;
    }
}

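/* The PMU callbacks KVM uses when running on SVM-capable (AMD) hardware. */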
struct kvm_pmu_ops amd_pmu_ops __initdata = {
    .hw_event_available = amd_hw_event_available,
    .pmc_is_enabled = amd_pmc_is_enabled,
    .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
    .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
    .msr_idx_to_pmc = amd_msr_idx_to_pmc,
    .is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
    .is_valid_msr = amd_is_valid_msr,
    .get_msr = amd_pmu_get_msr,
    .set_msr = amd_pmu_set_msr,
    .refresh = amd_pmu_refresh,
    .init = amd_pmu_init,
    .reset = amd_pmu_reset,
};