/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |   \
                      MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

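/*
 * Worked example (illustrative, not part of this header): for a guest
 * IA32_FIXED_CTR_CTRL value of 0xb0 (fixed counter 1 counting in OS and
 * user mode with PMI on overflow), the field for counter 1 is:
 *
 *   fixed_ctrl_field(0xb0, 1) == (0xb0 >> 4) & 0xf == 0xb
 */
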
#define VMWARE_BACKDOOR_PMC_HOST_TSC        0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME       0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME   0x10002

/* Maps an event select + unit mask pair to a generic perf hw event type. */
struct kvm_event_hw_type_mapping {
    u8 eventsel;
    u8 unit_mask;
    unsigned event_type;
};

/* Vendor-specific (Intel/AMD) callbacks, dispatched by the common PMU code. */
struct kvm_pmu_ops {
    bool (*hw_event_available)(struct kvm_pmc *pmc);
    bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
    struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
    struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
        unsigned int idx, u64 *mask);
    struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
    bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
    bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
    int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
    int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
    void (*refresh)(struct kvm_vcpu *vcpu);
    void (*init)(struct kvm_vcpu *vcpu);
    void (*reset)(struct kvm_vcpu *vcpu);
    void (*deliver_pmi)(struct kvm_vcpu *vcpu);
    void (*cleanup)(struct kvm_vcpu *vcpu);
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

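/*
 * Illustrative usage (a sketch, not a call made in this header): vendor
 * code installs its ops table once at init time, e.g. the Intel side
 * could pass the intel_pmu_ops table declared at the bottom of this file:
 *
 *   kvm_pmu_ops_update(&intel_pmu_ops);
 *
 * after which the common PMU code dispatches through those callbacks.
 */
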
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
    struct kvm_pmu *pmu = pmc_to_pmu(pmc);

    return pmu->counter_bitmask[pmc->type];
}

/* Return the cached counter value plus any count accrued by the live perf event. */
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
    u64 counter, enabled, running;

    counter = pmc->counter;
    if (pmc->perf_event && !pmc->is_paused)
        counter += perf_event_read_value(pmc->perf_event,
                         &enabled, &running);
    /* FIXME: Scaling needed? */
    return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
    if (pmc->perf_event) {
        perf_event_release_kernel(pmc->perf_event);
        pmc->perf_event = NULL;
        pmc->current_config = 0;
        pmc_to_pmu(pmc)->event_count--;
    }
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
    if (pmc->perf_event) {
        pmc->counter = pmc_read_counter(pmc);
        pmc_release_perf_event(pmc);
    }
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
    return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
    return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
                         u64 data)
{
    return !(pmu->global_ctrl_mask & data);
}
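
/*
 * Worked example (illustrative): pmu->global_ctrl_mask has a bit set for
 * every bit that is reserved in the guest's IA32_PERF_GLOBAL_CTRL. With
 * two GP counters and one fixed counter, only bits 0, 1 and 32 are valid,
 * so:
 *
 *   kvm_valid_perf_global_ctrl(pmu, 0x100000003ULL)  -> true
 *   kvm_valid_perf_global_ctrl(pmu, 0x4)             -> false (reserved)
 */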

/*
 * Returns the general-purpose PMC matching the given MSR. Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts the MSR
 * base as a parameter, to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                     u32 base)
{
    if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
        u32 index = array_index_nospec(msr - base,
                           pmu->nr_arch_gp_counters);

        return &pmu->gp_counters[index];
    }

    return NULL;
}
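
/*
 * Illustrative usage (mirroring how the vendor implementations call this;
 * the MSR names below are standard x86 defines, not introduced here):
 *
 *   get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);  // resolve a PERFCTRn access
 *   get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);    // resolve an EVNTSELn access
 */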

/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
    int base = MSR_CORE_PERF_FIXED_CTR0;

    if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
        u32 index = array_index_nospec(msr - base,
                           pmu->nr_arch_fixed_counters);

        return &pmu->fixed_counters[index];
    }

    return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
    u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

    if (!sample_period)
        sample_period = pmc_bitmask(pmc) + 1;
    return sample_period;
}
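
/*
 * Worked example (illustrative): for a 48-bit counter, pmc_bitmask() is
 * 0xffffffffffff. A guest counter value of 0xfffffffffffe, i.e. two
 * counts before overflow, yields:
 *
 *   (-0xfffffffffffe) & 0xffffffffffff == 2
 *
 * so the host perf event fires once the remaining counts elapse. A value
 * of 0 would give period 0, hence the pmc_bitmask(pmc) + 1 fallback.
 */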

static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
    if (!pmc->perf_event || pmc->is_paused)
        return;

    perf_event_period(pmc->perf_event,
              get_sample_period(pmc, pmc->counter));
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
    struct kvm_pmu *pmu = pmc_to_pmu(pmc);

    if (pmc_is_fixed(pmc))
        return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                    pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

    return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
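
/*
 * Worked example (illustrative): for fixed counters, the low two bits of
 * the IA32_FIXED_CTR_CTRL field are the OS and USR enable bits, so the
 * 0x3 test means "enabled at some privilege level". Fixed counter 0
 * enabled for user mode only is therefore considered in use:
 *
 *   fixed_ctrl_field(0x2, 0) & 0x3 == 0x2   (non-zero -> in use)
 */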

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(void)
{
    bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

    perf_get_x86_pmu_capability(&kvm_pmu_cap);

    /*
     * For Intel, only support a guest architectural PMU
     * on a host with an architectural PMU.
     */
    if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
        enable_pmu = false;

    if (!enable_pmu) {
        memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
        return;
    }

    kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
    kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                         KVM_PMC_MAX_FIXED);
}
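
/*
 * Example (illustrative): a host reporting arch PMU version 5 with four
 * fixed counters is exposed to guests as version 2 with
 * min(4, KVM_PMC_MAX_FIXED) fixed counters.
 */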

void reprogram_counter(struct kvm_pmc *pmc);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */