// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

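/*
 * The full-width counter MSRs (MSR_IA32_PMC0..n) alias the legacy counter
 * MSRs (MSR_IA32_PERFCTR0..n) at a fixed offset, so the offset doubles as
 * a flag bit: writes through the legacy MSRs are sign-extended from 32
 * bits, while full-width writes (gated by PMU_CAP_FW_WRITES) are not.
 */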
#define MSR_PMC_FULL_WIDTH_BIT		(MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	/* The indices above must match the CPUID 0x0A.EBX bit vector. */
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* Mapping between fixed-counter index and the intel_arch_events[] entry. */
static int fixed_pmc_events[] = {1, 0, 7};

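/*
 * On a write to MSR_CORE_PERF_FIXED_CTR_CTRL, reprogram any fixed counter
 * whose 4-bit control field changed and mark it as in use.
 */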
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	struct kvm_pmc *pmc;
	u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
	int i;

	pmu->fixed_ctr_ctrl = data;
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);

		if (old_ctrl == new_ctrl)
			continue;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_counter(pmc);
	}
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED) {
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	} else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;
	struct kvm_pmc *pmc;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);
		if (pmc)
			reprogram_counter(pmc);
	}
}

static bool intel_hw_event_available(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
		if (intel_arch_events[i].eventsel != event_select ||
		    intel_arch_events[i].unit_mask != unit_mask)
			continue;

		/* Disable the event if CPUID reports it as not available. */
		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
			return false;

		break;
	}

	return true;
}

/* Check if a PMC is enabled by comparing it against the global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!intel_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

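/*
 * Validate the ECX value for RDPMC: bit 30 selects the fixed-counter
 * space, and the low bits index into the corresponding counter array.
 */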
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return fixed ? idx < pmu->nr_arch_fixed_counters
		     : idx < pmu->nr_arch_gp_counters;
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

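/*
 * IA32_PERF_CAPABILITIES is exposed only if the guest has PDCM; without
 * it, all capability-gated features (full-width writes, LBR, PEBS) are
 * treated as absent.
 */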
static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return 0;

	return vcpu->arch.perf_capabilities;
}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

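/*
 * An MSR is a valid LBR MSR if it is MSR_LBR_SELECT, MSR_LBR_TOS, or lies
 * within one of the from/to/info stack ranges of the vCPU's LBR model.
 */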
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{
	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
	bool ret = false;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return ret;

	ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
	      (index >= records->from && index < records->from + records->nr) ||
	      (index >= records->to && index < records->to + records->nr);

	if (!ret && records->info)
		ret = (index >= records->info && index < records->info + records->nr);

	return ret;
}

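/*
 * The global control MSRs are accepted only for PMU version 2 or later;
 * the PEBS MSRs are gated by PERF_CAPABILITIES and CPUID; anything else
 * must resolve to a GP, fixed, full-width or LBR counter MSR.
 */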
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 perf_capabilities;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		return intel_pmu_has_perf_global_ctrl(pmu);
	case MSR_IA32_PEBS_ENABLE:
		ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
		break;
	case MSR_IA32_DS_AREA:
		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
		break;
	case MSR_PEBS_DATA_CFG:
		perf_capabilities = vcpu_get_perf_capabilities(vcpu);
		ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
		      ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
		      get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
		      get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
		      intel_pmu_is_valid_lbr_msr(vcpu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->event) {
		perf_event_release_kernel(lbr_desc->event);
		lbr_desc->event = NULL;
		vcpu_to_pmu(vcpu)->event_count--;
	}
}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct perf_event *event;

	/*
	 * The perf_event_attr is constructed in the minimum efficient way:
	 * - set 'pinned = true' to make it task pinned so that if another
	 *   cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
	 * - set '.exclude_host = true' to record guest branches behavior;
	 *
	 * - set '.config = INTEL_FIXED_VLBR_EVENT' so that host perf
	 *   schedules the event without a real HW counter but on a fake one
	 *   (check is_guest_lbr_event());
	 *
	 * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
	 *   'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
	 *   PERF_SAMPLE_BRANCH_USER' to configure it as an LBR callstack
	 *   event, which helps KVM to save/restore guest LBR records
	 *   during the context switch.
	 */
	struct perf_event_attr attr = {
		.type = PERF_TYPE_RAW,
		.size = sizeof(attr),
		.config = INTEL_FIXED_VLBR_EVENT,
		.sample_type = PERF_SAMPLE_BRANCH_STACK,
		.pinned = true,
		.exclude_host = true,
		.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
				      PERF_SAMPLE_BRANCH_USER,
	};

	if (unlikely(lbr_desc->event)) {
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		return 0;
	}

	event = perf_event_create_kernel_counter(&attr, -1,
						 current, NULL, NULL);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("%s: failed %ld\n",
				     __func__, PTR_ERR(event));
		return PTR_ERR(event);
	}
	lbr_desc->event = event;
	pmu->event_count++;
	__set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
	return 0;
}

/*
 * It is safe for the guest to access LBR MSRs that have not been passed
 * through: the host restores or resets the LBR MSR contents whenever the
 * guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
					     struct msr_data *msr_info, bool read)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	u32 index = msr_info->index;

	if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
		return false;

	if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
		goto dummy;

	/*
	 * Disable irqs to ensure the LBR feature doesn't get reclaimed by the
	 * host at the time the value is read from the MSR, which would leak
	 * host LBR values into the guest. If LBR has been reclaimed, return 0
	 * on guest reads.
	 */
	local_irq_disable();
	if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
		if (read)
			rdmsrl(index, msr_info->data);
		else
			wrmsrl(index, msr_info->data);
		__set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
		local_irq_enable();
		return true;
	}
	clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
	local_irq_enable();

dummy:
	if (read)
		msr_info->data = 0;
	return true;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		return 0;
	case MSR_IA32_PEBS_ENABLE:
		msr_info->data = pmu->pebs_enable;
		return 0;
	case MSR_IA32_DS_AREA:
		msr_info->data = pmu->ds_area;
		return 0;
	case MSR_PEBS_DATA_CFG:
		msr_info->data = pmu->pebs_data_cfg;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true))
			return 0;
	}

	return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 reserved_bits;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & pmu->fixed_ctr_ctrl_mask)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			return 0;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		if (pmu->pebs_enable == data)
			return 0;
		if (!(data & pmu->pebs_enable_mask)) {
			pmu->pebs_enable = data;
			return 0;
		}
		break;
	case MSR_IA32_DS_AREA:
		if (msr_info->host_initiated && data && !guest_cpuid_has(vcpu, X86_FEATURE_DS))
			return 1;
		if (is_noncanonical_address(data, vcpu))
			return 1;
		pmu->ds_area = data;
		return 0;
	case MSR_PEBS_DATA_CFG:
		if (pmu->pebs_data_cfg == data)
			return 0;
		if (!(data & pmu->pebs_data_cfg_mask)) {
			pmu->pebs_data_cfg = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			/* Guest writes via legacy PERFCTRx MSRs are sign-extended from 32 bits. */
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			pmc_update_sample_period(pmc);
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			pmc_update_sample_period(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			/* IN_TX_CP (HSW_IN_TX_CHECKPOINTED) is only allowed on PMC2. */
			reserved_bits = pmu->reserved_bits;
			if ((pmc->idx == 2) &&
			    (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
				reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
			if (!(data & reserved_bits)) {
				pmc->eventsel = data;
				reprogram_counter(pmc);
				return 0;
			}
		} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
			return 0;
	}

	return 1;
}

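/*
 * Seed each fixed counter's eventsel with the architectural event it
 * measures (via fixed_pmc_events), so that common code can program fixed
 * and GP counters uniformly.
 */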
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
	size_t size = ARRAY_SIZE(fixed_pmc_events);
	struct kvm_pmc *pmc;
	u32 event;
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		pmc = &pmu->fixed_counters[i];
		event = fixed_pmc_events[array_index_nospec(i, size)];
		pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
				intel_arch_events[event].eventsel;
	}
}

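/*
 * Rebuild the vCPU's PMU model from guest CPUID leaf 0xA (counter counts,
 * widths, available events) and IA32_PERF_CAPABILITIES (LBR and PEBS),
 * clamping everything to what the host PMU supports (kvm_pmu_cap).
 */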
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	u64 perf_capabilities;
	u64 counter_mask;
	int i;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
	pmu->global_ctrl_mask = ~0ull;
	pmu->global_ovf_ctrl_mask = ~0ull;
	pmu->fixed_ctr_ctrl_mask = ~0ull;
	pmu->pebs_enable_mask = ~0ull;
	pmu->pebs_data_cfg_mask = ~0ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa);
	if (!entry || !vcpu->kvm->arch.enable_pmu)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 kvm_pmu_cap.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width,
				    kvm_pmu_cap.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length,
				      kvm_pmu_cap.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
				     ((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min3(ARRAY_SIZE(fixed_pmc_events),
			     (size_t)edx.split.num_counters_fixed,
			     (size_t)kvm_pmu_cap.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
						  kvm_pmu_cap.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
		setup_fixed_pmc_eventsel(pmu);
	}

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
	pmu->global_ctrl_mask = counter_mask;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
		pmu->reserved_bits ^= HSW_IN_TX;
		pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
	}

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	perf_capabilities = vcpu_get_perf_capabilities(vcpu);
	if (cpuid_model_is_consistent(vcpu) &&
	    (perf_capabilities & PMU_CAP_LBR_FMT))
		x86_perf_get_lbr(&lbr_desc->records);
	else
		lbr_desc->records.nr = 0;

	if (lbr_desc->records.nr)
		bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);

	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
			pmu->pebs_enable_mask = counter_mask;
			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
				pmu->fixed_ctr_ctrl_mask &=
					~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
			}
			pmu->pebs_data_cfg_mask = ~0xff00000full;
		} else {
			pmu->pebs_enable_mask =
				~((1ull << pmu->nr_arch_gp_counters) - 1);
		}
	}
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
	lbr_desc->records.nr = 0;
	lbr_desc->event = NULL;
	lbr_desc->msr_passthrough = false;
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

	intel_pmu_release_guest_lbr_event(vcpu);
}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBR is frozen on PMI: KVM emulates the
 * freeze by clearing the LBR enable bit (bit 0) in the guest's
 * IA32_DEBUGCTL. The guest needs to re-enable LBR to resume branch
 * recording.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{
	u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		data &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
	}
}

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	u8 version = vcpu_to_pmu(vcpu)->version;

	if (!intel_pmu_lbr_is_enabled(vcpu))
		return;

	if (version > 1 && version < 4)
		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
}

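/*
 * Toggle interception for the entire LBR MSR set as a group: the
 * from/to/info stacks plus MSR_LBR_SELECT and MSR_LBR_TOS.
 */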
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{
	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
	int i;

	for (i = 0; i < lbr->nr; i++) {
		vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
		vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
		if (lbr->info)
			vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
	}

	vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
	vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, true);
	lbr_desc->msr_passthrough = false;
}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (lbr_desc->msr_passthrough)
		return;

	vmx_update_intercept_for_lbr_msrs(vcpu, false);
	lbr_desc->msr_passthrough = true;
}

/*
 * Higher priority host perf events (e.g. cpu pinned) could reclaim the
 * pmu resources (e.g. LBR) that were assigned to the guest. This is
 * usually done via ipi calls (more details in perf_install_in_context).
 *
 * Before entering the non-root mode (with irqs disabled here), double
 * confirm that the pmu features enabled for the guest have not been
 * reclaimed by higher priority host events. Otherwise, disallow vCPU
 * access to the reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

	if (!lbr_desc->event) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
			goto warn;
		if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
			goto warn;
		return;
	}

	if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
		vmx_disable_lbr_msrs_passthrough(vcpu);
		__clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
		goto warn;
	} else
		vmx_enable_lbr_msrs_passthrough(vcpu);

	return;

warn:
	pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n",
		vcpu->vcpu_id);
}

static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
		intel_pmu_release_guest_lbr_event(vcpu);
}

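/*
 * Note which enabled guest PMCs are backed by a host perf counter with a
 * different index than the guest's; host_cross_mapped_mask is presumably
 * consumed by callers that must special-case such cross-mapped counters.
 */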
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc = NULL;
	int bit;

	for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
			 X86_PMC_IDX_MAX) {
		pmc = intel_pmc_idx_to_pmc(pmu, bit);

		if (!pmc || !pmc_speculative_in_use(pmc) ||
		    !intel_pmc_is_enabled(pmc))
			continue;

		if (pmc->perf_event && pmc->idx != pmc->perf_event->hw.idx) {
			pmu->host_cross_mapped_mask |=
				BIT_ULL(pmc->perf_event->hw.idx);
		}
	}
}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
	.hw_event_available = intel_hw_event_available,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
};