// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);
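
/*
 * Given the perf event attributes and system type, determine
 * if we are going to need to switch counters at guest entry/exit.
 */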
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
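	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2,
	 * so if user (EL0) is excluded there is no reason to switch
	 * counters.
	 */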
	if (has_vhe() && attr->exclude_user)
		return false;
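
	/* Only switch if attributes are different */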
	return (attr->exclude_host != attr->exclude_guest);
}

struct kvm_pmu_events *kvm_get_pmu_events(void)
{
	return this_cpu_ptr(&kvm_pmu_events);
}
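
/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */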
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		pmu->events_host |= set;
	if (!attr->exclude_guest)
		pmu->events_guest |= set;
}
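
/*
 * Stop tracking events
 */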
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3() || !pmu)
		return;

	pmu->events_host &= ~clr;
	pmu->events_guest &= ~clr;
}

#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
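
/*
 * Read a value direct from PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */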
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}
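
/*
 * Write a value direct to PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */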
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}
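
/*
 * Modify ARMv8 PMU events to include EL0 counting
 */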
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
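
/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */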
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
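
/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This may be called from a preemptible context, hence the explicit
 * preemption disable around the per-CPU state update below.
 */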
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	preempt_disable();
	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}
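
/*
 * On VHE ensure that only host events have EL0 counting enabled.
 */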
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}