// SPDX-License-Identifier: GPL-2.0
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

#include <asm/cputype.h>

static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * A GSI of zero indicates that no PMU interrupt is described for
	 * this CPU, so there is nothing to register.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * The MADT GICC entry describes the trigger mode of the performance
	 * interrupt, but carries no polarity information, so active-high is
	 * assumed when registering the GSI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
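
/*
 * For reference, a minimal sketch of the MADT GICC fields consumed above,
 * as firmware might populate them for a level-triggered PMU interrupt on
 * PPI 23 (illustrative values only; they come from firmware, not from
 * this code):
 *
 *	gicc->performance_interrupt = 23;
 *	gicc->flags &= ~ACPI_MADT_PERFORMANCE_IRQ_MODE;
 */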

static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags	= IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};
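
/*
 * Sketch (not part of this file): the SPE PMU driver is expected to bind
 * to the platform device above by name, roughly as below. The probe
 * callback name is an assumption based on drivers/perf/arm_spe_pmu.c:
 *
 *	static struct platform_driver arm_spe_pmu_driver = {
 *		.driver	= { .name = ARMV8_SPE_PDEV_NAME },
 *		.probe	= arm_spe_pmu_device_probe,
 *	};
 */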

/*
 * Create a platform device for SPE if the MADT is new enough to describe
 * it and the description is homogeneous across all possible CPUs.
 */
static void arm_spe_acpi_register_device(void)
{
	int cpu, hetid, irq, ret;
	bool first = true;
	u16 gsi = 0;

	/*
	 * Sanity check all the GICC tables for the same interrupt number.
	 * For now, only homogeneous machines are supported: every CPU must
	 * report the same SPE interrupt and heterogeneity ID.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < ACPI_MADT_GICC_SPE)
			return;

		if (first) {
			gsi = gicc->spe_interrupt;
			if (!gsi)
				return;
			hetid = find_acpi_cpu_topology_hetero_id(cpu);
			first = false;
		} else if ((gsi != gicc->spe_interrupt) ||
			   (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
			pr_warn("ACPI: SPE must be homogeneous\n");
			return;
		}
	}

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
				ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: SPE: Unable to register interrupt: %d\n", gsi);
		return;
	}

	spe_resources[0].start = irq;
	ret = platform_device_register(&spe_dev);
	if (ret < 0) {
		pr_warn("ACPI: SPE: Unable to register device\n");
		acpi_unregister_gsi(gsi);
	}
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif

static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Record and request the IRQ now, so that the association
		 * between CPU and IRQ is fixed before any PMU is probed,
		 * and a failure to request it is caught early.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		err = armpmu_request_irq(irq, cpu);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Clear every other slot that recorded the same IRQ, so
		 * that its GSI is only unregistered once.
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
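
/*
 * Example of the unwind above: with a PPI, every CPU's pmu_irqs slot
 * records the same virq, but the corresponding GSI must only be
 * unregistered once. Clearing every matching slot before the next
 * iteration prevents a double acpi_unregister_gsi().
 */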

static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	pmu = armpmu_alloc_atomic();
	if (!pmu) {
		pr_warn("Unable to allocate PMU for CPU%d\n",
			smp_processor_id());
		return NULL;
	}

	pmu->acpi_cpuid = cpuid;

	return pmu;
}
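
/*
 * Example: on a hypothetical big.LITTLE system, the two core types
 * report different MIDR values via read_cpuid_id(), so the lookup
 * above produces two distinct arm_pmu instances, each shared by the
 * CPUs with a matching MIDR.
 */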

/*
 * Check whether the new IRQ is compatible with those already associated
 * with the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);

		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}
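
/*
 * Example: if CPU0 reported PPI 23 and CPU1 reported SPI 100 for the
 * same PMU, the loop above flags a mismatch, since a banked per-CPU
 * interrupt and a shared one cannot be handled the same way. Two
 * distinct SPIs, by contrast, are accepted.
 */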

/*
 * This must run before the common arm_pmu hotplug logic, so that the CPU
 * can be associated with its interrupt before the common code tries to
 * manage affinity. Hotplug events are serialized, so there is no race
 * with another CPU coming up.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;
	struct pmu_hw_events __percpu *hw_events;
	int irq;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	irq = per_cpu(pmu_irqs, cpu);

	pmu = arm_pmu_acpi_find_alloc_pmu();
	if (!pmu)
		return -ENOMEM;

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);

	/*
	 * Ideally the PMU would be probed here, on the first matching CPU,
	 * but that cannot be done from this context; see the comment in
	 * arm_pmu_acpi_probe(). For now, associating the CPU is enough.
	 */
	return 0;
}

int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	int cpu, ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally this would happen in arm_pmu_acpi_cpu_starting() so
	 * that late hotplug could be handled, but registering from within a
	 * hotplug callback risks deadlock, and the right init_fn would not
	 * be available there. As with the platform/DT case, at least one of
	 * a PMU's CPUs must be online at probe time.
	 */
	for_each_possible_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		char *base_name;

		if (!pmu || pmu->name)
			continue;

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver; skip it. */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return 0;
}
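
/*
 * Illustrative caller (not part of this file): an arm_pmu driver passes
 * its init function from its own initcall, along these lines, with
 * hypothetical names:
 *
 *	static int __init my_pmu_driver_init(void)
 *	{
 *		if (acpi_disabled)
 *			return platform_driver_register(&my_pmu_driver);
 *
 *		return arm_pmu_acpi_probe(my_pmu_init);
 *	}
 */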

static int arm_pmu_acpi_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
				"perf/arm/pmu_acpi:starting",
				arm_pmu_acpi_cpu_starting, NULL);

	return ret;
}
subsys_initcall(arm_pmu_acpi_init);