// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cputype.h>

static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);

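/*
 * Register the performance interrupt (GSI) described by this CPU's MADT
 * GICC entry. Returns a Linux IRQ number, 0 if no usable interrupt is
 * described, or a negative error code from acpi_register_gsi().
 */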
static int arm_pmu_acpi_register_irq(int cpu)
{
    struct acpi_madt_generic_interrupt *gicc;
    int gsi, trigger;

    gicc = acpi_cpu_get_madt_gicc(cpu);

    gsi = gicc->performance_interrupt;

    /*
     * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
     * have an interrupt. QEMU advertises this by using a GSI of zero,
     * which is not known to be valid on any hardware despite being
     * valid per the spec. Take the pragmatic approach and reject a
     * GSI of zero for now.
     */
    if (!gsi)
        return 0;

    if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
        trigger = ACPI_EDGE_SENSITIVE;
    else
        trigger = ACPI_LEVEL_SENSITIVE;

    /*
     * Helpfully, the MADT GICC doesn't have a polarity flag for the
     * "performance interrupt". Luckily, on compliant GICs the polarity is
     * a fixed value in HW (for both SPIs and PPIs) that we cannot change
     * from SW.
     *
     * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
     * may not match the real polarity, but that should not matter.
     *
     * Other interrupt controllers are not supported with ACPI.
     */
    return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

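/* Release the GSI registered by arm_pmu_acpi_register_irq(), if any. */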
static void arm_pmu_acpi_unregister_irq(int cpu)
{
    struct acpi_madt_generic_interrupt *gicc;
    int gsi;

    gicc = acpi_cpu_get_madt_gicc(cpu);

    gsi = gicc->performance_interrupt;
    if (gsi)
        acpi_unregister_gsi(gsi);
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
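/*
 * Skeleton platform device for the SPE driver. The IRQ resource is filled
 * in by arm_spe_acpi_register_device() once the MADT has been validated.
 */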
static struct resource spe_resources[] = {
    {
        /* irq */
        .flags          = IORESOURCE_IRQ,
    }
};

static struct platform_device spe_dev = {
    .name = ARMV8_SPE_PDEV_NAME,
    .id = -1,
    .resource = spe_resources,
    .num_resources = ARRAY_SIZE(spe_resources)
};

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
    int cpu, hetid, irq, ret;
    bool first = true;
    u16 gsi = 0;

    /*
     * Sanity check all the GICC tables for the same interrupt number.
     * For now, we only support homogeneous ACPI/SPE machines.
     */
    for_each_possible_cpu(cpu) {
        struct acpi_madt_generic_interrupt *gicc;

        gicc = acpi_cpu_get_madt_gicc(cpu);
        if (gicc->header.length < ACPI_MADT_GICC_SPE)
            return;

        if (first) {
            gsi = gicc->spe_interrupt;
            if (!gsi)
                return;
            hetid = find_acpi_cpu_topology_hetero_id(cpu);
            first = false;
        } else if ((gsi != gicc->spe_interrupt) ||
               (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
            pr_warn("ACPI: SPE must be homogeneous\n");
            return;
        }
    }

    irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
                ACPI_ACTIVE_HIGH);
    if (irq < 0) {
        pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
        return;
    }

    spe_resources[0].start = irq;
    ret = platform_device_register(&spe_dev);
    if (ret < 0) {
        pr_warn("ACPI: SPE: Unable to register device\n");
        acpi_unregister_gsi(gsi);
    }
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */

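/*
 * Register and request the PMU interrupt for every possible CPU, recording
 * each one in pmu_irqs. On failure, tear down whatever was set up so far,
 * taking care to unregister each GSI only once.
 */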
static int arm_pmu_acpi_parse_irqs(void)
{
    int irq, cpu, irq_cpu, err;

    for_each_possible_cpu(cpu) {
        irq = arm_pmu_acpi_register_irq(cpu);
        if (irq < 0) {
            err = irq;
            pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
                cpu, err);
            goto out_err;
        } else if (irq == 0) {
            pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
        }

        /*
         * Log and request the IRQ so the core arm_pmu code can manage
         * it. We'll have to sanity-check IRQs later when we associate
         * them with their PMUs.
         */
        per_cpu(pmu_irqs, cpu) = irq;
        err = armpmu_request_irq(irq, cpu);
        if (err)
            goto out_err;
    }

    return 0;

out_err:
    for_each_possible_cpu(cpu) {
        irq = per_cpu(pmu_irqs, cpu);
        if (!irq)
            continue;

        arm_pmu_acpi_unregister_irq(cpu);

        /*
         * Blat all copies of the IRQ so that we only unregister the
         * corresponding GSI once (e.g. when we have PPIs).
         */
        for_each_possible_cpu(irq_cpu) {
            if (per_cpu(pmu_irqs, irq_cpu) == irq)
                per_cpu(pmu_irqs, irq_cpu) = 0;
        }
    }

    return err;
}

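/*
 * Find a previously-probed PMU whose CPU id (MIDR) matches the current CPU,
 * or allocate a fresh arm_pmu if this is the first CPU of its type.
 */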
static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
{
    unsigned long cpuid = read_cpuid_id();
    struct arm_pmu *pmu;
    int cpu;

    for_each_possible_cpu(cpu) {
        pmu = per_cpu(probed_pmus, cpu);
        if (!pmu || pmu->acpi_cpuid != cpuid)
            continue;

        return pmu;
    }

    pmu = armpmu_alloc_atomic();
    if (!pmu) {
        pr_warn("Unable to allocate PMU for CPU%d\n",
            smp_processor_id());
        return NULL;
    }

    pmu->acpi_cpuid = cpuid;

    return pmu;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
    struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
    int cpu;

    if (!irq)
        return true;

    for_each_cpu(cpu, &pmu->supported_cpus) {
        int other_irq = per_cpu(hw_events->irq, cpu);
        if (!other_irq)
            continue;

        if (irq == other_irq)
            continue;
        if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
            continue;

        pr_warn("mismatched PPIs detected\n");
        return false;
    }

    return true;
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
    struct arm_pmu *pmu;
    struct pmu_hw_events __percpu *hw_events;
    int irq;

    /* If we've already probed this CPU, we have nothing to do */
    if (per_cpu(probed_pmus, cpu))
        return 0;

    irq = per_cpu(pmu_irqs, cpu);

    pmu = arm_pmu_acpi_find_alloc_pmu();
    if (!pmu)
        return -ENOMEM;

    per_cpu(probed_pmus, cpu) = pmu;

    if (pmu_irq_matches(pmu, irq)) {
        hw_events = pmu->hw_events;
        per_cpu(hw_events->irq, cpu) = irq;
    }

    cpumask_set_cpu(cpu, &pmu->supported_cpus);

    /*
     * Ideally, we'd probe the PMU here when we find the first matching
     * CPU. We can't do that for several reasons; see the comment in
     * arm_pmu_acpi_init().
     *
     * So for the time being, we're done.
     */
    return 0;
}

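/*
 * Initialise and register the PMUs discovered by the hotplug callback above,
 * using the PMU driver's init_fn. Each distinct PMU is given a unique
 * "<base>_<index>" name before registration.
 */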
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
    int pmu_idx = 0;
    int cpu, ret;

    /*
     * Initialise and register the set of PMUs which we know about right
     * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
     * could handle late hotplug, but this may lead to deadlock since we
     * might try to register a hotplug notifier instance from within a
     * hotplug notifier.
     *
     * There's also the problem of having access to the right init_fn,
     * without tying this too deeply into the "real" PMU driver.
     *
     * For the moment, as with the platform/DT case, we need at least one
     * of a PMU's CPUs to be online at probe time.
     */
    for_each_possible_cpu(cpu) {
        struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
        char *base_name;

        if (!pmu || pmu->name)
            continue;

        ret = init_fn(pmu);
        if (ret == -ENODEV) {
            /* PMU not handled by this driver, or not present */
            continue;
        } else if (ret) {
            pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
            return ret;
        }

        base_name = pmu->name;
        pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
        if (!pmu->name) {
            pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
            return -ENOMEM;
        }

        ret = armpmu_register(pmu);
        if (ret) {
            pr_warn("Failed to register PMU for CPU%d\n", cpu);
            kfree(pmu->name);
            return ret;
        }
    }

    return 0;
}

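/*
 * Early init: register the SPE device (if present), parse the PMU interrupts
 * from the MADT, and install the hotplug callback that associates each CPU
 * with its PMU.
 */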
static int arm_pmu_acpi_init(void)
{
    int ret;

    if (acpi_disabled)
        return 0;

    arm_spe_acpi_register_device();

    ret = arm_pmu_acpi_parse_irqs();
    if (ret)
        return ret;

    ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
                "perf/arm/pmu_acpi:starting",
                arm_pmu_acpi_cpu_starting, NULL);

    return ret;
}
subsys_initcall(arm_pmu_acpi_init)