// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arm/cluster-pm-race-avoidance.rst.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
    mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
    sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
    dmb();
    mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
    sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
    sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
    dmb();
    mcpm_sync.clusters[cluster].cluster = state;
    sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
    sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
    unsigned int i;
    struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

    /* Warn inbound CPUs that the cluster is being torn down: */
    c->cluster = CLUSTER_GOING_DOWN;
    sync_cache_w(&c->cluster);

    /* Back out if the inbound cluster is already in the critical region: */
    sync_cache_r(&c->inbound);
    if (c->inbound == INBOUND_COMING_UP)
        goto abort;

    /*
     * Wait for all CPUs to get out of the GOING_DOWN state, so that local
     * teardown is complete on each CPU before tearing down the cluster.
     *
     * If any CPU has been woken up again from the DOWN state, then we
     * shouldn't be taking the cluster down at all: abort in that case.
     */
    sync_cache_r(&c->cpus);
    for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
        int cpustate;

        if (i == cpu)
            continue;

        while (1) {
            cpustate = c->cpus[i].cpu;
            if (cpustate != CPU_GOING_DOWN)
                break;

            wfe();
            sync_cache_r(&c->cpus[i].cpu);
        }

        switch (cpustate) {
        case CPU_DOWN:
            continue;

        default:
            goto abort;
        }
    }

    return true;

abort:
    __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
    return false;
}

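/*
 * __mcpm_cluster_state: Returns the last cluster state published for the
 *    given cluster, reading it back from memory so that a stale cached
 *    copy is not used.
 */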
static int __mcpm_cluster_state(unsigned int cluster)
{
    sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
    return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

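/*
 * mcpm_set_entry_vector: Record where the given CPU in the given cluster
 *    should jump when it comes up through the low-level MCPM entry point.
 *    The physical address of @ptr (or 0 to clear the vector) is written
 *    to the entry vector table and cleaned to main memory so that an
 *    inbound CPU can read it with its cache still disabled.
 */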
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
    unsigned long val = ptr ? __pa_symbol(ptr) : 0;
    mcpm_entry_vectors[cluster][cpu] = val;
    sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

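/*
 * mcpm_set_early_poke: Record an address/value pair to be written out by
 *    the low-level entry code very early on the inbound path for the given
 *    CPU, before any C code can run (for example to poke a power
 *    controller register).  Both words are cleaned to main memory.
 */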
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
             unsigned long poke_phys_addr, unsigned long poke_val)
{
    unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
    poke[0] = poke_phys_addr;
    poke[1] = poke_val;
    __sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

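/*
 * Platform back-end registration.  A back-end provides the mcpm_platform_ops
 * methods used below (cpu_powerup, cluster_powerup, cpu_powerdown_prepare,
 * cluster_powerdown_prepare, cpu_cache_disable, cluster_cache_disable,
 * wait_for_powerdown, and the optional cpu_suspend_prepare, cpu_is_up and
 * cluster_is_up hooks).  Only the first registration succeeds.
 *
 * A hypothetical back-end (names below are illustrative only) would
 * typically register itself from its early platform init code:
 *
 *    static const struct mcpm_platform_ops my_soc_pm_ops = {
 *        .cpu_powerup               = my_soc_cpu_powerup,
 *        .cluster_powerup           = my_soc_cluster_powerup,
 *        .cpu_powerdown_prepare     = my_soc_cpu_powerdown_prepare,
 *        .cluster_powerdown_prepare = my_soc_cluster_powerdown_prepare,
 *        .cpu_cache_disable         = my_soc_cpu_cache_disable,
 *        .cluster_cache_disable     = my_soc_cluster_cache_disable,
 *        .wait_for_powerdown        = my_soc_wait_for_powerdown,
 *    };
 *
 *    ret = mcpm_platform_register(&my_soc_pm_ops);
 */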
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
    if (platform_ops)
        return -EBUSY;
    platform_ops = ops;
    return 0;
}

bool mcpm_is_available(void)
{
    return (platform_ops) ? true : false;
}
EXPORT_SYMBOL_GPL(mcpm_is_available);

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number, which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

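/*
 * Per-CPU use counts, protected by mcpm_lock.  A count of 0 means the CPU
 * is down (or going down), 1 means it is up, and 2 means a power_up
 * request arrived before the CPU finished taking itself down.  A cluster
 * whose counts are all 0 is unused and may be powered off.
 */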
static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
    int i, cnt;
    for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
        cnt |= mcpm_cpu_use_count[cluster][i];
    return !cnt;
}

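/*
 * mcpm_cpu_power_up: Request that the given CPU in the given cluster be
 *    powered up.  Called with IRQs enabled and may sleep.  If the cluster
 *    is currently unused it is powered up first.  The use count records
 *    requests that race with an in-progress power down of the same CPU.
 */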
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
    bool cpu_is_down, cluster_is_down;
    int ret = 0;

    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    if (!platform_ops)
        return -EUNATCH; /* try not to shadow power_up errors */
    might_sleep();

    /*
     * Since this is called with IRQs enabled, and no arch_spin_lock_irq
     * variant exists, we need to disable IRQs manually here.
     */
    local_irq_disable();
    arch_spin_lock(&mcpm_lock);

    cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
    cluster_is_down = mcpm_cluster_unused(cluster);

    mcpm_cpu_use_count[cluster][cpu]++;
    /*
     * The only possible values are:
     * 0 = CPU down
     * 1 = CPU (still) up
     * 2 = CPU requested to be up before it had a chance
     *     to actually make itself down.
     * Any other value is a bug.
     */
    BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
           mcpm_cpu_use_count[cluster][cpu] != 2);

    if (cluster_is_down)
        ret = platform_ops->cluster_powerup(cluster);
    if (cpu_is_down && !ret)
        ret = platform_ops->cpu_powerup(cpu, cluster);

    arch_spin_unlock(&mcpm_lock);
    local_irq_enable();
    return ret;
}

typedef typeof(cpu_reset) phys_reset_t;

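/*
 * mcpm_cpu_power_down: Power down the calling CPU, tearing the whole
 *    cluster down as well if it is the last man.  Must be called with IRQs
 *    disabled.  This call does not return: the CPU either powers off in
 *    wfi(), or, if a concurrent power_up request removed the reset
 *    condition, re-enters the kernel through mcpm_entry_point as after a
 *    normal power up.
 */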
void mcpm_cpu_power_down(void)
{
    unsigned int mpidr, cpu, cluster;
    bool cpu_going_down, last_man;
    phys_reset_t phys_reset;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    if (WARN_ON_ONCE(!platform_ops))
        return;
    BUG_ON(!irqs_disabled());

    setup_mm_for_reboot();

    __mcpm_cpu_going_down(cpu, cluster);
    arch_spin_lock(&mcpm_lock);
    BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

    mcpm_cpu_use_count[cluster][cpu]--;
    BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
           mcpm_cpu_use_count[cluster][cpu] != 1);
    cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
    last_man = mcpm_cluster_unused(cluster);

    if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
        platform_ops->cpu_powerdown_prepare(cpu, cluster);
        platform_ops->cluster_powerdown_prepare(cluster);
        arch_spin_unlock(&mcpm_lock);
        platform_ops->cluster_cache_disable();
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
    } else {
        if (cpu_going_down)
            platform_ops->cpu_powerdown_prepare(cpu, cluster);
        arch_spin_unlock(&mcpm_lock);
        /*
         * If cpu_going_down is false here, that means a power_up
         * request raced ahead of us.  Even if we do not want to
         * shut this CPU down, the caller still expects execution
         * to return through the system resume entry path, like
         * when the WFI is aborted due to a new IRQ or the like.
         * So let's continue with cache cleaning in all cases.
         */
        platform_ops->cpu_cache_disable();
    }

    __mcpm_cpu_down(cpu, cluster);

    /* Now we are prepared for power-down, do it: */
    if (cpu_going_down)
        wfi();

    /*
     * It is possible for a power_up request to happen concurrently
     * with a power_down request for the same CPU. In this case the
     * CPU might not be able to actually enter a powered down state
     * with the WFI instruction if the power_up request has removed
     * the required reset condition.  We must perform a re-entry in
     * the kernel as if the power_up method had just deasserted reset
     * on the CPU.
     */
    phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
    phys_reset(__pa_symbol(mcpm_entry_point), false);

    /* should never get here */
    BUG();
}

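/*
 * mcpm_wait_for_cpu_powerdown: Ask the platform back-end to wait until the
 *    given CPU has reached a safely powered-down state.  Returns 0 on
 *    success, -EUNATCH if no suitable back-end is registered, or the
 *    back-end's error code otherwise.
 */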
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
    int ret;

    if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
        return -EUNATCH;

    ret = platform_ops->wait_for_powerdown(cpu, cluster);
    if (ret)
        pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
            __func__, cpu, cluster, ret);

    return ret;
}

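/*
 * mcpm_cpu_suspend: Like mcpm_cpu_power_down(), but first gives the
 *    platform back-end a chance to set up a special resume mode through
 *    its optional cpu_suspend_prepare method.
 */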
void mcpm_cpu_suspend(void)
{
    if (WARN_ON_ONCE(!platform_ops))
        return;

    /* Some platforms might have to enable special resume modes, etc. */
    if (platform_ops->cpu_suspend_prepare) {
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        arch_spin_lock(&mcpm_lock);
        platform_ops->cpu_suspend_prepare(cpu, cluster);
        arch_spin_unlock(&mcpm_lock);
    }
    mcpm_cpu_power_down();
}

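/*
 * mcpm_cpu_powered_up: Called by a CPU once it is running in the kernel
 *    again after being powered up, to fix up the use counts and notify the
 *    platform back-end that the CPU (and possibly the cluster) is up.
 */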
int mcpm_cpu_powered_up(void)
{
    unsigned int mpidr, cpu, cluster;
    bool cpu_was_down, first_man;
    unsigned long flags;

    if (!platform_ops)
        return -EUNATCH;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    local_irq_save(flags);
    arch_spin_lock(&mcpm_lock);

    cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
    first_man = mcpm_cluster_unused(cluster);

    if (first_man && platform_ops->cluster_is_up)
        platform_ops->cluster_is_up(cluster);
    if (cpu_was_down)
        mcpm_cpu_use_count[cluster][cpu] = 1;
    if (platform_ops->cpu_is_up)
        platform_ops->cpu_is_up(cpu, cluster);

    arch_spin_unlock(&mcpm_lock);
    local_irq_restore(flags);

    return 0;
}

#ifdef CONFIG_ARM_CPU_SUSPEND

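/*
 * nocache_trampoline: Run through cpu_suspend().  It switches to the
 *    reboot (1:1) page tables, follows the usual MCPM teardown protocol
 *    for the calling CPU, disables the cache through the supplied
 *    callback, then re-enters the kernel via cpu_reset()/mcpm_entry_point
 *    as if the CPU had just been powered up.
 */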
static int __init nocache_trampoline(unsigned long _arg)
{
    void (*cache_disable)(void) = (void *)_arg;
    unsigned int mpidr = read_cpuid_mpidr();
    unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    phys_reset_t phys_reset;

    mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
    setup_mm_for_reboot();

    __mcpm_cpu_going_down(cpu, cluster);
    BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
    cache_disable();
    __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
    __mcpm_cpu_down(cpu, cluster);

    phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
    phys_reset(__pa_symbol(mcpm_entry_point), false);
    BUG();
}

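/*
 * mcpm_loopback: Soft-restart the calling CPU through the low-level MCPM
 *    code, using @cache_disable to turn its cache off on the way down.
 *    Returns 0 on success or a negative error code otherwise.
 */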
int __init mcpm_loopback(void (*cache_disable)(void))
{
    int ret;

    /*
     * We're going to soft-restart the current CPU through the
     * low-level MCPM code by leveraging the suspend/resume
     * infrastructure. Let's play it safe by using cpu_pm_enter()
     * in case the CPU init code path resets the VFP or similar.
     */
    local_irq_disable();
    local_fiq_disable();
    ret = cpu_pm_enter();
    if (!ret) {
        ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
        cpu_pm_exit();
    }
    local_fiq_enable();
    local_irq_enable();
    if (ret)
        pr_err("%s returned %d\n", __func__, ret);
    return ret;
}

#endif

extern unsigned long mcpm_power_up_setup_phys;

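/*
 * mcpm_sync_init: Set up the CPU/cluster synchronisation state.  All
 *    clusters and CPUs are marked down except the ones currently running,
 *    and the physical address of @power_up_setup, if any, is recorded for
 *    the low-level entry code to call on the way up.
 */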
int __init mcpm_sync_init(
    void (*power_up_setup)(unsigned int affinity_level))
{
    unsigned int i, j, mpidr, this_cluster;

    BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
    BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

    /*
     * Set initial CPU and cluster states.
     * Only one cluster is assumed to be active at this point.
     */
    for (i = 0; i < MAX_NR_CLUSTERS; i++) {
        mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
        mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
        for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
            mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
    }
    mpidr = read_cpuid_mpidr();
    this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    for_each_online_cpu(i) {
        mcpm_cpu_use_count[this_cluster][i] = 1;
        mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
    }
    mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
    sync_cache_w(&mcpm_sync);

    if (power_up_setup) {
        mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
        sync_cache_w(&mcpm_power_up_setup_phys);
    }

    return 0;
}