Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Created by:  Nicolas Pitre, October 2012
0004  * Copyright:   (C) 2012-2013  Linaro Limited
0005  *
0006  * Some portions of this file were originally written by Achin Gupta
0007  * Copyright:   (C) 2012  ARM Limited
0008  */
0009 
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
0018 
0019 #include <asm/mcpm.h>
0020 #include <asm/proc-fns.h>
0021 #include <asm/cacheflush.h>
0022 #include <asm/cputype.h>
0023 #include <asm/cp15.h>
0024 
0025 #include <linux/arm-cci.h>
0026 
0027 #include "spc.h"
0028 
0029 /* SCC conf registers */
0030 #define RESET_CTRL      0x018
0031 #define RESET_A15_NCORERESET(cpu)   (1 << (2 + (cpu)))
0032 #define RESET_A7_NCORERESET(cpu)    (1 << (16 + (cpu)))
0033 
0034 #define A15_CONF        0x400
0035 #define A7_CONF         0x500
0036 #define SYS_INFO        0x700
0037 #define SPC_BASE        0xb00
0038 
0039 static void __iomem *scc;
0040 
0041 #define TC2_CLUSTERS            2
0042 #define TC2_MAX_CPUS_PER_CLUSTER    3
0043 
0044 static unsigned int tc2_nr_cpus[TC2_CLUSTERS];
0045 
0046 static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
0047 {
0048     pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
0049     if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
0050         return -EINVAL;
0051     ve_spc_set_resume_addr(cluster, cpu,
0052                    __pa_symbol(mcpm_entry_point));
0053     ve_spc_cpu_wakeup_irq(cluster, cpu, true);
0054     return 0;
0055 }
0056 
0057 static int tc2_pm_cluster_powerup(unsigned int cluster)
0058 {
0059     pr_debug("%s: cluster %u\n", __func__, cluster);
0060     if (cluster >= TC2_CLUSTERS)
0061         return -EINVAL;
0062     ve_spc_powerdown(cluster, false);
0063     return 0;
0064 }
0065 
/*
 * Prepare this CPU for powerdown: arm its wakeup IRQ in the power
 * controller first, then disconnect the GIC CPU interface (see the
 * comment below for why the order matters).
 */
static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
    ve_spc_cpu_wakeup_irq(cluster, cpu, true);
    /*
     * If the CPU is committed to power down, make sure
     * the power controller will be in charge of waking it
     * up upon IRQ, ie IRQ lines are cut from GIC CPU IF
     * to the CPU by disabling the GIC CPU IF to prevent wfi
     * from completing execution behind power controller back
     */
    gic_cpu_if_down(0);
}
0080 
/*
 * Prepare the whole cluster for powerdown: post the powerdown request
 * to the SPC and enable its global wakeup IRQ.
 */
static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
    pr_debug("%s: cluster %u\n", __func__, cluster);
    BUG_ON(cluster >= TC2_CLUSTERS);
    ve_spc_powerdown(cluster, true);
    ve_spc_global_wakeup_irq(true);
}
0088 
/*
 * Flush and disable this CPU's caches up to the Level of Unification
 * Inner Shareable ("louis") and take it out of coherency, in
 * preparation for the CPU being powered off.
 */
static void tc2_pm_cpu_cache_disable(void)
{
    v7_exit_coherency_flush(louis);
}
0093 
/*
 * Flush and disable the caches for the whole (last-man) cluster and
 * detach it from system-wide coherency via the CCI.
 */
static void tc2_pm_cluster_cache_disable(void)
{
    if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
        /*
         * On the Cortex-A15 we need to disable
         * L2 prefetching before flushing the cache.
         */
        asm volatile(
        "mcr    p15, 1, %0, c15, c0, 3 \n\t"
        "isb    \n\t"
        "dsb    "
        : : "r" (0x400) );
    }

    /* Flush all cache levels, not just to the LoUIS as in the CPU case. */
    v7_exit_coherency_flush(all);
    /* Drop this cluster's CCI port so it leaves the coherency domain. */
    cci_disable_port_by_cpu(read_cpuid_mpidr());
}
0111 
0112 static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
0113 {
0114     u32 mask = cluster ?
0115           RESET_A7_NCORERESET(cpu)
0116         : RESET_A15_NCORERESET(cpu);
0117 
0118     return !(readl_relaxed(scc + RESET_CTRL) & mask);
0119 }
0120 
0121 #define POLL_MSEC 10
0122 #define TIMEOUT_MSEC 1000
0123 
0124 static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
0125 {
0126     unsigned tries;
0127 
0128     pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
0129     BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
0130 
0131     for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
0132         pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
0133              __func__, cpu, cluster,
0134              readl_relaxed(scc + RESET_CTRL));
0135 
0136         /*
0137          * We need the CPU to reach WFI, but the power
0138          * controller may put the cluster in reset and
0139          * power it off as soon as that happens, before
0140          * we have a chance to see STANDBYWFI.
0141          *
0142          * So we need to check for both conditions:
0143          */
0144         if (tc2_core_in_reset(cpu, cluster) ||
0145             ve_spc_cpu_in_wfi(cpu, cluster))
0146             return 0; /* success: the CPU is halted */
0147 
0148         /* Otherwise, wait and retry: */
0149         msleep(POLL_MSEC);
0150     }
0151 
0152     return -ETIMEDOUT; /* timeout */
0153 }
0154 
/*
 * Before suspending, record the MCPM entry point as this CPU's resume
 * address in the SPC so a wakeup re-enters through mcpm_entry_point.
 */
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
    ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}
0159 
/*
 * The CPU is back up: disarm its SPC wakeup IRQ and clear the stale
 * resume address.
 */
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
    pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
    BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
    ve_spc_cpu_wakeup_irq(cluster, cpu, false);
    ve_spc_set_resume_addr(cluster, cpu, 0);
}
0167 
/*
 * The cluster is back up: cancel any pending powerdown request and
 * disable the SPC global wakeup IRQ again.
 */
static void tc2_pm_cluster_is_up(unsigned int cluster)
{
    pr_debug("%s: cluster %u\n", __func__, cluster);
    BUG_ON(cluster >= TC2_CLUSTERS);
    ve_spc_powerdown(cluster, false);
    ve_spc_global_wakeup_irq(false);
}
0175 
/* TC2 backend for the common MCPM layer, built on the SPC/SCC helpers above. */
static const struct mcpm_platform_ops tc2_pm_power_ops = {
    .cpu_powerup        = tc2_pm_cpu_powerup,
    .cluster_powerup    = tc2_pm_cluster_powerup,
    .cpu_suspend_prepare    = tc2_pm_cpu_suspend_prepare,
    .cpu_powerdown_prepare  = tc2_pm_cpu_powerdown_prepare,
    .cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
    .cpu_cache_disable  = tc2_pm_cpu_cache_disable,
    .cluster_cache_disable  = tc2_pm_cluster_cache_disable,
    .wait_for_powerdown = tc2_pm_wait_for_powerdown,
    .cpu_is_up      = tc2_pm_cpu_is_up,
    .cluster_is_up      = tc2_pm_cluster_is_up,
};
0188 
/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 *
 * __naked and pure asm: r0 holds the affinity level being powered up.
 * For anything other than level 1 (cluster) there is nothing to do, so
 * return; at cluster level, tail-call cci_enable_port_for_self to
 * rejoin the CCI coherency domain.  NOTE(review): presumably runs
 * before the MMU/stack are usable, hence no C code — confirm against
 * the mcpm_sync_init() contract.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
    asm volatile (" \n"
"   cmp r0, #1 \n"
"   bxne    lr \n"
"   b   cci_enable_port_for_self ");
}
0199 
0200 static int __init tc2_pm_init(void)
0201 {
0202     unsigned int mpidr, cpu, cluster;
0203     int ret, irq;
0204     u32 a15_cluster_id, a7_cluster_id, sys_info;
0205     struct device_node *np;
0206 
0207     /*
0208      * The power management-related features are hidden behind
0209      * SCC registers. We need to extract runtime information like
0210      * cluster ids and number of CPUs really available in clusters.
0211      */
0212     np = of_find_compatible_node(NULL, NULL,
0213             "arm,vexpress-scc,v2p-ca15_a7");
0214     scc = of_iomap(np, 0);
0215     if (!scc)
0216         return -ENODEV;
0217 
0218     a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
0219     a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
0220     if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
0221         return -EINVAL;
0222 
0223     sys_info = readl_relaxed(scc + SYS_INFO);
0224     tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
0225     tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;
0226 
0227     irq = irq_of_parse_and_map(np, 0);
0228 
0229     /*
0230      * A subset of the SCC registers is also used to communicate
0231      * with the SPC (power controller). We need to be able to
0232      * drive it very early in the boot process to power up
0233      * processors, so we initialize the SPC driver here.
0234      */
0235     ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
0236     if (ret)
0237         return ret;
0238 
0239     if (!cci_probed())
0240         return -ENODEV;
0241 
0242     mpidr = read_cpuid_mpidr();
0243     cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
0244     cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
0245     pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
0246     if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
0247         pr_err("%s: boot CPU is out of bound!\n", __func__);
0248         return -EINVAL;
0249     }
0250 
0251     ret = mcpm_platform_register(&tc2_pm_power_ops);
0252     if (!ret) {
0253         mcpm_sync_init(tc2_pm_power_up_setup);
0254         /* test if we can (re)enable the CCI on our own */
0255         BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
0256         pr_info("TC2 power management initialized\n");
0257     }
0258     return ret;
0259 }
0260 
0261 early_initcall(tc2_pm_init);