// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Chen-Yu Tsai
 *
 * Chen-Yu Tsai <wens@csie.org>
 *
 * arch/arm/mach-sunxi/mc_smp.c
 *
 * Based on Allwinner code, arch/arm/mach-exynos/mcpm-exynos.c, and
 * arch/arm/mach-hisi/platmcpm.c
 * Cluster cache enable trampoline code adapted from MCPM framework
 */

#include <linux/arm-cci.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#define SUNXI_CPUS_PER_CLUSTER      4
#define SUNXI_NR_CLUSTERS       2

#define POLL_USEC   100
#define TIMEOUT_USEC    100000
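/*
 * Note: these two constants parameterize the polling loops further
 * down -- the open-coded WFI wait in sunxi_mc_smp_cpu_kill() and the
 * readl_poll_timeout() call both poll roughly every 100 us, with an
 * overall timeout of 100 ms.
 */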

#define CPUCFG_CX_CTRL_REG0(c)      (0x10 * (c))
#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(n)   BIT(n)
#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL  0xf
#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7   BIT(4)
#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15  BIT(0)
#define CPUCFG_CX_CTRL_REG1(c)      (0x10 * (c) + 0x4)
#define CPUCFG_CX_CTRL_REG1_ACINACTM    BIT(0)
#define CPUCFG_CX_STATUS(c)     (0x30 + 0x4 * (c))
#define CPUCFG_CX_STATUS_STANDBYWFI(n)  BIT(16 + (n))
#define CPUCFG_CX_STATUS_STANDBYWFIL2   BIT(0)
#define CPUCFG_CX_RST_CTRL(c)       (0x80 + 0x4 * (c))
#define CPUCFG_CX_RST_CTRL_DBG_SOC_RST  BIT(24)
#define CPUCFG_CX_RST_CTRL_ETM_RST(n)   BIT(20 + (n))
#define CPUCFG_CX_RST_CTRL_ETM_RST_ALL  (0xf << 20)
#define CPUCFG_CX_RST_CTRL_DBG_RST(n)   BIT(16 + (n))
#define CPUCFG_CX_RST_CTRL_DBG_RST_ALL  (0xf << 16)
#define CPUCFG_CX_RST_CTRL_H_RST    BIT(12)
#define CPUCFG_CX_RST_CTRL_L2_RST   BIT(8)
#define CPUCFG_CX_RST_CTRL_CX_RST(n)    BIT(4 + (n))
#define CPUCFG_CX_RST_CTRL_CORE_RST(n)  BIT(n)
#define CPUCFG_CX_RST_CTRL_CORE_RST_ALL (0xf << 0)
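/*
 * For illustration (values computed from the macros above, not taken
 * from the manual): with cluster c = 1, CPUCFG_CX_CTRL_REG0(1) = 0x10,
 * CPUCFG_CX_CTRL_REG1(1) = 0x14, CPUCFG_CX_STATUS(1) = 0x34 and
 * CPUCFG_CX_RST_CTRL(1) = 0x84, i.e. each cluster has its own copy of
 * each register at a fixed stride.
 */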

#define PRCM_CPU_PO_RST_CTRL(c)     (0x4 + 0x4 * (c))
#define PRCM_CPU_PO_RST_CTRL_CORE(n)    BIT(n)
#define PRCM_CPU_PO_RST_CTRL_CORE_ALL   0xf
#define PRCM_PWROFF_GATING_REG(c)   (0x100 + 0x4 * (c))
/* The cluster power-off gating bit differs between the A80 and the A83T */
#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I    BIT(0)
#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I    BIT(4)
#define PRCM_PWROFF_GATING_REG_CORE(n)  BIT(n)
#define PRCM_PWR_SWITCH_REG(c, cpu) (0x140 + 0x10 * (c) + 0x4 * (cpu))
#define PRCM_CPU_SOFT_ENTRY_REG     0x164

/* R_CPUCFG registers, specific to sun8i-a83t */
#define R_CPUCFG_CLUSTER_PO_RST_CTRL(c) (0x30 + (c) * 0x4)
#define R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(n)    BIT(n)
#define R_CPUCFG_CPU_SOFT_ENTRY_REG     0x01a4

#define CPU0_SUPPORT_HOTPLUG_MAGIC0 0xFA50392F
#define CPU0_SUPPORT_HOTPLUG_MAGIC1 0x790DCA3A

static void __iomem *cpucfg_base;
static void __iomem *prcm_base;
static void __iomem *sram_b_smp_base;
static void __iomem *r_cpucfg_base;

extern void sunxi_mc_smp_secondary_startup(void);
extern void sunxi_mc_smp_resume(void);
static bool is_a83t;

static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
{
    struct device_node *node;
    int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
    bool is_compatible;

    node = of_cpu_device_node_get(cpu);

    /* In case of_cpu_device_node_get fails */
    if (!node)
        node = of_get_cpu_node(cpu, NULL);

    if (!node) {
        /*
         * There's no point in returning an error, since we
         * would be midway through a core or cluster power sequence.
         */
        pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
               __func__, cluster, core);

        return false;
    }

    is_compatible = of_device_is_compatible(node, "arm,cortex-a15");
    of_node_put(node);
    return is_compatible;
}

static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
                      bool enable)
{
    u32 reg;

    /* control sequence from Allwinner A80 user manual v1.2 PRCM section */
    reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
    if (enable) {
        if (reg == 0x00) {
            pr_debug("power clamp for cluster %u cpu %u already open\n",
                 cluster, cpu);
            return 0;
        }

        writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        udelay(10);
        writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        udelay(10);
        writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        udelay(10);
        writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        udelay(10);
        writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        udelay(10);
    } else {
        writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
        udelay(10);
    }

    return 0;
}
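
/*
 * A note on the sequence above: stepping the switch register through
 * 0xff -> 0xfe -> 0xf8 -> 0xf0 -> 0x00 opens the per-core power
 * clamps a few at a time, presumably to limit inrush current, while
 * a single write of 0xff closes them all at once.
 */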

static void sunxi_cpu0_hotplug_support_set(bool enable)
{
    if (enable) {
        writel(CPU0_SUPPORT_HOTPLUG_MAGIC0, sram_b_smp_base);
        writel(CPU0_SUPPORT_HOTPLUG_MAGIC1, sram_b_smp_base + 0x4);
    } else {
        writel(0x0, sram_b_smp_base);
        writel(0x0, sram_b_smp_base + 0x4);
    }
}
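
/*
 * The two magic words written to secure SRAM above appear to act as a
 * flag for the boot ROM / early firmware so that CPU0 can re-enter the
 * kernel after being hotplugged back in; this is inferred from usage
 * here, not from documentation.
 */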

static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
    u32 reg;

    pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
    if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
        return -EINVAL;

    /* Set hotplug support magic flags for cpu0 */
    if (cluster == 0 && cpu == 0)
        sunxi_cpu0_hotplug_support_set(true);

    /* assert processor power-on reset */
    reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
    reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
    writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

    if (is_a83t) {
        /* assert cpu power-on reset */
        reg  = readl(r_cpucfg_base +
                 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
        reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
        writel(reg, r_cpucfg_base +
               R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
        udelay(10);
    }

    /* Cortex-A7: hold L1 reset disable signal low */
    if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
        reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
        reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(cpu);
        writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
    }

    /* assert processor related resets */
    reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
    reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST(cpu);

    /*
     * Allwinner code also asserts resets for NEON on A15. According
     * to ARM manuals, asserting power-on reset is sufficient.
     */
    if (!sunxi_core_is_cortex_a15(cpu, cluster))
        reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);

    writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

    /* open power switch */
    sunxi_cpu_power_switch_set(cpu, cluster, true);

    /* Handle A83T bit swap */
    if (is_a83t) {
        if (cpu == 0)
            cpu = 4;
    }
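
    /*
     * On the A83T the power gate for core 0 is wired to bit 4 of
     * PRCM_PWROFF_GATING_REG (cf. the gating_bit handling in
     * sunxi_cpu_powerdown() below), hence the temporary 0 -> 4 remap
     * around the register access that follows, undone afterwards.
     */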

    /* clear processor power gate */
    reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
    writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    udelay(20);

    /* Handle A83T bit swap */
    if (is_a83t) {
        if (cpu == 4)
            cpu = 0;
    }

    /* de-assert processor power-on reset */
    reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
    reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
    writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

    if (is_a83t) {
        reg  = readl(r_cpucfg_base +
                 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
        reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
        writel(reg, r_cpucfg_base +
               R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
        udelay(10);
    }

    /* de-assert all processor resets */
    reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
    reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
    reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);
    if (!sunxi_core_is_cortex_a15(cpu, cluster))
        reg |= CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
    else
        reg |= CPUCFG_CX_RST_CTRL_CX_RST(cpu); /* NEON */
    writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

    return 0;
}
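
/*
 * The power-up order implemented above: assert the core's power-on
 * and peripheral resets, open its power switch, clear its power
 * gate, then release the resets, at which point the core starts at
 * the soft-entry address programmed in sunxi_mc_smp_init().
 */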

static int sunxi_cluster_powerup(unsigned int cluster)
{
    u32 reg;

    pr_debug("%s: cluster %u\n", __func__, cluster);
    if (cluster >= SUNXI_NR_CLUSTERS)
        return -EINVAL;

    /* For A83T, assert cluster cores resets */
    if (is_a83t) {
        reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
        reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;   /* Core Reset    */
        writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
        udelay(10);
    }

    /* assert ACINACTM */
    reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
    reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
    writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));

    /* assert cluster processor power-on resets */
    reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
    reg &= ~PRCM_CPU_PO_RST_CTRL_CORE_ALL;
    writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));

    /* assert cluster cores resets */
    if (is_a83t) {
        reg  = readl(r_cpucfg_base +
                 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
        reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;
        writel(reg, r_cpucfg_base +
               R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
        udelay(10);
    }

    /* assert cluster resets */
    reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
    reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
    reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST_ALL;
    reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
    reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;

    /*
     * Allwinner code also asserts resets for NEON on A15. According
     * to ARM manuals, asserting power-on reset is sufficient.
     */
    if (!sunxi_core_is_cortex_a15(0, cluster))
        reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST_ALL;

    writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

    /* hold L1/L2 reset disable signals low */
    reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
    if (sunxi_core_is_cortex_a15(0, cluster)) {
        /* Cortex-A15: hold L2RSTDISABLE low */
        reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15;
    } else {
        /* Cortex-A7: hold L1RSTDISABLE and L2RSTDISABLE low */
        reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL;
        reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7;
    }
    writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));

    /* clear cluster power gate */
    reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    if (is_a83t)
        reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
    else
        reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
    writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    udelay(20);

    /* de-assert cluster resets */
    reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
    reg |= CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
    reg |= CPUCFG_CX_RST_CTRL_H_RST;
    reg |= CPUCFG_CX_RST_CTRL_L2_RST;
    writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

    /* de-assert ACINACTM */
    reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
    reg &= ~CPUCFG_CX_CTRL_REG1_ACINACTM;
    writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));

    return 0;
}

/*
 * This bit is shared between the initial nocache_trampoline call to
 * enable CCI-400 and proper cluster cache disable before power down.
 */
static void sunxi_cluster_cache_disable_without_axi(void)
{
    if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
        /*
         * On the Cortex-A15 we need to disable
         * L2 prefetching before flushing the cache.
         */
        asm volatile(
        "mcr    p15, 1, %0, c15, c0, 3\n"
        "isb\n"
        "dsb"
        : : "r" (0x400));
    }

    /* Flush all cache levels for this cluster. */
    v7_exit_coherency_flush(all);

    /*
     * Disable cluster-level coherency by masking
     * incoming snoops and DVM messages:
     */
    cci_disable_port_by_cpu(read_cpuid_mpidr());
}
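
/*
 * Ordering is important above: the caches are cleaned and this CPU
 * leaves coherency (v7_exit_coherency_flush) before the CCI slave
 * port is disabled, because once incoming snoops are masked any
 * dirty lines still held by the cluster would be invisible to the
 * rest of the system.
 */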

static int sunxi_mc_smp_cpu_table[SUNXI_NR_CLUSTERS][SUNXI_CPUS_PER_CLUSTER];
int sunxi_mc_smp_first_comer;

static DEFINE_SPINLOCK(boot_lock);

static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
{
    int i;

    for (i = 0; i < SUNXI_CPUS_PER_CLUSTER; i++)
        if (sunxi_mc_smp_cpu_table[cluster][i])
            return false;
    return true;
}
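
/*
 * sunxi_mc_smp_cpu_table[][] holds a per-core use count, incremented
 * in sunxi_mc_smp_boot_secondary() and decremented in
 * sunxi_mc_smp_cpu_die(); a cluster is considered "down" only once
 * every core in it has dropped back to zero.
 */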

static void sunxi_mc_smp_secondary_init(unsigned int cpu)
{
    /* Clear hotplug support magic flags for cpu0 */
    if (cpu == 0)
        sunxi_cpu0_hotplug_support_set(false);
}

static int sunxi_mc_smp_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
    unsigned int mpidr, cpu, cluster;

    mpidr = cpu_logical_map(l_cpu);
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

    if (!cpucfg_base)
        return -ENODEV;
    if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
        return -EINVAL;

    spin_lock_irq(&boot_lock);

    if (sunxi_mc_smp_cpu_table[cluster][cpu])
        goto out;

    if (sunxi_mc_smp_cluster_is_down(cluster)) {
        sunxi_mc_smp_first_comer = true;
        sunxi_cluster_powerup(cluster);
    } else {
        sunxi_mc_smp_first_comer = false;
    }

    /* This is read by incoming CPUs with their cache and MMU disabled */
    sync_cache_w(&sunxi_mc_smp_first_comer);
    sunxi_cpu_powerup(cpu, cluster);

out:
    sunxi_mc_smp_cpu_table[cluster][cpu]++;
    spin_unlock_irq(&boot_lock);

    return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void sunxi_cluster_cache_disable(void)
{
    unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
    u32 reg;

    pr_debug("%s: cluster %u\n", __func__, cluster);

    sunxi_cluster_cache_disable_without_axi();

    /* last man standing, assert ACINACTM */
    reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
    reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
    writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
}

static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
{
    unsigned int mpidr, cpu, cluster;
    bool last_man;

    mpidr = cpu_logical_map(l_cpu);
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
    pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);

    spin_lock(&boot_lock);
    sunxi_mc_smp_cpu_table[cluster][cpu]--;
    if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
        /* A power_up request went ahead of us. */
        pr_debug("%s: aborting due to a power up request\n",
             __func__);
        spin_unlock(&boot_lock);
        return;
    } else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
        pr_err("Cluster %d CPU%d boots multiple times\n",
               cluster, cpu);
        BUG();
    }

    last_man = sunxi_mc_smp_cluster_is_down(cluster);
    spin_unlock(&boot_lock);

    gic_cpu_if_down(0);
    if (last_man)
        sunxi_cluster_cache_disable();
    else
        v7_exit_coherency_flush(louis);

    for (;;)
        wfi();
}
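
/*
 * A dying CPU cannot cut its own power: sunxi_mc_smp_cpu_die() above
 * only parks it in WFI, and a surviving CPU running
 * sunxi_mc_smp_cpu_kill() below waits for the STANDBYWFI status bit
 * before actually gating power to the core (and, for the last core,
 * the whole cluster).
 */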

static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
    u32 reg;
    int gating_bit = cpu;

    pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
    if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
        return -EINVAL;

    if (is_a83t && cpu == 0)
        gating_bit = 4;

    /* gate processor power */
    reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    reg |= PRCM_PWROFF_GATING_REG_CORE(gating_bit);
    writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    udelay(20);

    /* close power switch */
    sunxi_cpu_power_switch_set(cpu, cluster, false);

    return 0;
}

static int sunxi_cluster_powerdown(unsigned int cluster)
{
    u32 reg;

    pr_debug("%s: cluster %u\n", __func__, cluster);
    if (cluster >= SUNXI_NR_CLUSTERS)
        return -EINVAL;

    /* assert cluster resets or system will hang */
    pr_debug("%s: assert cluster reset\n", __func__);
    reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
    reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
    reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
    reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;
    writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));

    /* gate cluster power */
    pr_debug("%s: gate cluster power\n", __func__);
    reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    if (is_a83t)
        reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
    else
        reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
    writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
    udelay(20);

    return 0;
}

static int sunxi_mc_smp_cpu_kill(unsigned int l_cpu)
{
    unsigned int mpidr, cpu, cluster;
    unsigned int tries, count;
    int ret = 0;
    u32 reg;

    mpidr = cpu_logical_map(l_cpu);
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

    /* This should never happen */
    if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
            cpu >= SUNXI_CPUS_PER_CLUSTER))
        return 0;

    /* wait for CPU core to die and enter WFI */
    count = TIMEOUT_USEC / POLL_USEC;
    spin_lock_irq(&boot_lock);
    for (tries = 0; tries < count; tries++) {
        spin_unlock_irq(&boot_lock);
        usleep_range(POLL_USEC / 2, POLL_USEC);
        spin_lock_irq(&boot_lock);

        /*
         * If the user turns off a bunch of cores at the same
         * time, the kernel might call cpu_kill before some of
         * them are ready. This is because boot_lock serializes
         * both cpu_die and cpu_kill callbacks. Either one could
         * run first. We should wait for cpu_die to complete.
         */
        if (sunxi_mc_smp_cpu_table[cluster][cpu])
            continue;

        reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
        if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
            break;
    }

    if (tries >= count) {
        ret = ETIMEDOUT;
        goto out;
    }

    /* power down CPU core */
    sunxi_cpu_powerdown(cpu, cluster);

    if (!sunxi_mc_smp_cluster_is_down(cluster))
        goto out;

    /* wait for cluster L2 WFI */
    ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
                 reg & CPUCFG_CX_STATUS_STANDBYWFIL2,
                 POLL_USEC, TIMEOUT_USEC);
    if (ret) {
        /*
         * Ignore a timeout on the cluster. Leaving the cluster on
         * will not affect system execution, it will just use a bit
         * more power. But returning an error here would only confuse
         * the user, as the CPU has already been shut down.
         */
        ret = 0;
        goto out;
    }

    /* Power down cluster */
    sunxi_cluster_powerdown(cluster);

out:
    spin_unlock_irq(&boot_lock);
    pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
         __func__, cluster, cpu, ret);
    return !ret;
}
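
/*
 * The inverted return value above is intentional: the smp_operations
 * ->cpu_kill() hook reports success as nonzero, so ret == 0 here
 * (powerdown completed, or the harmless cluster timeout was ignored)
 * becomes 1.
 */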

static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
{
    /* CPU0 hotplug not handled for sun8i-a83t */
    if (is_a83t)
        if (cpu == 0)
            return false;
    return true;
}
#endif

static const struct smp_operations sunxi_mc_smp_smp_ops __initconst = {
    .smp_secondary_init = sunxi_mc_smp_secondary_init,
    .smp_boot_secondary = sunxi_mc_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
    .cpu_die        = sunxi_mc_smp_cpu_die,
    .cpu_kill       = sunxi_mc_smp_cpu_kill,
    .cpu_can_disable    = sunxi_mc_smp_cpu_can_disable,
#endif
};

static bool __init sunxi_mc_smp_cpu_table_init(void)
{
    unsigned int mpidr, cpu, cluster;

    mpidr = read_cpuid_mpidr();
    cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
    cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

    if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
        pr_err("%s: boot CPU is out of bounds!\n", __func__);
        return false;
    }
    sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
    return true;
}
/*
 * Adapted from arch/arm/common/mcpm_entry.c
 *
 * We need the trampoline code to enable CCI-400 on the first cluster.
 */
typedef typeof(cpu_reset) phys_reset_t;

static int __init nocache_trampoline(unsigned long __unused)
{
    phys_reset_t phys_reset;

    setup_mm_for_reboot();
    sunxi_cluster_cache_disable_without_axi();

    phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
    phys_reset(__pa_symbol(sunxi_mc_smp_resume), false);
    BUG();
}
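
/*
 * nocache_trampoline() runs via cpu_suspend() on the identity mapping
 * set up by setup_mm_for_reboot(), so it can safely drop out of
 * coherency and then "reset" into the physical address of the
 * sunxi_mc_smp_resume assembly stub declared at the top of this file,
 * which is expected to re-enable the CCI port for the boot cluster
 * before resuming the kernel.
 */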

static int __init sunxi_mc_smp_loopback(void)
{
    int ret;

    /*
     * We're going to soft-restart the current CPU through the
     * low-level MCPM code by leveraging the suspend/resume
     * infrastructure. Let's play it safe by using cpu_pm_enter()
     * in case the CPU init code path resets the VFP or similar.
     */
    sunxi_mc_smp_first_comer = true;
    local_irq_disable();
    local_fiq_disable();
    ret = cpu_pm_enter();
    if (!ret) {
        ret = cpu_suspend(0, nocache_trampoline);
        cpu_pm_exit();
    }
    local_fiq_enable();
    local_irq_enable();
    sunxi_mc_smp_first_comer = false;

    return ret;
}

/*
 * This holds any device nodes that we requested resources for,
 * so that we may easily release resources in the error path.
 */
struct sunxi_mc_smp_nodes {
    struct device_node *prcm_node;
    struct device_node *cpucfg_node;
    struct device_node *sram_node;
    struct device_node *r_cpucfg_node;
};

/* This structure holds SoC-specific bits tied to an enable-method string. */
struct sunxi_mc_smp_data {
    const char *enable_method;
    int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
    bool is_a83t;
};

static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
{
    of_node_put(nodes->prcm_node);
    of_node_put(nodes->cpucfg_node);
    of_node_put(nodes->sram_node);
    of_node_put(nodes->r_cpucfg_node);
    memset(nodes, 0, sizeof(*nodes));
}

static int __init sun9i_a80_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
{
    nodes->prcm_node = of_find_compatible_node(NULL, NULL,
                           "allwinner,sun9i-a80-prcm");
    if (!nodes->prcm_node) {
        pr_err("%s: PRCM not available\n", __func__);
        return -ENODEV;
    }

    nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
                             "allwinner,sun9i-a80-cpucfg");
    if (!nodes->cpucfg_node) {
        pr_err("%s: CPUCFG not available\n", __func__);
        return -ENODEV;
    }

    nodes->sram_node = of_find_compatible_node(NULL, NULL,
                           "allwinner,sun9i-a80-smp-sram");
    if (!nodes->sram_node) {
        pr_err("%s: Secure SRAM not available\n", __func__);
        return -ENODEV;
    }

    return 0;
}

static int __init sun8i_a83t_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
{
    nodes->prcm_node = of_find_compatible_node(NULL, NULL,
                           "allwinner,sun8i-a83t-r-ccu");
    if (!nodes->prcm_node) {
        pr_err("%s: PRCM not available\n", __func__);
        return -ENODEV;
    }

    nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
                             "allwinner,sun8i-a83t-cpucfg");
    if (!nodes->cpucfg_node) {
        pr_err("%s: CPUCFG not available\n", __func__);
        return -ENODEV;
    }

    nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL,
                               "allwinner,sun8i-a83t-r-cpucfg");
    if (!nodes->r_cpucfg_node) {
        pr_err("%s: RCPUCFG not available\n", __func__);
        return -ENODEV;
    }

    return 0;
}

static const struct sunxi_mc_smp_data sunxi_mc_smp_data[] __initconst = {
    {
        .enable_method  = "allwinner,sun9i-a80-smp",
        .get_smp_nodes  = sun9i_a80_get_smp_nodes,
    },
    {
        .enable_method  = "allwinner,sun8i-a83t-smp",
        .get_smp_nodes  = sun8i_a83t_get_smp_nodes,
        .is_a83t    = true,
    },
};
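
/*
 * sunxi_mc_smp_init() below matches the boot CPU's "enable-method"
 * device tree property against this table; the matching entry
 * supplies both the node lookup helper and the is_a83t quirk flag.
 */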

static int __init sunxi_mc_smp_init(void)
{
    struct sunxi_mc_smp_nodes nodes = { 0 };
    struct device_node *node;
    struct resource res;
    void __iomem *addr;
    int i, ret;

    /*
     * Don't bother checking the "cpus" node, as an enable-method
     * property in that node is undocumented.
     */
    node = of_cpu_device_node_get(0);
    if (!node)
        return -ENODEV;

    /*
     * We can't actually use the enable-method magic in the kernel.
     * Our loopback / trampoline code uses the CPU suspend framework,
     * which requires the identity mapping be available. It would not
     * yet be available if we used the .init_cpus or .prepare_cpus
     * callbacks in smp_operations, which we would use if we were to
     * use CPU_METHOD_OF_DECLARE.
     */
    for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
        ret = of_property_match_string(node, "enable-method",
                           sunxi_mc_smp_data[i].enable_method);
        if (!ret)
            break;
    }

    of_node_put(node);
    if (ret)
        return -ENODEV;

    /* Only valid once a match was found above */
    is_a83t = sunxi_mc_smp_data[i].is_a83t;

    if (!sunxi_mc_smp_cpu_table_init())
        return -EINVAL;

    if (!cci_probed()) {
        pr_err("%s: CCI-400 not available\n", __func__);
        return -ENODEV;
    }

    /* Get needed device tree nodes */
    ret = sunxi_mc_smp_data[i].get_smp_nodes(&nodes);
    if (ret)
        goto err_put_nodes;

    /*
     * Unfortunately we can not request the I/O region for the PRCM.
     * It is shared with the PRCM clock.
     */
    prcm_base = of_iomap(nodes.prcm_node, 0);
    if (!prcm_base) {
        pr_err("%s: failed to map PRCM registers\n", __func__);
        ret = -ENOMEM;
        goto err_put_nodes;
    }

    cpucfg_base = of_io_request_and_map(nodes.cpucfg_node, 0,
                        "sunxi-mc-smp");
    if (IS_ERR(cpucfg_base)) {
        ret = PTR_ERR(cpucfg_base);
        pr_err("%s: failed to map CPUCFG registers: %d\n",
               __func__, ret);
        goto err_unmap_prcm;
    }

    if (is_a83t) {
        r_cpucfg_base = of_io_request_and_map(nodes.r_cpucfg_node,
                              0, "sunxi-mc-smp");
        if (IS_ERR(r_cpucfg_base)) {
            ret = PTR_ERR(r_cpucfg_base);
            pr_err("%s: failed to map R-CPUCFG registers\n",
                   __func__);
            goto err_unmap_release_cpucfg;
        }
    } else {
        sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0,
                            "sunxi-mc-smp");
        if (IS_ERR(sram_b_smp_base)) {
            ret = PTR_ERR(sram_b_smp_base);
            pr_err("%s: failed to map secure SRAM\n", __func__);
            goto err_unmap_release_cpucfg;
        }
    }

    /* Configure CCI-400 for boot cluster */
    ret = sunxi_mc_smp_loopback();
    if (ret) {
        pr_err("%s: failed to configure boot cluster: %d\n",
               __func__, ret);
        goto err_unmap_release_sram_rcpucfg;
    }

    /* We don't need the device nodes anymore */
    sunxi_mc_smp_put_nodes(&nodes);

    /* Set the hardware entry point address */
    if (is_a83t)
        addr = r_cpucfg_base + R_CPUCFG_CPU_SOFT_ENTRY_REG;
    else
        addr = prcm_base + PRCM_CPU_SOFT_ENTRY_REG;
    writel(__pa_symbol(sunxi_mc_smp_secondary_startup), addr);

    /* Actually enable multi cluster SMP */
    smp_set_ops(&sunxi_mc_smp_smp_ops);

    pr_info("sunxi multi cluster SMP support installed\n");

    return 0;

err_unmap_release_sram_rcpucfg:
    if (is_a83t) {
        iounmap(r_cpucfg_base);
        of_address_to_resource(nodes.r_cpucfg_node, 0, &res);
    } else {
        iounmap(sram_b_smp_base);
        of_address_to_resource(nodes.sram_node, 0, &res);
    }
    release_mem_region(res.start, resource_size(&res));
err_unmap_release_cpucfg:
    iounmap(cpucfg_base);
    of_address_to_resource(nodes.cpucfg_node, 0, &res);
    release_mem_region(res.start, resource_size(&res));
err_unmap_prcm:
    iounmap(prcm_base);
err_put_nodes:
    sunxi_mc_smp_put_nodes(&nodes);
    return ret;
}

early_initcall(sunxi_mc_smp_init);
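
/*
 * An early initcall is used so that smp_set_ops() runs before the
 * secondary CPUs are brought online: early initcalls are executed
 * from do_pre_smp_initcalls(), ahead of smp_init().
 */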