/*
 * SMP bring-up and CPU hotplug support for the HiSilicon HiP04 SoC:
 * per-core reset control through the system controller and snoop
 * filter management through the fabric.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of_address.h>

#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>

#include "core.h"

/* Bits in the SC_CPU_RESET_REQ(x) / SC_CPU_RESET_DREQ(x) registers. */
#define CORE_RESET_BIT(x) (1 << x)
#define NEON_RESET_BIT(x) (1 << (x + 4))
#define CORE_DEBUG_RESET_BIT(x) (1 << (x + 9))
#define CLUSTER_L2_RESET_BIT (1 << 8)
#define CLUSTER_DEBUG_RESET_BIT (1 << 13)

/* Bits in the SC_CPU_RESET_STATUS(x) register. */
#define CORE_RESET_STATUS(x) (1 << x)
#define NEON_RESET_STATUS(x) (1 << (x + 4))
#define CORE_DEBUG_RESET_STATUS(x) (1 << (x + 9))
#define CLUSTER_L2_RESET_STATUS (1 << 8)
#define CLUSTER_DEBUG_RESET_STATUS (1 << 13)
#define CORE_WFI_STATUS(x) (1 << (x + 16))
#define CORE_WFE_STATUS(x) (1 << (x + 20))
#define CORE_DEBUG_ACK(x) (1 << (x + 24))

#define SC_CPU_RESET_REQ(x) (0x520 + (x << 3))
#define SC_CPU_RESET_DREQ(x) (0x524 + (x << 3))
#define SC_CPU_RESET_STATUS(x) (0x1520 + (x << 3))

#define FAB_SF_MODE 0x0c
#define FAB_SF_INVLD 0x10

/* Bit in the FAB_SF_INVLD register. */
#define FB_SF_INVLD_START (1 << 8)

#define HIP04_MAX_CLUSTERS 4
#define HIP04_MAX_CPUS_PER_CLUSTER 4

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

static void __iomem *sysctrl, *fabric;
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
static DEFINE_SPINLOCK(boot_lock);
static u32 fabric_phys_addr;

/*
 * Values read from the "boot-method" DT property:
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation space physical address
 * [3]: relocation space size
 */
static u32 hip04_boot_method[4];

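/* True when no CPU in @cluster is marked up in hip04_cpu_table. */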
static bool hip04_cluster_is_down(unsigned int cluster)
{
        int i;

        for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++)
                if (hip04_cpu_table[cluster][i])
                        return false;
        return true;
}

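/*
 * Enable or disable the fabric snoop filter for @cluster and spin until the
 * new FAB_SF_MODE value is visible.
 */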
static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
{
        unsigned long data;

        if (!fabric)
                BUG();
        data = readl_relaxed(fabric + FAB_SF_MODE);
        if (on)
                data |= 1 << cluster;
        else
                data &= ~(1 << cluster);
        writel_relaxed(data, fabric + FAB_SF_MODE);
        do {
                cpu_relax();
        } while (data != readl_relaxed(fabric + FAB_SF_MODE));
}

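/*
 * Bring a secondary CPU out of reset: if the whole cluster was down, first
 * deassert the cluster debug reset and enable the snoop filter; then
 * deassert the core/NEON/debug resets and send a wakeup IPI.
 */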
static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
{
        unsigned int mpidr, cpu, cluster;
        unsigned long data;
        void __iomem *sys_dreq, *sys_status;

        mpidr = cpu_logical_map(l_cpu);
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        if (!sysctrl)
                return -ENODEV;
        if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
                return -EINVAL;

        spin_lock_irq(&boot_lock);

        if (hip04_cpu_table[cluster][cpu])
                goto out;

        sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
        sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
        if (hip04_cluster_is_down(cluster)) {
                data = CLUSTER_DEBUG_RESET_BIT;
                writel_relaxed(data, sys_dreq);
                do {
                        cpu_relax();
                        data = readl_relaxed(sys_status);
                } while (data & CLUSTER_DEBUG_RESET_STATUS);
                hip04_set_snoop_filter(cluster, 1);
        }

        data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) |
               CORE_DEBUG_RESET_BIT(cpu);
        writel_relaxed(data, sys_dreq);
        do {
                cpu_relax();
        } while (data == readl_relaxed(sys_status));

        /*
         * Give the core a short time to leave reset; waking it immediately
         * after the reset deassertion may fail to power it up.
         */
        udelay(20);

        arch_send_wakeup_ipi_mask(cpumask_of(l_cpu));

out:
        hip04_cpu_table[cluster][cpu]++;
        spin_unlock_irq(&boot_lock);

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
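/*
 * Mark the CPU as down.  The last CPU of a cluster additionally disables L2
 * prefetching and flushes the whole cache hierarchy before entering WFI.
 */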
static void hip04_cpu_die(unsigned int l_cpu)
{
        unsigned int mpidr, cpu, cluster;
        bool last_man;

        mpidr = cpu_logical_map(l_cpu);
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        spin_lock(&boot_lock);
        hip04_cpu_table[cluster][cpu]--;
        if (hip04_cpu_table[cluster][cpu] == 1) {
                /* A concurrent boot request already re-marked this CPU as up. */
                spin_unlock(&boot_lock);
                return;
        } else if (hip04_cpu_table[cluster][cpu] > 1) {
                pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
                BUG();
        }

        last_man = hip04_cluster_is_down(cluster);
        spin_unlock(&boot_lock);
        if (last_man) {
                /* Disable L2 prefetching before the last core leaves coherency. */
                asm volatile(
                "mcr p15, 1, %0, c15, c0, 3 \n\t"
                "isb \n\t"
                "dsb "
                : : "r" (0x400) );
                v7_exit_coherency_flush(all);
        } else {
                v7_exit_coherency_flush(louis);
        }

        for (;;)
                wfi();
}

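/*
 * Confirm that the dying CPU reached WFI, assert its resets and, for the
 * last CPU of a cluster, turn off the snoop filter.  Returns 1 on success.
 */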
static int hip04_cpu_kill(unsigned int l_cpu)
{
        unsigned int mpidr, cpu, cluster;
        unsigned int data, tries, count;

        mpidr = cpu_logical_map(l_cpu);
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
               cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

        count = TIMEOUT_MSEC / POLL_MSEC;
        spin_lock_irq(&boot_lock);
        /* Wait for the dying CPU to reach WFI. */
        for (tries = 0; tries < count; tries++) {
                if (hip04_cpu_table[cluster][cpu])
                        goto err;
                cpu_relax();
                data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
                if (data & CORE_WFI_STATUS(cpu))
                        break;
                spin_unlock_irq(&boot_lock);

                msleep(POLL_MSEC);
                spin_lock_irq(&boot_lock);
        }
        if (tries >= count)
                goto err;
        /* Put the core, NEON and debug logic back into reset. */
        data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) |
               CORE_DEBUG_RESET_BIT(cpu);
        writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
        for (tries = 0; tries < count; tries++) {
                cpu_relax();
                data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
                if (data & CORE_RESET_STATUS(cpu))
                        break;
        }
        if (tries >= count)
                goto err;
        /* Turn off the snoop filter once the whole cluster is down. */
        if (hip04_cluster_is_down(cluster))
                hip04_set_snoop_filter(cluster, 0);
        spin_unlock_irq(&boot_lock);
        return 1;
err:
        spin_unlock_irq(&boot_lock);
        return 0;
}
#endif

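/* SMP operations registered with smp_set_ops() from hip04_smp_init(). */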
static const struct smp_operations hip04_smp_ops __initconst = {
        .smp_boot_secondary = hip04_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_die = hip04_cpu_die,
        .cpu_kill = hip04_cpu_kill,
#endif
};

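/* Mark the boot CPU as up and enable the snoop filter for its cluster. */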
static bool __init hip04_cpu_table_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        if (cluster >= HIP04_MAX_CLUSTERS ||
            cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
                pr_err("%s: boot CPU is out of bounds!\n", __func__);
                return false;
        }
        hip04_set_snoop_filter(cluster, 1);
        hip04_cpu_table[cluster][cpu] = 1;
        return true;
}

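/*
 * Parse the bootwrapper, sysctrl and fabric nodes from the device tree,
 * map the relocation space, publish the secondary entry point and register
 * the SMP operations.
 */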
static int __init hip04_smp_init(void)
{
        struct device_node *np, *np_sctl, *np_fab;
        struct resource fab_res;
        void __iomem *relocation;
        int ret = -ENODEV;

        np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
        if (!np)
                goto err;
        ret = of_property_read_u32_array(np, "boot-method",
                                         &hip04_boot_method[0], 4);
        if (ret)
                goto err;

        ret = -ENODEV;
        np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
        if (!np_sctl)
                goto err;
        np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
        if (!np_fab)
                goto err;

        ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
        if (ret)
                goto err;

        relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
        if (!relocation) {
                pr_err("failed to map relocation space\n");
                ret = -ENOMEM;
                goto err_reloc;
        }
        sysctrl = of_iomap(np_sctl, 0);
        if (!sysctrl) {
                pr_err("failed to get sysctrl base\n");
                ret = -ENOMEM;
                goto err_sysctrl;
        }
        ret = of_address_to_resource(np_fab, 0, &fab_res);
        if (ret) {
                pr_err("failed to get fabric base phys\n");
                goto err_fabric;
        }
        fabric_phys_addr = fab_res.start;
        sync_cache_w(&fabric_phys_addr);
        fabric = of_iomap(np_fab, 0);
        if (!fabric) {
                pr_err("failed to get fabric base\n");
                ret = -ENOMEM;
                goto err_fabric;
        }

        if (!hip04_cpu_table_init()) {
                ret = -EINVAL;
                goto err_table;
        }

        /*
         * Fill the relocation space with the bootwrapper address, a magic
         * word, the physical entry point for secondary cores, and a
         * terminating zero.
         */
        writel_relaxed(hip04_boot_method[0], relocation);
        writel_relaxed(0xa5a5a5a5, relocation + 4);     /* magic number */
        writel_relaxed(__pa_symbol(secondary_startup), relocation + 8);
        writel_relaxed(0, relocation + 12);
        iounmap(relocation);

        smp_set_ops(&hip04_smp_ops);
        return ret;
err_table:
        iounmap(fabric);
err_fabric:
        iounmap(sysctrl);
err_sysctrl:
        iounmap(relocation);
err_reloc:
        memblock_phys_free(hip04_boot_method[0], hip04_boot_method[1]);
err:
        return ret;
}
early_initcall(hip04_smp_init);