// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arm/cluster-pm-race-avoidance.rst.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that CPU teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However the L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

static int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}
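
/*
 * The entry vectors are read by the early resume code with the MMU and
 * caches off (see mcpm_head.S), so every update must be cleaned to the
 * point of coherency.  A NULL ptr clears the vector: the CPU will then
 * wait in the low-level entry code until a vector is installed.
 */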
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? __pa_symbol(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
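
/*
 * An early poke is an {address, value} pair that the low-level entry code
 * writes out as one of the first things it does when the given CPU enters
 * mcpm_entry_point, e.g. to release a hardware latch holding the CPU back.
 */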
extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__sync_cache_range_w(poke, 2 * sizeof(*poke));
}
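
/*
 * Only one platform backend may register its low-level power operations;
 * any further registration attempt is refused with -EBUSY.
 */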
static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

bool mcpm_is_available(void)
{
	return (platform_ops) ? true : false;
}
EXPORT_SYMBOL_GPL(mcpm_is_available);

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
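
/*
 * A CPU's use count is 0 when it is down, 1 when it is up, and 2 when a
 * power_up request raced with a not-yet-completed power_down (see the
 * BUG_ON()s below).  A cluster is unused when all its CPUs' counts are 0.
 */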
static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
	int i, cnt;
	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		cnt |= mcpm_cpu_use_count[cluster][i];
	return !cnt;
}

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	bool cpu_is_down, cluster_is_down;
	int ret = 0;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (!platform_ops)
		return -EUNATCH;
	might_sleep();

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&mcpm_lock);

	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
	cluster_is_down = mcpm_cluster_unused(cluster);

	mcpm_cpu_use_count[cluster][cpu]++;
	/*
	 * The only possible values are:
	 * 0 = CPU down
	 * 1 = CPU (still) up
	 * 2 = CPU requested to be up before it had a chance
	 *     to actually make itself down.
	 * Any other value is a bug.
	 */
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
	       mcpm_cpu_use_count[cluster][cpu] != 2);

	if (cluster_is_down)
		ret = platform_ops->cluster_powerup(cluster);
	if (cpu_is_down && !ret)
		ret = platform_ops->cpu_powerup(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_enable();
	return ret;
}

/* Type matching cpu_reset(), for calling it through its physical address. */
typedef typeof(cpu_reset) phys_reset_t;
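
/*
 * Power down the calling CPU.  Must be called with IRQs disabled.  On
 * success execution does not return here: the CPU re-enters the kernel
 * through mcpm_entry_point when it is next powered up.
 */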
void mcpm_cpu_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_going_down, last_man;
	phys_reset_t phys_reset;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (WARN_ON_ONCE(!platform_ops))
		return;
	BUG_ON(!irqs_disabled());

	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	arch_spin_lock(&mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

	mcpm_cpu_use_count[cluster][cpu]--;
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
	       mcpm_cpu_use_count[cluster][cpu] != 1);
	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
	last_man = mcpm_cluster_unused(cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		platform_ops->cpu_powerdown_prepare(cpu, cluster);
		platform_ops->cluster_powerdown_prepare(cluster);
		arch_spin_unlock(&mcpm_lock);
		platform_ops->cluster_cache_disable();
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		if (cpu_going_down)
			platform_ops->cpu_powerdown_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
		/*
		 * If cpu_going_down is false here, that means a power_up
		 * request raced ahead of us.  Even if we do not want to
		 * shut this CPU down, the caller still expects execution
		 * to return through the system resume entry path, like
		 * when the WFI is aborted due to a new IRQ or the like.
		 * So let's continue with cache cleaning in all cases.
		 */
		platform_ops->cpu_cache_disable();
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (cpu_going_down)
		wfi();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU.  In this case the
	 * CPU might not be able to actually enter a powered-down state
	 * with the WFI instruction if the power_up request has removed
	 * the required reset condition.  The power_down method is then
	 * allowed to return.  We must perform a re-entry in the kernel
	 * as if the power_up method just had deasserted reset on the CPU.
	 */
	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
	phys_reset(__pa_symbol(mcpm_entry_point), false);

	/* should never get here */
	BUG();
}
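
/*
 * Block until the given CPU, previously asked to power down, is known to
 * be truly dead; returns the platform backend's error code otherwise.
 */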
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
		return -EUNATCH;

	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}
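
/*
 * Suspend the calling CPU: let the platform backend prepare its resume
 * path first, then go through the regular power-down sequence.  The
 * caller is expected to have installed a resume vector beforehand with
 * mcpm_set_entry_vector().
 */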
void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
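
/*
 * Called from a freshly powered-up CPU: fix up the use counts and give
 * the platform backend a chance to finalize cluster/CPU bring-up.
 */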
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	unsigned long flags;

	if (!platform_ops)
		return -EUNATCH;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	local_irq_save(flags);
	arch_spin_lock(&mcpm_lock);

	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_restore(flags);

	return 0;
}

#ifdef CONFIG_ARM_CPU_SUSPEND
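
/*
 * cpu_suspend() callback: turn the CPU cache off using the callback passed
 * in _arg, then soft-restart through mcpm_entry_point, resuming via
 * cpu_resume_no_hyp.
 */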
static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
	phys_reset(__pa_symbol(mcpm_entry_point), false);
	BUG();
}

int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM entry point, to turn the CPU cache off.
	 * This mimics a warm boot, for the benefit of platforms whose
	 * cluster coherency setup must be performed with caches disabled.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}

#endif

extern unsigned long mcpm_power_up_setup_phys;
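
/*
 * Initialize the MCPM sync state, assuming the calling CPU's cluster is
 * the only one currently up, and hand the platform's power_up_setup
 * routine (if any) to the low-level entry code.
 */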
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i) {
		mcpm_cpu_use_count[this_cluster][i] = 1;
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	}
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}