0001
0002
0003
0004
0005
0006
0007 #include <linux/cpu.h>
0008 #include <linux/delay.h>
0009 #include <linux/io.h>
0010 #include <linux/sched/task_stack.h>
0011 #include <linux/sched/hotplug.h>
0012 #include <linux/slab.h>
0013 #include <linux/smp.h>
0014 #include <linux/types.h>
0015 #include <linux/irq.h>
0016
0017 #include <asm/bcache.h>
0018 #include <asm/mips-cps.h>
0019 #include <asm/mips_mt.h>
0020 #include <asm/mipsregs.h>
0021 #include <asm/pm-cps.h>
0022 #include <asm/r4kcache.h>
0023 #include <asm/smp-cps.h>
0024 #include <asm/time.h>
0025 #include <asm/uasm.h>
0026
/* Set via the "nothreads" early parameter; limits each core to one VP(E). */
static bool threads_disabled;

/* Bitmap of cores currently known to be powered up (bit per core). */
static DECLARE_BITMAP(core_power, NR_CPUS);

/* Per-core boot configuration; allocated in cps_prepare_cpus(). */
struct core_boot_config *mips_cps_core_bootcfg;
0031
/*
 * Handler for the "nothreads" kernel parameter: disable use of sibling
 * threads (VPEs/VPs) so that only one CPU per core is brought up.
 */
static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);
0038
0039 static unsigned core_vpe_count(unsigned int cluster, unsigned core)
0040 {
0041 if (threads_disabled)
0042 return 1;
0043
0044 return mips_cps_numvps(cluster, core);
0045 }
0046
/*
 * Probe the cluster/core/VP(E) topology reported by the Coherence Manager,
 * record it in cpu_data, and perform the early hardware setup needed before
 * secondary CPUs can be booted.
 */
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			/* Record topology, capped so we never index past cpu_data[] */
			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE); only cluster 0 */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (0x5, CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	/* From CM3 onwards, point the boot exception vector at our entry code */
	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif
}
0118
0119 static void __init cps_prepare_cpus(unsigned int max_cpus)
0120 {
0121 unsigned ncores, core_vpes, c, cca;
0122 bool cca_unsuitable, cores_limited;
0123 u32 *entry_code;
0124
0125 mips_mt_set_cpuoptions();
0126
0127
0128 cca = read_c0_config() & CONF_CM_CMASK;
0129 switch (cca) {
0130 case 0x4:
0131 case 0x5:
0132
0133 cca_unsuitable = false;
0134 break;
0135
0136 default:
0137
0138 cca_unsuitable = true;
0139 }
0140
0141
0142 cores_limited = false;
0143 if (cca_unsuitable || cpu_has_dc_aliases) {
0144 for_each_present_cpu(c) {
0145 if (cpus_are_siblings(smp_processor_id(), c))
0146 continue;
0147
0148 set_cpu_present(c, false);
0149 cores_limited = true;
0150 }
0151 }
0152 if (cores_limited)
0153 pr_warn("Using only one core due to %s%s%s\n",
0154 cca_unsuitable ? "unsuitable CCA" : "",
0155 (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
0156 cpu_has_dc_aliases ? "dcache aliasing" : "");
0157
0158
0159
0160
0161
0162
0163 entry_code = (u32 *)&mips_cps_core_entry;
0164 uasm_i_addiu(&entry_code, 16, 0, cca);
0165 blast_dcache_range((unsigned long)&mips_cps_core_entry,
0166 (unsigned long)entry_code);
0167 bc_wback_inv((unsigned long)&mips_cps_core_entry,
0168 (void *)entry_code - (void *)&mips_cps_core_entry);
0169 __sync();
0170
0171
0172 ncores = mips_cps_numcores(0);
0173 mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
0174 GFP_KERNEL);
0175 if (!mips_cps_core_bootcfg) {
0176 pr_err("Failed to allocate boot config for %u cores\n", ncores);
0177 goto err_out;
0178 }
0179
0180
0181 for (c = 0; c < ncores; c++) {
0182 core_vpes = core_vpe_count(0, c);
0183 mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
0184 sizeof(*mips_cps_core_bootcfg[c].vpe_config),
0185 GFP_KERNEL);
0186 if (!mips_cps_core_bootcfg[c].vpe_config) {
0187 pr_err("Failed to allocate %u VPE boot configs\n",
0188 core_vpes);
0189 goto err_out;
0190 }
0191 }
0192
0193
0194 atomic_set(&mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)].vpe_mask,
0195 1 << cpu_vpe_id(¤t_cpu_data));
0196
0197 return;
0198 err_out:
0199
0200 if (mips_cps_core_bootcfg) {
0201 for (c = 0; c < ncores; c++)
0202 kfree(mips_cps_core_bootcfg[c].vpe_config);
0203 kfree(mips_cps_core_bootcfg);
0204 mips_cps_core_bootcfg = NULL;
0205 }
0206
0207
0208 for_each_possible_cpu(c) {
0209 if (c == 0)
0210 continue;
0211 set_cpu_present(c, false);
0212 }
0213 }
0214
/*
 * Power up & take out of reset the given (currently powered-down) core,
 * arranging for it to begin execution at mips_cps_core_entry running the
 * given VP. Polls the CPC sequencer state until the core reports it is up.
 */
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core via the CM other register block */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector to our (uncached) entry code */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core through the CPC */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure the VP_RUN/VP_STOP writes are visible to the
			 * hardware before the core is released from reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* No CPC: take the core out of reset via the GCRs */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}
0286
0287 static void remote_vpe_boot(void *dummy)
0288 {
0289 unsigned core = cpu_core(¤t_cpu_data);
0290 struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
0291
0292 mips_cps_boot_vpes(core_cfg, cpu_vpe_id(¤t_cpu_data));
0293 }
0294
/*
 * Boot the given secondary CPU, pointing it at the given idle task's stack.
 * Depending on system state this either powers up the whole core, asks a
 * sibling CPU to start the VP(E), or starts it directly from this CPU.
 * Returns 0 on success or -ENOSYS for CPUs in other clusters.
 */
static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	/* Initial pc/sp/gp for the new CPU */
	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	/* Mark the VPE as pending boot within its core */
	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		/* Point the (already powered) VP at our entry code */
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core: find an online sibling */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}
0359
/*
 * Early init run on a freshly-booted secondary CPU: disable MT threading,
 * sanity-check the GIC's view of our VP ID, and unmask interrupt lines.
 */
static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise interrupts may be routed
		 * to the wrong VP.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	/* With an external interrupt controller, mask all IM bits;
	 * otherwise unmask hardware interrupt lines IP2..IP7. */
	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}
0384
/*
 * Final per-CPU bring-up step: arm the count/compare timer and enable
 * local interrupts on this CPU.
 */
static void cps_smp_finish(void)
{
	/* Arm the first count/compare timer interrupt on this CPU */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif

	local_irq_enable();
}
0397
0398 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
0399
/* Ways in which a CPU may be taken offline (see cps_shutdown_this_cpu()). */
enum cpu_death {
	CPU_DEATH_HALT,		/* halt the VP(E)/TC; the core stays powered */
	CPU_DEATH_POWER,	/* power gate the whole core */
};
0404
/*
 * Take the calling CPU offline using the requested method: either halt
 * just this VP(E)/TC, or power gate the whole core via the CPS PM code.
 */
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure the VP_STOP write reaches the hardware */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);

		/* Power down the core; does not return on success */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}
}
0432
0433 #ifdef CONFIG_KEXEC
0434
0435 static void cps_kexec_nonboot_cpu(void)
0436 {
0437 if (cpu_has_mipsmt || cpu_has_vp)
0438 cps_shutdown_this_cpu(CPU_DEATH_HALT);
0439 else
0440 cps_shutdown_this_cpu(CPU_DEATH_POWER);
0441 }
0442
0443 #endif
0444
0445 #endif
0446
0447 #ifdef CONFIG_HOTPLUG_CPU
0448
0449 static int cps_cpu_disable(void)
0450 {
0451 unsigned cpu = smp_processor_id();
0452 struct core_boot_config *core_cfg;
0453
0454 if (!cps_pm_support_state(CPS_PM_POWER_GATED))
0455 return -EINVAL;
0456
0457 core_cfg = &mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)];
0458 atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask);
0459 smp_mb__after_atomic();
0460 set_cpu_online(cpu, false);
0461 calculate_cpu_foreign_map();
0462 irq_migrate_all_off_this_cpu();
0463
0464 return 0;
0465 }
0466
/* Sibling CPU chosen in play_dead() to observe the dying VPE halt. */
static unsigned cpu_death_sibling;
/* Method by which the currently-dying CPU will be shut down. */
static enum cpu_death cpu_death;
0469
/*
 * Called on the CPU being offlined, with IRQs about to be disabled for
 * good. Chooses between halting this VP(E) (if a sibling stays online)
 * and powering down the whole core, then shuts itself down. Never returns.
 */
void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this VPE and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* Tell cps_cpu_die() that this CPU has chosen its way out */
	(void)cpu_report_death();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}
0504
0505 static void wait_for_sibling_halt(void *ptr_cpu)
0506 {
0507 unsigned cpu = (unsigned long)ptr_cpu;
0508 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
0509 unsigned halted;
0510 unsigned long flags;
0511
0512 do {
0513 local_irq_save(flags);
0514 settc(vpe_id);
0515 halted = read_tc_c0_tchalt();
0516 local_irq_restore(flags);
0517 } while (!(halted & TCHALT_H));
0518 }
0519
/*
 * Called on a surviving CPU once the dying CPU has reported its death;
 * waits until the CPU is genuinely gone (core powered down, TC halted, or
 * VP stopped, depending on cpu_death) before hotplug proceeds.
 */
static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait until the CPU is actually gone, so that onlining it again
	 * (or powering down its core) cannot race with its shutdown.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state (CPC sequencer states D0, D2 or U2), giving it up to
		 * 2 seconds before warning.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down but hasn't; we
			 * can't know what state it is actually in (it may
			 * have powered straight back up). The best we can do
			 * is warn the user and carry on, hoping the core is
			 * doing nothing harmful and might behave properly if
			 * onlined again later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		/* Poll the CPC until the VP is no longer running */
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}
0603
0604 #endif
0605
/* Platform SMP operations for MIPS CPS systems. */
static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup = cps_smp_setup,
	.prepare_cpus = cps_prepare_cpus,
	.boot_secondary = cps_boot_secondary,
	.init_secondary = cps_init_secondary,
	.smp_finish = cps_smp_finish,
	.send_ipi_single = mips_smp_send_ipi_single,
	.send_ipi_mask = mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = cps_cpu_disable,
	.cpu_die = cps_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
0622
/* Return true iff the CPS SMP ops are the registered platform SMP ops. */
bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}
0628
0629 int register_cps_smp_ops(void)
0630 {
0631 if (!mips_cm_present()) {
0632 pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
0633 return -ENODEV;
0634 }
0635
0636
0637 if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
0638 pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
0639 return -ENODEV;
0640 }
0641
0642 register_smp_ops(&cps_smp_ops);
0643 return 0;
0644 }