0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/cpu.h>
0009 #include <linux/delay.h>
0010 #include <linux/smp.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/kernel_stat.h>
0013 #include <linux/sched.h>
0014 #include <linux/sched/hotplug.h>
0015 #include <linux/sched/task_stack.h>
0016 #include <linux/init.h>
0017 #include <linux/export.h>
0018 #include <linux/kexec.h>
0019
0020 #include <asm/mmu_context.h>
0021 #include <asm/time.h>
0022 #include <asm/setup.h>
0023
0024 #include <asm/octeon/octeon.h>
0025
0026 #include "octeon_boot.h"
0027
/*
 * Handshake variables polled by the low-level secondary-CPU boot code.
 * octeon_boot_secondary() publishes the target core id in
 * octeon_processor_boot (0xff while no core is being booted — see
 * play_dead()); the chosen core picks up its stack and gp from
 * octeon_processor_sp/_gp, and the SP word being cleared signals that
 * the core is up.
 */
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
#ifdef CONFIG_RELOCATABLE
/* Relocated kernel entry point, filled in by plat_post_relocation(). */
volatile unsigned long octeon_processor_relocated_kernel_entry;
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* Bootloader re-entry address read from the LABI block; stays zero when
 * the bootloader does not support CPU hotplug. */
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

/* Low-level kernel entry point (architecture start-up code). */
extern void kernel_entry(unsigned long arg1, ...);
0041
/*
 * Handler for the SMP_ICACHE_FLUSH IPI: issue a synci on this core.
 * NOTE(review): presumably synci with a zero base synchronizes the
 * whole local icache on Octeon — confirm against the hardware manual.
 */
static void octeon_icache_flush(void)
{
	asm volatile ("synci 0($0)\n");
}
0046
/*
 * IPI message handlers, indexed by mailbox bit number (the mapping is
 * enforced by the BUILD_BUG_ONs in mailbox_interrupt()).  Unused slots
 * remain NULL and are skipped by the dispatcher.
 */
static void (*octeon_message_functions[8])(void) = {
	scheduler_ipi,
	generic_smp_call_function_interrupt,
	octeon_icache_flush,
};
0052
0053 static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
0054 {
0055 u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
0056 u64 action;
0057 int i;
0058
0059
0060
0061
0062
0063 BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
0064 BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
0065 BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));
0066
0067
0068
0069
0070
0071 action = cvmx_read_csr(mbox_clrx);
0072
0073 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
0074 action &= 0xff;
0075 else
0076 action &= 0xffff;
0077
0078
0079 cvmx_write_csr(mbox_clrx, action);
0080
0081 for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
0082 if (action & 1) {
0083 void (*fn)(void) = octeon_message_functions[i];
0084
0085 if (fn)
0086 fn();
0087 }
0088 action >>= 1;
0089 i++;
0090 }
0091 return IRQ_HANDLED;
0092 }
0093
0094
0095
0096
0097
0098
/*
 * Send the IPI messages in @action to a single CPU: setting bits in the
 * target core's CIU mailbox SET register raises its mailbox interrupt.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(cpu_logical_map(cpu)), action);
}
0108
0109 static inline void octeon_send_ipi_mask(const struct cpumask *mask,
0110 unsigned int action)
0111 {
0112 unsigned int i;
0113
0114 for_each_cpu(i, mask)
0115 octeon_send_ipi_single(i, action);
0116 }
0117
0118
0119
0120
/*
 * Detect whether the bootloader supports CPU hotplug (by checking the
 * LABI signature) and, if so, record its secondary-core entry point in
 * octeon_bootloader_entry_addr for later bring-up.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	/* Nothing to do when booted with maxcpus=0. */
	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		/* Terminate the printk line so it is not merged with the
		 * next message. */
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.\n");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}
0138
/*
 * Detect available CPUs and populate the logical<->physical core maps.
 * Linux CPU 0 is always the core we booted on; every other core in the
 * sysinfo coremask becomes an additional present CPU.  With
 * HOTPLUG_CPU, cores the bootloader held out of the boot coremask are
 * added as possible (but not present) so they can be brought up later.
 */
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* Start with only the boot CPU possible/present. */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs are all the cores set in the sysinfo coremask. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Additionally mark as possible the cores the bootloader kept
	 * out of the boot coremask; they are offline now but may be
	 * hotplugged later.  Skipped when booted with maxcpus=0 or when
	 * the bootloader lacks hotplug support (entry address is 0).
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
0191
0192
#ifdef CONFIG_RELOCATABLE
/*
 * After the kernel image has been relocated by @offset, publish the
 * relocated kernel_entry address for secondary cores to jump to.
 */
int plat_post_relocation(long offset)
{
	octeon_processor_relocated_kernel_entry =
		(unsigned long)kernel_entry + offset;

	return 0;
}
#endif /* CONFIG_RELOCATABLE */
0204
0205
0206
0207
0208 static int octeon_boot_secondary(int cpu, struct task_struct *idle)
0209 {
0210 int count;
0211
0212 pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
0213 cpu_logical_map(cpu));
0214
0215 octeon_processor_sp = __KSTK_TOS(idle);
0216 octeon_processor_gp = (unsigned long)(task_thread_info(idle));
0217 octeon_processor_boot = cpu_logical_map(cpu);
0218 mb();
0219
0220 count = 10000;
0221 while (octeon_processor_sp && count) {
0222
0223 udelay(1);
0224 count--;
0225 }
0226 if (count == 0) {
0227 pr_err("Secondary boot timeout\n");
0228 return -ETIMEDOUT;
0229 }
0230
0231 return 0;
0232 }
0233
0234
0235
0236
0237
/*
 * Early per-CPU initialization run on each secondary core before it is
 * marked online: point its exception base at the kernel's ebase, run
 * BIST checks, sync the cycle counter and set up per-CPU interrupts.
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	/* Temporarily set Status.BEV while rewriting EBASE, then
	 * restore the previous status. */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}
0251
0252
0253
0254
0255 static void __init octeon_prepare_cpus(unsigned int max_cpus)
0256 {
0257
0258
0259
0260
0261 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
0262 if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
0263 IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
0264 mailbox_interrupt)) {
0265 panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
0266 }
0267 }
0268
0269
0270
0271
0272
/*
 * Final per-CPU bring-up step: initialize user I/O access, arm the
 * count/compare timer for the first tick, and enable interrupts.
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* Prime the count/compare timer one tick into the future. */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}
0281
#ifdef CONFIG_HOTPLUG_CPU

/* Hand-shake state between play_dead() (sets CPU_DEAD) and
 * octeon_cpu_die() (waits for CPU_DEAD before resetting the core). */
static DEFINE_PER_CPU(int, cpu_state);
0286
/*
 * Take the calling CPU offline: mark it !online, rebuild the foreign
 * map, migrate its IRQs away, and flush caches/TLB before it enters
 * play_dead().  Refused when the bootloader cannot restart the core.
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* Without bootloader hotplug support the core could never be
	 * brought back online, so do not allow offlining it. */
	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}
0303
/*
 * Called on a surviving CPU to finish tearing down an offlined one:
 * wait for it to report CPU_DEAD from play_dead(), return the core to
 * the bootloader's available coremask, then pulse its reset line.
 */
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * The available coremask lives either in the named bootmem
	 * block or, failing that, directly in the LABI structure; set
	 * this core's bit in whichever one exists.
	 */
	mask = 1 << coreid;

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {
		/* Coremask stored at a fixed offset inside the named block. */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
			AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	/* Pulse the core's reset: assert, then deassert. */
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
0341
/*
 * Final code run by an offlined CPU: publish CPU_DEAD so that
 * octeon_cpu_die() can proceed, then spin until that function resets
 * this core via CIU_PP_RST.
 */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;	/* back to "no core being booted" */
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* spin until octeon_cpu_die() resets this core */
		;
}
0355
/*
 * Entry point handed to the bootloader for a hotplugged core: re-enter
 * the kernel through kernel_entry.
 */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);
}
0360
/*
 * CPU-hotplug "prepare" callback: install a boot vector so that the
 * target core, once (re)started by the bootloader, re-enters the
 * kernel via start_after_reset().
 */
static int octeon_update_boot_vector(unsigned int cpu)
{

	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: the coremask lives in the LABI struct;
		 * claim this core by clearing its bit. */
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {
		/* Named block exists: read the coremask stored there. */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* Core not marked available: pulse its reset line.
		 * NOTE(review): presumably this forces a core that was
		 * never released back into the bootloader — confirm. */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	/* NMI the core (only if it was in the available mask) so it
	 * leaves the bootloader wait loop and uses the boot vector. */
	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}
0400
0401 static int register_cavium_notifier(void)
0402 {
0403 return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
0404 "mips/cavium:prepare",
0405 octeon_update_boot_vector, NULL);
0406 }
0407 late_initcall(register_cavium_notifier);
0408
0409 #endif
0410
/* SMP operations for CIU-mailbox based Octeon models. */
static const struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
#endif
};
0427
/* CIU3 per-mailbox IRQ handler for the reschedule IPI (mailbox 0).
 * Name keeps the historical "reched" spelling; referenced by
 * octeon_78xx_prepare_cpus(). */
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
0433
/* CIU3 per-mailbox IRQ handler for the smp_call_function IPI (mailbox 1). */
static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}
0439
/* CIU3 per-mailbox IRQ handler for the icache-flush IPI (mailbox 2). */
static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();
	return IRQ_HANDLED;
}
0445
0446
0447
0448
0449 static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
0450 {
0451 if (request_irq(OCTEON_IRQ_MBOX0 + 0,
0452 octeon_78xx_reched_interrupt,
0453 IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
0454 octeon_78xx_reched_interrupt)) {
0455 panic("Cannot request_irq for SchedulerIPI");
0456 }
0457 if (request_irq(OCTEON_IRQ_MBOX0 + 1,
0458 octeon_78xx_call_function_interrupt,
0459 IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
0460 octeon_78xx_call_function_interrupt)) {
0461 panic("Cannot request_irq for SMP-Call");
0462 }
0463 if (request_irq(OCTEON_IRQ_MBOX0 + 2,
0464 octeon_78xx_icache_flush_interrupt,
0465 IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
0466 octeon_78xx_icache_flush_interrupt)) {
0467 panic("Cannot request_irq for ICache-Flush");
0468 }
0469 }
0470
/*
 * Send the IPI messages in @action to one CPU on CIU3 parts: each set
 * bit is delivered through its own per-message mailbox.
 */
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	int bit;

	for (bit = 0; bit < 8; bit++) {
		if (action & (1u << bit))
			octeon_ciu3_mbox_send(cpu, bit);
	}
}
0481
0482 static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
0483 unsigned int action)
0484 {
0485 unsigned int cpu;
0486
0487 for_each_cpu(cpu, mask)
0488 octeon_78xx_send_ipi_single(cpu, action);
0489 }
0490
/* SMP operations for CIU3-based Octeon models (78xx family). */
static const struct plat_smp_ops octeon_78xx_smp_ops = {
	.send_ipi_single	= octeon_78xx_send_ipi_single,
	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
#endif
};
0507
0508 void __init octeon_setup_smp(void)
0509 {
0510 const struct plat_smp_ops *ops;
0511
0512 if (octeon_has_feature(OCTEON_FEATURE_CIU3))
0513 ops = &octeon_78xx_smp_ops;
0514 else
0515 ops = &octeon_smp_ops;
0516
0517 register_smp_ops(ops);
0518 }