#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

/* Map of physical CPU id to logical CPU number */
int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];
EXPORT_SYMBOL(__cpu_number_map);

/* Map of logical CPU number to physical CPU id */
int __cpu_logical_map[NR_CPUS];
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of threads (siblings) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Map of siblings (hardware threads in the same core) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Map of CPUs sharing a package with each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

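/*
 * For each CPU, a mask containing one representative CPU from each other
 * (foreign) core. Keeping only a single VPE per core in the mask reduces
 * the number of IPIs needed on multi-threaded systems.
 */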
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* CPUs for which sibling maps have been set up */
static cpumask_t cpu_sibling_setup_map;

/* CPUs for which core maps have been set up */
static cpumask_t cpu_core_setup_map;

/* CPUs which have joined the coherent domain */
cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpus_are_siblings(cpu, i)) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

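/*
 * Recompute cpu_foreign_map whenever a CPU comes online or goes offline:
 * build a mask containing one representative CPU per online core, then
 * remove each CPU's own siblings from its copy of that mask.
 */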
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask, keeping only one CPU per core */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Platform-specific SMP operations, registered by platform code at boot */
const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

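/*
 * IPI support built on top of the generic IPI IRQ domain: one IPI is used
 * for cross-CPU function calls and one for rescheduling.
 */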
#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;

        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;

        default:
                BUG();
        }

        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        if (cpus_are_siblings(cpu, smp_processor_id()))
                                continue;

                        core = cpu_core(&cpu_data[cpu]);

                        /*
                         * If the target CPU has not yet joined the coherent
                         * domain its core may be powered down, so poke the
                         * CPC to power it up until the CPU appears in
                         * cpu_coherent_mask.
                         */
                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                                mips_cm_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}

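/*
 * IPI handlers: one runs queued cross-CPU function calls, the other enters
 * the scheduler IPI path.
 */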
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
                             irq_handler_t handler)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
        BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

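/*
 * Reserve and request the call & resched IPIs for the CPUs in @mask.
 */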
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
        int virq;
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have only a partial DT setup: we found an interrupt
         * parent node but no IPI domain attached to it, so fall back to
         * searching for an IPI domain that isn't associated with the DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        /*
         * Not every platform provides an IPI IRQ domain. That is only
         * tolerable when a single CPU is present, since without a domain we
         * have no way to deliver IPIs; in that case return and hope some
         * other code sets the IPIs up.
         */
        if (!ipidomain) {
                BUG_ON(num_present_cpus() > 1);
                return 0;
        }

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!call_virq)
                call_virq = virq;

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!sched_virq)
                sched_virq = virq;

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        smp_ipi_init_one(call_virq + cpu, "IPI call",
                                         ipi_call_interrupt);
                        smp_ipi_init_one(sched_virq + cpu, "IPI resched",
                                         ipi_resched_interrupt);
                }
        } else {
                smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
                smp_ipi_init_one(sched_virq, "IPI resched",
                                 ipi_resched_interrupt);
        }

        return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * As in mips_smp_ipi_allocate(): if we found an interrupt parent
         * node but no IPI domain attached to it, fall back to an IPI domain
         * that isn't associated with the DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        free_irq(call_virq + cpu, NULL);
                        free_irq(sched_virq + cpu, NULL);
                }
        }
        irq_destroy_ipi(call_virq, mask);
        irq_destroy_ipi(sched_virq, mask);
        return 0;
}

static int __init mips_smp_ipi_init(void)
{
        if (num_possible_cpus() == 1)
                return 0;

        mips_smp_ipi_allocate(cpu_possible_mask);

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

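/*
 * First C code run on a secondary CPU after it has been started by the
 * boot CPU.
 */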
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /* Calibrate this CPU's delay loop and record it for udelay() */
        calibrate_delay();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        /* Notify the boot CPU that we've started and can sync counters */
        complete(&cpu_starting);

        synchronise_count_slave(cpu);

        /* The CPU is running and counters are synchronised, mark it online */
        set_cpu_online(cpu, true);

        calculate_cpu_foreign_map();

        /*
         * Notify the boot CPU that we're up & online so that it may return
         * from __cpu_up().
         */
        complete(&cpu_running);

        /*
         * Interrupts are enabled by ->smp_finish(); enabling them any
         * earlier is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

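/*
 * Take this CPU down in response to smp_send_stop(): mark it offline and
 * spin with interrupts disabled.
 */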
static void stop_this_cpu(void *dummy)
{
        /* Remove this CPU from the online and foreign maps */
        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Called from init code before the secondary CPUs are brought up */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* Preload SMP state for the boot CPU */
void smp_prepare_boot_cpu(void)
{
        if (mp_ops->prepare_boot_cpu)
                mp_ops->prepare_boot_cpu();
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
}

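/*
 * Boot the secondary CPU @cpu and wait for it to come online before
 * returning.
 */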
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int err;

        err = mp_ops->boot_secondary(cpu, tidle);
        if (err)
                return err;

        /* Wait for the CPU to start and be ready to synchronise counters */
        if (!wait_for_completion_timeout(&cpu_starting,
                                         msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        synchronise_count_master(cpu);

        /* Wait for the CPU to finish startup and mark itself online */
        wait_for_completion(&cpu_running);
        return 0;
}

/* Not really SMP stuff: the profiling timer multiplier is ignored */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

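/*
 * Flush the entire TLB on every CPU: with MMID support a single global
 * invalidate (ginvt) covers all CPUs, otherwise an IPI is broadcast so
 * each CPU flushes its own TLB.
 */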
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        if (cpu_has_mmid) {
                htw_stop();
                ginvt_full();
                sync_ginv();
                instruction_hazard();
                htw_start();
                return;
        }

        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        drop_mmu_context((struct mm_struct *)mm);
}

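/*
 * Helpers for the TLB flush functions below: run @func with @info on all
 * other CPUs (smp_on_other_tlbs) or on every CPU including the local one
 * (smp_on_each_tlb).
 */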
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

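/*
 * The flush_tlb_*() functions below tear down stale translations. When the
 * affected mm can only be live on the local CPU, the other CPUs' contexts
 * are simply invalidated so a new ASID is allocated at their next
 * switch_mm(); when the mm may be active elsewhere, TLB flush IPIs are sent
 * instead.
 */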
void flush_tlb_mm(struct mm_struct *mm)
{
        if (!mm)
                return;

        if (atomic_read(&mm->mm_users) == 0)
                return;

        preempt_disable();

        if (cpu_has_mmid) {
                /*
                 * With MMIDs the invalidate performed by drop_mmu_context()
                 * below is globalized, so other CPUs need no extra work.
                 */
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, 0);
                }
        }
        drop_mmu_context(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        u32 old_mmid;

        preempt_disable();
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, mm));
                mtc0_tlbw_hazard();
                addr = round_down(start, PAGE_SIZE * 2);
                end = round_up(end, PAGE_SIZE * 2);
                do {
                        ginvt_va_mmid(addr);
                        sync_ginv();
                        addr += PAGE_SIZE * 2;
                } while (addr < end);
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
                local_flush_tlb_range(vma, start, end);
        } else {
                unsigned int cpu;
                int exec = vma->vm_flags & VM_EXEC;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_range() only fully flushes the icache
                         * for executable VMAs, so for non-executable VMAs
                         * invalidate the ASID without making the mm appear
                         * completely unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, !exec);
                }
                local_flush_tlb_range(vma, start, end);
        }
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        u32 old_mmid;

        preempt_disable();
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
                mtc0_tlbw_hazard();
                ginvt_va_mmid(page);
                sync_ginv();
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
                   (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
                local_flush_tlb_page(vma, page);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_page() only does partial flushes, so
                         * invalidate the ASID on other CPUs without making
                         * the mm appear completely unused by them.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                set_cpu_context(cpu, vma->vm_mm, 1);
                }
                local_flush_tlb_page(vma, page);
        }
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

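/*
 * Clock event broadcast support: relay a broadcast to each CPU in @mask via
 * an async SMP function call that invokes tick_receive_broadcast() on the
 * target.
 */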
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
        tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
        CSD_INIT(tick_broadcast_callee, NULL);

void tick_broadcast(const struct cpumask *mask)
{
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                smp_call_function_single_async(cpu, csd);
        }
}

#endif