// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Boot handoff data for an incoming secondary CPU: the idle task (and
 * hence the stack) it should run on, plus a status word it can update
 * while the MMU is still off.
 */
struct secondary_data secondary_data;

/* Number of CPUs which aren't online, but looping in kernel text. */
static int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP,
	NR_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;

static void ipi_setup(int cpu);

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops->cpu_boot)
		return ops->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * Tell the secondary CPU which idle task (and stack) to run on
	 * before releasing it.
	 */
	secondary_data.task = idle;
	update_cpu_boot_status(CPU_MMU_OFF);

	/* Now bring the CPU into our world. */
	ret = boot_secondary(cpu, idle);
	if (ret) {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	/*
	 * CPU was successfully started, wait for it to come online or
	 * time out.
	 */
	wait_for_completion_timeout(&cpu_running,
				    msecs_to_jiffies(5000));
	if (cpu_online(cpu))
		return 0;

	pr_crit("CPU%u: failed to come online\n", cpu);
	secondary_data.task = NULL;
	status = READ_ONCE(secondary_data.status);
	if (status == CPU_MMU_OFF)
		status = READ_ONCE(__early_cpu_boot_status);

	switch (status & CPU_BOOT_STATUS_MASK) {
	default:
		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
		       cpu, status);
		cpus_stuck_in_kernel++;
		break;
	case CPU_KILL_ME:
		if (!op_cpu_kill(cpu)) {
			pr_crit("CPU%u: died during early boot\n", cpu);
			break;
		}
		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		fallthrough;
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in kernel\n", cpu);
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
		if (status & CPU_STUCK_REASON_NO_GRAN) {
			pr_crit("CPU%u: does not support %luK granule\n",
				cpu, PAGE_SIZE / SZ_1K);
		}
		cpus_stuck_in_kernel++;
		break;
	case CPU_PANIC_KERNEL:
		panic("CPU%u detected unsupported configuration\n", cpu);
	}

	return -EIO;
}

static void init_gic_priority_masking(void)
{
	u32 cpuflags;

	if (WARN_ON(!gic_enable_sre()))
		return;

	cpuflags = read_sysreg(daif);

	WARN_ON(!(cpuflags & PSR_I_BIT));
	WARN_ON(!(cpuflags & PSR_F_BIT));

	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	const struct cpu_operations *ops;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	rcu_cpu_starting(cpu);
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	ops = get_cpu_ops(cpu);
	if (ops->cpu_postboot)
		ops->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();
	store_cpu_topology(cpu);

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	ipi_setup(cpu);

	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr,
		read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have cpu_ops, so test for it.
	 */
	if (!ops || !ops->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (ops->cpu_disable)
		return ops->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU.
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!ops->cpu_kill)
		return 0;

	return ops->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or it times out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of. */
	(void)cpu_report_death();

	/*
	 * Actually shut down the CPU. This must never fail. The specific
	 * hotplug mechanism must perform all required cache maintenance to
	 * ensure that no dirty lines are lost in the process of shutting
	 * down the CPU.
	 */
	ops->cpu_die(cpu);

	BUG();
}
#endif

static void __cpu_try_die(int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_die)
		ops->cpu_die(cpu);
#endif
}

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);
	rcu_report_dead(cpu);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		update_cpu_boot_status(CPU_KILL_ME);
		__cpu_try_die(cpu);
	}

	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");

	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
		kvm_compute_layout();
		kvm_apply_hyp_relocations();
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
	/*
	 * The runtime per-cpu areas have been allocated by
	 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
	 * freed shortly, so we must move over to the runtime per-cpu area.
	 */
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();

	/*
	 * We now know enough about the boot CPU to apply the
	 * alternatives that cannot wait until interrupt handling
	 * and/or scheduling is enabled.
	 */
	apply_boot_alternatives();

	/* Conditionally switch to GIC PMR for interrupt masking */
	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	kasan_init_hw_tags();
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * CPU. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize CPU operations for a logical cpu and
 * set it in the possible mask on success.
 */
static int __init smp_cpu_setup(int cpu)
{
	const struct cpu_operations *ops;

	if (init_cpu_ops(cpu))
		return -ENODEV;

	ops = get_cpu_ops(cpu);
	if (ops->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}
EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success.
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	set_cpu_logical_map(cpu_count, hwid);

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set up the ACPI parking protocol cpu entry while initializing the
	 * cpu_logical_map, so that MADT entries need not be parsed again
	 * later (a valid cpu_logical_map entry should carry the parking
	 * protocol data needed to initialize the cpu, whether or not the
	 * parking protocol is available).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * Do a walk of the MADT to determine how many CPUs we have,
	 * including disabled CPUs, and to collect the information we
	 * need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_hwid(dn, 0);

		if (hwid & ~MPIDR_HWID_BITMASK)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
			       dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
				       dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been initialized and the
			 * boot cpu doesn't need the enable-method, so continue
			 * without incrementing cpu_count.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		set_cpu_logical_map(cpu_count, hwid);

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				set_cpu_logical_map(i, INVALID_HWID);
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	const struct cpu_operations *ops;
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by the bootloader (forcing the kernel to run on
	 * one CPU), don't bother with the rest.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		ops = get_cpu_ops(cpu);
		if (!ops)
			continue;

		err = ops->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNC] = "Function call interrupts",
	[IPI_CPU_STOP] = "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
	[IPI_TIMER] = "Timer broadcast interrupts",
	[IPI_IRQ_WORK] = "IRQ work interrupts",
	[IPI_WAKEUP] = "CPU wake-up interrupts",
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

unsigned long irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}

	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static void local_cpu_stop(void)
{
	set_cpu_online(smp_processor_id(), false);

	local_daif_mask();
	sdei_mask_local_cpu();
	cpu_park_loop();
}

/*
 * Called by panic() on any CPU that loses the race for the panic lock;
 * park it the same way as an IPI_CPU_STOP.
 */
void panic_smp_self_stop(void)
{
	local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		__cpu_try_die(cpu);

	/* just in case */
	cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts.
 */
static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		local_cpu_stop();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			ipi_cpu_crash_stop(cpu, get_irq_regs());

			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}

static irqreturn_t ipi_handler(int irq, void *data)
{
	do_handle_IPI(irq - ipi_irq_base);
	return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__ipi_send_mask(ipi_desc[ipinr], target);
}

static void ipi_setup(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}
#endif

void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < NR_IPI);
	nr_ipi = min(n, NR_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &cpu_number);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	ipi_irq_base = ipi_base;

	/* Setup the boot CPU immediately */
	ipi_setup(smp_processor_id());
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_other_online_cpus() && timeout--)
		udelay(1);

	if (num_other_online_cpus())
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in the panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(any_cpu);

	if (ops && ops->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables ||
	       is_protected_kvm_enabled();
}