// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_IRQ_WORK,
	IPI_MAX
};

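/*
 * Per-cpu IPI state: a bitmask of pending ipi_message_type requests
 * plus a delivery counter per type.  The two fields are kept on
 * separate cachelines, presumably so that sender-written bits and
 * receiver-written stats do not false-share.
 */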
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

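/*
 * IPI entry point: atomically fetch-and-clear the pending bitmask with
 * xchg() and dispatch each requested operation, looping until no new
 * requests have arrived.  Any bit at or above IPI_MAX is unexpected
 * and trips the BUG_ON().
 */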
static irqreturn_t handle_ipi(int irq, void *dev)
{
	unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;

	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

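/*
 * The machine-level IPI trigger is not known at compile time; the
 * interrupt controller driver registers it during boot via
 * set_send_ipi().  A hypothetical caller (names are illustrative, not
 * from this file) would do:
 *
 *	set_send_ipi(my_intc_send_ipi, my_intc_ipi_irq);
 */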
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

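/*
 * Post the request bit on every target cpu, then raise the hardware
 * IPI.  The smp_mb() orders the set_bit() stores before the trigger so
 * that a receiver entering handle_ipi() observes its pending bit.
 */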
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

static const char * const ipi_names[] = {
	[IPI_EMPTY]		= "Empty interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_IRQ_WORK]		= "Irq work interrupts",
};

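/* Contribute the per-cpu IPI counters to /proc/interrupts. */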
int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ",
				   per_cpu_ptr(&ipi_data, cpu)->stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}

	return 0;
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

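/* Park a cpu on behalf of smp_send_stop(), e.g. at reboot or panic. */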
static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

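/* irq_work requests are delivered by a self-IPI to the current cpu. */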
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;

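/*
 * Request and enable the per-cpu IPI interrupt published through
 * set_send_ipi().  ipi_dummy_dev exists only to provide the non-NULL
 * per-cpu dev_id cookie that request_percpu_irq() requires.
 */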
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}

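/*
 * Scan the devicetree cpu nodes and mark every available cpu whose
 * hwid fits below NR_CPUS as possible and present.
 */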
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpu = of_get_cpu_hwid(node, 0);
		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
volatile unsigned int secondary_msa1;
volatile unsigned int secondary_pgd;

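/*
 * Boot a secondary cpu: stage the idle thread's stack and the boot
 * cpu's control-register values in the volatile secondary_* words,
 * flush them to memory, then either kick the target with an IPI (if it
 * is already out of reset) or release it from reset, and wait for it
 * to come online.
 */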
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_hint2 = mfcr("cr<21, 1>");
	secondary_ccr  = mfcr("cr18");
	secondary_msa1 = read_mmu_msa1();
	secondary_pgd = mfcr("cr<29, 15>");

	/*
	 * Because the other CPUs are still held in reset, flush the data
	 * staged above out of the cache so the secondary CPUs can read
	 * it in csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable the cpu in the SMP reset ctrl reg */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the cpu to come online */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

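/*
 * C entry point for a secondary cpu (presumably reached from the
 * low-level _start_smp_secondary trampoline declared above): restore
 * the state staged by __cpu_up(), adopt init_mm, mark ourselves online
 * and enter the idle loop.
 */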
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr<21, 1>", secondary_hint2);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
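/*
 * Hotplug teardown: mark the cpu offline, migrate its irqs away and
 * drop it from every task's mm cpumask before it goes down.
 */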
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}

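/*
 * Idle loop for an offlined cpu: report death, then spin in
 * arch_cpu_idle() until a later __cpu_up() republishes
 * secondary_stack, at which point we jump back into
 * csky_start_secondary() on the fresh stack.
 */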
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov	sp, %0\n"
		"mov	r8, %0\n"
		"jmpi	csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif