// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *    -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

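/*
 * With no LLOCK/SCOND instructions, the generic atomic ops fall back to
 * grabbing this one global spinlock (see atomic_ops_lock() in asm/smp.h),
 * so it must also be exported for modules.
 */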
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif

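/*
 * Weak default: platforms with custom SMP glue (e.g. the ARC MCIP driver)
 * supply their own instance, overriding this empty one.
 */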
struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
    unsigned long dt_root = of_get_flat_dt_root();
    const char *buf;

    buf = of_get_flat_dt_prop(dt_root, name, NULL);
    if (!buf)
        return -EINVAL;

    if (cpulist_parse(buf, cpumask))
        return -EINVAL;

    return 0;
}

/*
 * Read from DeviceTree and setup cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
    struct cpumask cpumask;

    if (arc_get_cpu_map("possible-cpus", &cpumask)) {
        pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
            NR_CPUS);

        cpumask_setall(&cpumask);
    }

    if (!cpumask_test_cpu(0, &cpumask))
        panic("Master cpu (cpu[0]) is missing from cpu possible mask!");

    init_cpu_possible(&cpumask);
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call the early smp init hook. This can initialize a multi-core IP block
 *   which is common to several platforms (hence not part of the platform
 *   specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
    arc_init_cpu_possible();

    if (plat_smp_ops.init_early_smp)
        plat_smp_ops.init_early_smp();
}

/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
    /*
     * If the platform didn't set the present map already, do it now.
     * The boot cpu is set present already by init/main.c
     */
    if (num_present_cpus() <= 1)
        init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

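/*
 * Rationale for the uncached accessors below: an ARCv2 secondary may spin
 * on @wake_flag before its caches are enabled, so a cached write by the
 * master might never become visible to it; bypassing the cache on both
 * sides avoids that. ARCompact platforms get by with plain accesses.
 */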
#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)      f
#define __boot_write(f, v)  f = v

#else

#define __boot_read(f)      arc_read_uncached_32(&f)
#define __boot_write(f, v)  arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
    BUG_ON(cpu == 0);

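    /*
     * @pc is unused by this default kick: in the Run-on-reset scheme the
     * core is already executing from the reset vector, so setting the
     * wake flag alone releases it
     */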
    __boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
    /* for halt-on-reset, we've waited already */
    if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
        return;

    while (__boot_read(wake_flag) != cpu)
        ;

    __boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
    return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
    struct mm_struct *mm = &init_mm;
    unsigned int cpu = smp_processor_id();

    /* MMU, Caches, Vector Table, Interrupts etc */
    setup_processor();

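    /*
     * The idle task has no user address space; run kernel-mode on init_mm
     * (lazy TLB). mmget()/mmgrab() take user and kernel references so the
     * mm can't be torn down while this cpu is using it.
     */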
    mmget(mm);
    mmgrab(mm);
    current->active_mm = mm;
    cpumask_set_cpu(cpu, mm_cpumask(mm));

    /* Some SMP H/w setup - for each cpu */
    if (plat_smp_ops.init_per_cpu)
        plat_smp_ops.init_per_cpu(cpu);

    if (machine_desc->init_per_cpu)
        machine_desc->init_per_cpu(cpu);

    notify_cpu_starting(cpu);
    set_cpu_online(cpu, true);

    pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

    local_irq_enable();
    cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, the Secondary Processor is "HALT"ed:
 *  - It booted, but was halted in head.S
 *  - It was configured to halt-on-reset
 *  So it needs to be woken up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
    unsigned long wait_till;

    secondary_idle_tsk = idle;

    pr_info("Idle Task [%d] %p\n", cpu, idle);
    pr_info("Trying to bring up CPU%u ...\n", cpu);

    if (plat_smp_ops.cpu_kick)
        plat_smp_ops.cpu_kick(cpu,
                (unsigned long)first_lines_of_secondary);
    else
        arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

    /* wait for 1 sec after kicking the secondary */
    wait_till = jiffies + HZ;
    while (time_before(jiffies, wait_till)) {
        if (cpu_online(cpu))
            break;
    }

    if (!cpu_online(cpu)) {
        pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
        return -1;
    }

    secondary_idle_tsk = NULL;

    return 0;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
    IPI_EMPTY = 0,
    IPI_RESCHEDULE = 1,
    IPI_CALL_FUNC,
    IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out what msg was sent. For those which don't (ARC has a
 * dedicated IPI IRQ), the msg-type needs to be conveyed via per-cpu data
 */

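/* Per-cpu bitmask of pending msgs: bit n set <=> ipi_msg_type n needs service */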
static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
    unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
    unsigned long old, new;
    unsigned long flags;

    pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

    local_irq_save(flags);

    /*
     * Atomically write new msg bit (in case others are writing too),
     * and read back old value
     */
    do {
        new = old = *ipi_data_ptr;
        new |= 1U << msg;
    } while (cmpxchg(ipi_data_ptr, old, new) != old);

    /*
     * Call the platform specific IPI kick function, but avoid if possible:
     * Only do so if there's no pending msg from other concurrent sender(s).
     * Otherwise, receiver will see this msg as well when it takes the
     * IPI corresponding to that msg. This is true, even if it is already in
     * IPI handler, because !@old means it has not yet dequeued the msg(s)
     * so @new msg can be a free-loader
     */
    if (plat_smp_ops.ipi_send && !old)
        plat_smp_ops.ipi_send(cpu);

    local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
    unsigned int cpu;

    for_each_cpu(cpu, callmap)
        ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
    ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
    struct cpumask targets;

    cpumask_copy(&targets, cpu_online_mask);
    cpumask_clear_cpu(smp_processor_id(), &targets);
    ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
    ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
    ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
    machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
    int rc = 0;

    switch (msg) {
    case IPI_RESCHEDULE:
        scheduler_ipi();
        break;

    case IPI_CALL_FUNC:
        generic_smp_call_function_interrupt();
        break;

    case IPI_CPU_STOP:
        ipi_cpu_stop();
        break;

    default:
        rc = 1;
    }

    return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
    unsigned long pending;
    unsigned long __maybe_unused copy;

    pr_debug("IPI [%ld] received on cpu %d\n",
         *this_cpu_ptr(&ipi_data), smp_processor_id());

    if (plat_smp_ops.ipi_clear)
        plat_smp_ops.ipi_clear(irq);

    /*
     * "dequeue" the msg corresponding to this IPI (and possibly other
     * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
     */
    copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

    do {
        unsigned long msg = __ffs(pending);
        int rc;

        rc = __do_IPI(msg);
        if (rc)
            pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
        pending &= ~(1U << msg);
    } while (pending);

    return IRQ_HANDLED;
}


/*
 * API called by platform code to hook up the arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for the IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
    int *dev = per_cpu_ptr(&ipi_dev, cpu);
    unsigned int virq = irq_find_mapping(NULL, hwirq);

    if (!virq)
        panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

    /* Boot cpu calls request, all call enable */
    if (!cpu) {
        int rc;

        rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
        if (rc)
            panic("Percpu IRQ request failed for %u\n", virq);
    }

    enable_percpu_irq(virq, 0);

    return 0;
}
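
/*
 * A minimal usage sketch, assuming a hypothetical platform intc driver
 * (the names below are illustrative only, not a real driver):
 *
 *	static void plat_ipi_init_per_cpu(int cpu)
 *	{
 *		// IPI line's hwirq in the root irq domain; must have been
 *		// flagged with irq_set_percpu_devid() when it was mapped
 *		smp_ipi_irq_setup(cpu, PLAT_IPI_HWIRQ);
 *	}
 *
 * wired up via plat_smp_ops.init_per_cpu or machine_desc->init_per_cpu,
 * which are invoked for each secondary from start_kernel_secondary() above.
 */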