![]() |
|
|||
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This code is specific to the hardware found on ARM Realview and
 * Versatile Express platforms where the CPUs are unable to be individually
 * woken, and where there is no way to hot-unplug CPUs. Real platforms
 * should not copy this code.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#include "platsmp.h"

/*
 * versatile_cpu_release controls the release of CPUs from the holding
 * pen in headsmp.S, which exists because we are not always able to
 * control the release of individual CPUs from the board firmware.
 * Production platforms do not need this.
 *
 * -1 means "no CPU is being released"; otherwise it holds the logical
 * map (MPIDR-derived) id of the one CPU allowed to leave the pen.
 */
volatile int versatile_cpu_release = -1;

/*
 * Write versatile_cpu_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void versatile_write_cpu_release(int val)
{
	versatile_cpu_release = val;
	/* order the store above before the cache maintenance below */
	smp_wmb();
	/*
	 * Clean the line out past the caches so a pened CPU that is not
	 * yet coherent still observes the new value.
	 */
	sync_cache_w(&versatile_cpu_release);
}

/*
 * versatile_lock exists to avoid running the loops_per_jiffy delay loop
 * calibrations on the secondary CPU while the requesting CPU is using
 * the limited-bandwidth bus - which affects the calibration value.
 * Production platforms do not need this.
 */
static DEFINE_RAW_SPINLOCK(versatile_lock);

/*
 * Runs on the newly-booted secondary CPU: signal the boot CPU that we
 * have left the holding pen, then serialise against it before doing
 * anything bus-heavy (see versatile_lock above).
 */
void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	versatile_write_cpu_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	raw_spin_lock(&versatile_lock);
	raw_spin_unlock(&versatile_lock);
}

/*
 * Release one secondary CPU from the holding pen.
 *
 * Returns 0 once the secondary has acknowledged (by resetting
 * versatile_cpu_release to -1 in versatile_secondary_init()), or
 * -ENOSYS if it fails to respond within one second.
 */
int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	raw_spin_lock(&versatile_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/* Poll (up to 1s) for the secondary to clear the release word. */
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		/* pair with the secondary's write-side ordering */
		smp_rmb();
		if (versatile_cpu_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	raw_spin_unlock(&versatile_lock);

	return versatile_cpu_release != -1 ? -ENOSYS : 0;
}
[ Source navigation ] | [ Diff markup ] | [ Identifier search ] | [ general search ] |
This page was automatically generated by the 2.1.0 LXR engine. The LXR team |
![]() ![]() |