/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/mcpm.h
 *
 * Created by: Nicolas Pitre, April 2012
 * Copyright: (C) 2012-2013 Linaro Limited
 */

#ifndef MCPM_H
#define MCPM_H

/*
 * Maximum number of possible clusters / CPUs per cluster.
 *
 * This should be sufficient for quite a while, while keeping the
 * (assembly) code simpler.  When this starts to grow then we'll have
 * to consider dynamic allocation.
 */
#define MAX_CPUS_PER_CLUSTER	4

#ifdef CONFIG_MCPM_QUAD_CLUSTER
#define MAX_NR_CLUSTERS		4
#else
#define MAX_NR_CLUSTERS		2
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Platform specific code should use this symbol to set up secondary
 * entry location for processors to use when released from reset.
 */
extern void mcpm_entry_point(void);

/*
 * This is used to indicate where the given CPU from given cluster should
 * branch once it is ready to re-enter the kernel using ptr, or NULL if it
 * should be gated.  A gated CPU is held in a WFE loop until its vector
 * becomes non NULL.
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);

/*
 * This sets an early poke i.e. a value to be poked into some address
 * from very early assembly code before the CPU is ungated.  The
 * address must be physical, and if 0 then nothing will happen.
 */
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val);

/*
 * CPU/cluster power operations API for higher subsystems to use.
 */

/**
 * mcpm_is_available - returns whether MCPM is initialized and available
 *
 * This returns true or false accordingly.
 */
bool mcpm_is_available(void);

/**
 * mcpm_cpu_power_up - make given CPU in given cluster runnable
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * The identified CPU is brought out of reset.  If the cluster was powered
 * down then it is brought up as well, taking care not to let the other CPUs
 * in the cluster run, and ensuring appropriate cluster setup.
 *
 * Caller must ensure the appropriate entry vector is initialized with
 * mcpm_set_entry_vector() prior to calling this.
 *
 * This must be called in a sleepable context.  However, the implementation
 * is strongly encouraged to return early and let the operation happen
 * asynchronously, especially when significant delays are expected.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);

/**
 * mcpm_cpu_power_down - power the calling CPU down
 *
 * The calling CPU is powered down.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster is prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
 * On success this does not return.  Re-entry in the kernel is expected
 * via mcpm_entry_point.
 *
 * This will return if mcpm_platform_register() has not been called
 * previously in which case the caller should take appropriate action.
 *
 * On success, the CPU is not guaranteed to be truly halted until
 * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
 * specified cpu.  Until then, other CPUs should make sure they do not
 * trash memory the target CPU might be executing/accessing.
 */
void mcpm_cpu_power_down(void);

/**
 * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
 *	make sure it is powered off
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * Call this function to ensure that a pending powerdown has taken
 * effect and the CPU is safely parked before performing non-mcpm
 * operations that may affect the CPU (such as kexec trashing the
 * kernel text).
 *
 * It is *not* necessary to call this function if you only need to
 * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
 * event.
 *
 * Do not call this function unless the specified CPU has already
 * called mcpm_cpu_power_down() or has committed to doing so.
 *
 * @return:
 *	- zero if the CPU is in a safely parked state
 *	- nonzero otherwise (e.g., timeout)
 */
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);

/**
 * mcpm_cpu_suspend - bring the calling CPU in a suspended state
 *
 * The calling CPU is suspended.  This is similar to mcpm_cpu_power_down()
 * except for possible extra platform specific configuration steps to allow
 * an asynchronous wake-up e.g. with a pending interrupt.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster may be prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
 * On success this does not return.  Re-entry in the kernel is expected
 * via mcpm_entry_point.
 *
 * This will return if mcpm_platform_register() has not been called
 * previously in which case the caller should take appropriate action.
 */
void mcpm_cpu_suspend(void);

/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
 *
 * This lets the platform specific backend code perform needed housekeeping
 * work.  This must be called by the newly activated CPU as soon as it is
 * fully operational in kernel space, before it enables interrupts.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_powered_up(void);

/*
 * Platform specific callbacks used in the implementation of the above API.
 *
 * cpu_powerup:
 * Make given CPU runnable.  Called with MCPM lock held and IRQs disabled.
 * The given cluster is assumed to be set up (cluster_powerup would have
 * been called beforehand).  Must return 0 for success or negative error code.
 *
 * cluster_powerup:
 * Set up power for given cluster.  Called with MCPM lock held and IRQs
 * disabled.  Called before first cpu_powerup when cluster is down.  Must
 * return 0 for success or negative error code.
 *
 * cpu_suspend_prepare:
 * Special suspend configuration.  Called on target CPU with MCPM lock held
 * and IRQs disabled.  This callback is optional.  If provided, it is called
 * before cpu_powerdown_prepare.
 *
 * cpu_powerdown_prepare:
 * Configure given CPU for power down.  Called on target CPU with MCPM lock
 * held and IRQs disabled.  Power down must be effective only at the next
 * WFI instruction.
 *
 * cluster_powerdown_prepare:
 * Configure given cluster for power down.  Called on one CPU from target
 * cluster with MCPM lock held and IRQs disabled.  A cpu_powerdown_prepare
 * for each CPU in the cluster has happened when this occurs.
 *
 * cpu_cache_disable:
 * Clean and disable CPU level cache for the calling CPU.  Called with IRQs
 * disabled only.  The CPU is no longer cache coherent with the rest of the
 * system when this returns.
 *
 * cluster_cache_disable:
 * Clean and disable the cluster wide cache as well as the CPU level cache
 * for the calling CPU.  No call to cpu_cache_disable will happen for this
 * CPU.  Called with IRQs disabled and only when all the other CPUs are done
 * with their own cpu_cache_disable.  The cluster is no longer cache coherent
 * with the rest of the system when this returns.
 *
 * cpu_is_up:
 * Called on given CPU after it has been powered up or resumed.  The MCPM lock
 * is held and IRQs disabled.  This callback is optional.
 *
 * cluster_is_up:
 * Called by the first CPU to be powered up or resumed in given cluster.
 * The MCPM lock is held and IRQs disabled.  This callback is optional.  If
 * provided, it is called before cpu_is_up for that CPU.
 *
 * wait_for_powerdown:
 * Wait until given CPU is powered down.  This is called in sleeping context.
 * Some reasonable timeout must be considered.  Must return 0 for success or
 * negative error code.
 */
struct mcpm_platform_ops {
	int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
	int (*cluster_powerup)(unsigned int cluster);
	void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
	void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
	void (*cluster_powerdown_prepare)(unsigned int cluster);
	void (*cpu_cache_disable)(void);
	void (*cluster_cache_disable)(void);
	void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
	void (*cluster_is_up)(unsigned int cluster);
	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
};

/**
 * mcpm_platform_register - register platform specific power methods
 *
 * @ops: mcpm_platform_ops structure to register
 *
 * An error is returned if the registration has been done previously.
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);

/**
 * mcpm_sync_init - Initialize the cluster synchronization support
 *
 * @power_up_setup: platform specific function invoked during very
 *	early CPU/cluster bringup stage.
 *
 * This prepares memory used by vlocks and the MCPM state machine used
 * across CPUs that may have their caches active or inactive.  Must be
 * called only after a successful call to mcpm_platform_register().
 *
 * The power_up_setup argument is a pointer to assembly code called when
 * the MMU and caches are still disabled during boot and no stack space is
 * available.  The affinity level passed to that code corresponds to the
 * resource that needs to be initialized (e.g. 1 for cluster level, 0 for
 * CPU level).  Proper exclusion mechanisms are already activated at that
 * point.
 */
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level));

/**
 * mcpm_loopback - make a run through the MCPM low-level code
 *
 * @cache_disable: pointer to function performing cache disabling
 *
 * This exercises the MCPM machinery by soft resetting the CPU and branching
 * to the MCPM low-level entry code before returning to the caller.
 * The @cache_disable function must do the necessary cache disabling to
 * let the regular kernel init code turn it back on as if the CPU was
 * hotplugged in.  The MCPM state machine is set as if the cluster was
 * initialized meaning the power_up_setup callback passed to mcpm_sync_init()
 * will be invoked for all affinity levels.  This may be useful to initialize
 * some resources such as enabling the CCI that requires the cache to be off,
 * or simply for testing purposes.
 */
int __init mcpm_loopback(void (*cache_disable)(void));

void __init mcpm_smp_set_ops(void);

/*
 * Synchronisation structures for coordinating safe cluster setup/teardown.
 * This is private to the MCPM core code and shared between C and assembly.
 * When modifying this structure, make sure you update the MCPM_SYNC_ defines
 * to match.
 */
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};

#else

/*
 * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
 * cannot be included in asm files.  Let's work around the conflict like this.
 */
#include <asm/asm-offsets.h>
#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE

#endif /* ! __ASSEMBLY__ */

/* Definitions for mcpm_sync_struct */
#define CPU_DOWN		0x11
#define CPU_COMING_UP		0x12
#define CPU_UP			0x13
#define CPU_GOING_DOWN		0x14

#define CLUSTER_DOWN		0x21
#define CLUSTER_UP		0x22
#define CLUSTER_GOING_DOWN	0x23

#define INBOUND_NOT_COMING_UP	0x31
#define INBOUND_COMING_UP	0x32

/*
 * Offsets for the mcpm_sync_struct members, for use in asm.
 * We don't want to make them global to the kernel via asm-offsets.c.
 */
#define MCPM_SYNC_CLUSTER_CPUS	0
#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE
#define MCPM_SYNC_CLUSTER_CLUSTER \
	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER)
#define MCPM_SYNC_CLUSTER_INBOUND \
	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE)
#define MCPM_SYNC_CLUSTER_SIZE \
	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE)

#endif