#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

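/*
 *	Generic SMP support: helpers for calling functions on other CPUs.
 */
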
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

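/* Signatures for cross-CPU callbacks and the optional per-CPU predicate. */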
typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

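/*
 * Cross-call descriptor.  The embedded __call_single_node shares its layout
 * with struct irq_work so both kinds of entry can sit on the same llist.
 */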
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

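/* Use __aligned() so a single csd does not straddle two cache lines. */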
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)

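/*
 * Illustrative usage sketch (not part of this header): a csd is normally
 * initialized once and then handed to smp_call_function_single_async():
 *
 *	static void do_work(void *info) { ... }
 *	static call_single_data_t csd;
 *
 *	INIT_CSD(&csd, do_work, NULL);
 *	smp_call_function_single_async(target_cpu, &csd);
 *
 * The csd must not be reused until the previous asynchronous call has
 * completed.
 */
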
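/*
 * Enqueue @node on @cpu's call_single_queue and kick the remote CPU if the
 * queue was previously empty.
 */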
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

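/* total number of cpus in this system (may exceed NR_CPUS) */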
extern unsigned int total_cpus;

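/*
 * Run @func with @info on @cpuid; if @wait is set, wait until @func has
 * completed there.  Returns 0 on success, else a negative errno.
 */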
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

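/*
 * Run @func with @info on every online CPU in @mask for which @cond_func
 * (NULL means "all of them") returns true; wait for completion if @wait.
 */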
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

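/*
 * Queue @csd to run on @cpu without waiting for completion; fails with
 * -EBUSY if @csd is still pending from a previous call.
 */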
int smp_call_function_single_async(int cpu, struct __call_single_data *csd);

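/*
 * CPU-stopping functions used on panic.  All have weak default definitions;
 * architecture code may override them.
 */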
void panic_smp_self_stop(void);
void nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);

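/*
 * Call @func with @info on every online CPU; if @wait is set, do not return
 * until @func has completed everywhere.
 */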
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}

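/**
 * on_each_cpu_mask(): Run a function on processors specified by cpumask,
 * which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run.  This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 */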
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}

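/*
 * Call @func with @info on each online CPU for which @cond_func returns
 * true.  @cond_func is passed the cpu id and @info and must not sleep.
 */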
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

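/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */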
extern void smp_send_stop(void);

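/*
 * sends a 'reschedule' event to another CPU:
 */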
extern void smp_send_reschedule(int cpu);

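/*
 * Prepare machine for booting other CPUs.
 */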
extern void smp_prepare_cpus(unsigned int max_cpus);

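/*
 * Bring a CPU up, using @tidle as its idle task.
 */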
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

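/*
 * Final polishing of CPUs once all of them have been brought up.
 */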
extern void smp_cpus_done(unsigned int max_cpus);

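/*
 * Call a function on all other processors.
 */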
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

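/*
 * Generic and arch helpers
 */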
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

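/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */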
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

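/*
 *	These macros fold the SMP functionality into a single CPU system
 */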
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

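/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For use when the caller knows the CPU id may change underneath it and can
 * tolerate that.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used whenever
 * possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * With CONFIG_DEBUG_PREEMPT these assumptions are verified and a warning is
 * issued when smp_processor_id() is used while the CPU id is not stable.
 */

/*
 * Allow the architecture to differentiate between a stable and an unstable
 * read of the current CPU id.
 */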
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()

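/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */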
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

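/*
 * Call @func with @par on @cpu and wait for it to return; if @phys is set,
 * additionally pin to the matching physical CPU (for virtualized setups).
 */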
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);

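/* SMP core functions */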
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */