0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef _ASM_POWERPC_SMP_H
0013 #define _ASM_POWERPC_SMP_H
0014 #ifdef __KERNEL__
0015
0016 #include <linux/threads.h>
0017 #include <linux/cpumask.h>
0018 #include <linux/kernel.h>
0019 #include <linux/irqreturn.h>
0020
0021 #ifndef __ASSEMBLY__
0022
0023 #ifdef CONFIG_PPC64
0024 #include <asm/paca.h>
0025 #endif
0026 #include <asm/percpu.h>
0027
/* Logical id of the CPU the kernel booted on. */
extern int boot_cpuid;
/* Number of secondary CPUs still spinning in the early hold loop. */
extern int spinning_secondaries;
/* Logical-cpu -> hardware (physical) cpu id translation table. */
extern u32 *cpu_to_phys_id;
/* True when coregroup topology information is in use. */
extern bool coregroup_enabled;

/* Resolve the chip (socket) id that logical @cpu belongs to. */
extern int cpu_to_chip_id(int cpu);
extern int *chip_id_lookup_table;

/*
 * Per-CPU masks of sibling threads sharing an L1/L2/L3 cache with this
 * CPU. NOTE(review): presumably populated from firmware thread-group
 * properties — the setup code is elsewhere; confirm against smp.c.
 */
DECLARE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
DECLARE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
0039
0040 #ifdef CONFIG_SMP
0041
/*
 * Platform-specific SMP operations. Each platform supplies an instance;
 * the generic powerpc SMP layer calls through the global "smp_ops"
 * pointer declared later in this header.
 */
struct smp_ops_t {
	/* Send IPI message @msg (one of PPC_MSG_*) to @cpu. */
	void (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	/* Raise the single muxed IPI on @cpu (message demuxed by target). */
	void (*cause_ipi)(int cpu);
#endif
	/* Deliver an NMI-level IPI to @cpu; returns platform-defined status. */
	int (*cause_nmi_ipi)(int cpu);
	/* Detect/initialize the IPI mechanism at boot. */
	void (*probe)(void);
	/* Release secondary cpu @nr so it can enter the kernel. */
	int (*kick_cpu)(int nr);
	/* Prepare cpu @nr before bring-up; may fail with an error code. */
	int (*prepare_cpu)(int nr);
	/* Per-cpu setup run as cpu @nr comes online. */
	void (*setup_cpu)(int nr);
	/* Called once after all CPUs have been brought up. */
	void (*bringup_done)(void);
	/* Timebase synchronization handshake: the incoming cpu takes the
	 * timebase value that an online cpu gives. */
	void (*take_timebase)(void);
	void (*give_timebase)(void);
	/* Hotplug: detach the current cpu; 0 on success. */
	int (*cpu_disable)(void);
	/* Hotplug: retire/wait for offlined cpu @nr. */
	void (*cpu_die)(unsigned int nr);
	/* Non-zero if cpu @nr may be booted. */
	int (*cpu_bootable)(unsigned int nr);
#ifdef CONFIG_HOTPLUG_CPU
	/* Final self-offline path, run on the dying cpu itself. */
	void (*cpu_offline_self)(void);
#endif
};
0062
/* Task for a secondary cpu entering start_secondary(). */
extern struct task_struct *secondary_current;

void start_secondary(void *unused);
/* Run @fn on @cpu via NMI IPI; @delay_us bounds the wait. */
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
/* Variant safe from more restricted contexts — NOTE(review): confirm
 * exact contract against the definition in smp.c. */
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
/* Generic software timebase-sync implementations, usable as smp_ops
 * give_timebase/take_timebase callbacks. */
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

/* Cached Processor Version Register value for each CPU. */
DECLARE_PER_CPU(unsigned int, cpu_pvr);

#ifdef CONFIG_HOTPLUG_CPU
/* Generic helpers for the CPU hotplug state machine. */
int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
int is_cpu_dead(unsigned int cpu);
#else
/* Without CPU hotplug, marking a cpu "up" is a no-op. */
#define generic_set_cpu_up(i) do { } while (0)
#endif
0085
#ifdef CONFIG_PPC64
/* 64-bit: the per-cpu paca caches both logical and hardware cpu ids. */
#define raw_smp_processor_id() (local_paca->paca_index)
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
#else

/* 32-bit: logical-to-hardware cpu id table, indexed by logical cpu. */
extern int smp_hw_index[];

#define raw_smp_processor_id() (current_thread_info()->cpu)
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])

/* Look up the hardware cpu id for logical @cpu. */
static inline int get_hard_smp_processor_id(int cpu)
{
	return smp_hw_index[cpu];
}

/* Record hardware id @phys for logical @cpu. */
static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	smp_hw_index[cpu] = phys;
}
#endif
0106
/* Per-CPU topology masks maintained by the SMP/topology setup code. */
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
0111
/* Mask of @cpu's thread siblings (per-cpu cpu_sibling_map). */
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
	return per_cpu(cpu_sibling_map, cpu);
}
0116
/* Mask of cpus in the same core grouping as @cpu (per-cpu cpu_core_map). */
static inline struct cpumask *cpu_core_mask(int cpu)
{
	return per_cpu(cpu_core_map, cpu);
}
0121
/* Mask of cpus sharing an L2 cache with @cpu (per-cpu cpu_l2_cache_map). */
static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
	return per_cpu(cpu_l2_cache_map, cpu);
}
0126
/* Mask of @cpu's small-core siblings (per-cpu cpu_smallcore_map). */
static inline struct cpumask *cpu_smallcore_mask(int cpu)
{
	return per_cpu(cpu_smallcore_map, cpu);
}
0131
/* Core id for logical @cpu. */
extern int cpu_to_core_id(int cpu);

/* Topology properties discovered at boot — NOTE(review): set by the SMP
 * setup code elsewhere; confirm exact semantics there. */
extern bool has_big_cores;
extern bool thread_group_shares_l2;
extern bool thread_group_shares_l3;

/* Tell the generic scheduler code we provide our own cpu_smt_mask(). */
#define cpu_smt_mask cpu_smt_mask
#ifdef CONFIG_SCHED_SMT
/*
 * Mask used by the scheduler for SMT-level balancing: with big cores
 * present, use the small-core sibling mask; otherwise fall back to the
 * full thread-sibling mask.
 */
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
	return has_big_cores ? per_cpu(cpu_smallcore_map, cpu)
			     : per_cpu(cpu_sibling_map, cpu);
}
#endif
0148
0149
0150
0151
0152
/* IPI message numbers, passed to smp_ops->message_pass(). */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
#define PPC_MSG_TICK_BROADCAST 2
#define PPC_MSG_NMI_IPI 3

/* Extra message slot beyond the four above — NOTE(review): appears to be
 * platform-specific (real-mode host action); confirm its users. */
#define PPC_MSG_RM_HOST_ACTION 4

/* Special @cpu value for smp_send_nmi_ipi(): every cpu except self. */
#define NMI_IPI_ALL_OTHERS -2

#ifdef CONFIG_NMI_IPI
extern int smp_handle_nmi_ipi(struct pt_regs *regs);
#else
/* No NMI IPI support: report "not handled". */
static inline int smp_handle_nmi_ipi(struct pt_regs *regs) { return 0; }
#endif
0168
0169
/* For irq controllers that have a dedicated interrupt per message. */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];

/* For irq controllers that mux all messages onto a single IPI. */
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
extern void smp_muxed_ipi_set_message(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);
extern irqreturn_t smp_ipi_demux_relaxed(void);

/* Platform SMP init entry points and logical cpu map setup. */
void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_setup_cpu_maps(void);

/* Arch hooks called by the generic CPU hotplug core. */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
0185
#else
/* !CONFIG_SMP: single-cpu fallbacks. */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
#define thread_group_shares_l2 0
#define thread_group_shares_l3 0
/* With a single cpu, every topology mask is just that cpu. */
static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
	return cpumask_of(cpu);
}

static inline const struct cpumask *cpu_smallcore_mask(int cpu)
{
	return cpumask_of(cpu);
}

static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
{
	return cpumask_of(cpu);
}
#endif
0207
#ifdef CONFIG_PPC64
/* 64-bit: hardware cpu ids live in the per-cpu paca. */
static inline int get_hard_smp_processor_id(int cpu)
{
	return paca_ptrs[cpu]->hw_cpu_id;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	paca_ptrs[cpu]->hw_cpu_id = phys;
}
#else

#ifndef CONFIG_SMP
/* 32-bit UP: only the boot cpu exists, so @cpu is ignored.
 * (The 32-bit SMP variants are defined earlier in this header.) */
extern int boot_cpuid_phys;
static inline int get_hard_smp_processor_id(int cpu)
{
	return boot_cpuid_phys;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
	boot_cpuid_phys = phys;
}
#endif
#endif
0233
#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
/* Release secondaries held in the early spin loop. */
extern void smp_release_cpus(void);
#else
static inline void smp_release_cpus(void) { }
#endif
0239
/* SMT thread count captured at boot — NOTE(review): confirm whether this
 * is a count or a flag against its definition. */
extern int smt_enabled_at_boot;

/* MPIC-based helpers and generic routines usable as smp_ops callbacks. */
extern void smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);

/* Generic software timebase sync (also declared under CONFIG_SMP above;
 * repeated here so !SMP builds see the prototypes too). */
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

/* The platform's SMP operations, installed during platform init. */
extern struct smp_ops_t *smp_ops;

/* Hooks used by the generic smp_call_function core. */
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/*
 * Secondary-CPU entry and hold-loop symbols — NOTE(review): presumably
 * defined in the low-level assembly entry code; used when starting and
 * releasing secondary processors.
 */
extern void generic_secondary_smp_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
extern unsigned int booting_thread_hwid;

extern void __early_start(void);
0267 #endif
0268
0269 #endif
0270 #endif