// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"

DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);
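
/*
 * Example (illustrative): shorthand broadcasts can be disabled from the
 * kernel command line, e.g.:
 *
 *	linux ... no_ipi_broadcast=1
 *
 * which makes the IPI helpers below always send to the online cpumask
 * instead of using the ALLBUT/ALL shorthands.
 */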

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - it is disabled on the command line
	 * - only a single CPU is online
	 * - not all present CPUs have been booted at least once
	 *
	 * The last point matters because a CPU which was never booted has
	 * its local APIC in an unknown state, and a shorthand broadcast
	 * reaching it might cause havoc. That's especially true for NMI
	 * broadcasts.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule.
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		/*
		 * The shorthands are only usable when the target set plus
		 * the current CPU covers all online CPUs; anything else
		 * must go through the mask based send below.
		 */
		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}
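
/*
 * Example (illustrative, not part of this file): the function call IPIs
 * above are typically reached through the generic cross-call API, e.g.:
 *
 *	static void my_func(void *info) { ... }
 *
 *	smp_call_function(my_func, NULL, 1);
 *
 * which ends up in native_send_call_func_ipi() via the smp_ops hooks.
 * my_func() and the call site are made up for the example.
 */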

#endif /* CONFIG_SMP */

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}
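
/*
 * SET_XAPIC_DEST_FIELD() shifts the destination (physical APIC ID or
 * logical mask) into bits 24-31 of ICR2, i.e. bits 56-63 of the full
 * 64-bit ICR. Worked example (illustrative): for APIC ID 3,
 *
 *	__prepare_ICR2(3) == 0x03000000
 */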

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for the ICR to become idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. The destination mode
	 * is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
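
/*
 * Example (illustrative): for a fixed-mode IPI to all CPUs but self,
 * __prepare_ICR(APIC_DEST_ALLBUT, vector, 0) places the shorthand in
 * bits 18-19 and the vector in bits 0-7 of the ICR low word, so e.g.
 * vector 0xfb yields 0x000c00fb.
 */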

/*
 * This is used to send an IPI with no shorthand notation (the destination
 * is specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for the ICR to become idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * Prepare the target chip field. The ICR2/ICR write pair must not
	 * be interleaved with another IPI send, which is why all callers
	 * run with interrupts disabled.
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * Program the ICR.
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to
	 * send to an arbitrary mask, so we do a unicast to each CPU in
	 * the mask instead.
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See the 'Hack' comment above. */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks.
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to
	 * send to an arbitrary mask, so we do a unicast to each CPU in
	 * the mask instead. This should be modified to do one message
	 * per cluster ID.
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See the 'Hack' comment above. */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines: in flat logical destination
 * mode each CPU's logical APIC ID is a single bit, so the low word of
 * the cpumask can be written into the ICR destination field directly.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}
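
/*
 * Example (illustrative): with flat logical IDs, a mask containing CPUs
 * 0 and 2 becomes the destination value 0b101, so
 *
 *	__prepare_ICR2(0x5) == 0x05000000
 *
 * addresses both CPUs with a single ICR write.
 */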

/* Must come after the send_IPI functions above for inlining. */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

/*
 * Used in paths where the CPU number cannot be determined the normal
 * way (e.g. crash shutdown, where the per-CPU machinery may not be
 * trustworthy): map the hardware APIC ID back to a Linux CPU number,
 * falling back to CPU 0 if that fails.
 */
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_X86_32 */