// SPDX-License-Identifier: GPL-2.0
/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static __always_inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif /* #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD */

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};
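
/*
 * Per-CPU thread state, as tracked in smpboot_thread_data::status by
 * smpboot_thread_fn() below:
 *
 *	HP_THREAD_NONE   -> HP_THREAD_ACTIVE   after ht->setup() has run
 *	HP_THREAD_ACTIVE -> HP_THREAD_PARKED   after ht->park() when the
 *						CPU goes down
 *	HP_THREAD_PARKED -> HP_THREAD_ACTIVE   after ht->unpark() when the
 *						CPU comes back up
 */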

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop via kthread_stop().
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	kthread_set_per_cpu(tsk, cpu);
	/*
	 * Park the thread so that it could start right on the CPU
	 * when it is available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	cpus_read_lock();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
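
/*
 * Usage sketch (illustrative only, not part of this file): a client
 * subsystem typically embeds its per-CPU task pointer and callbacks in
 * a statically allocated struct smp_hotplug_thread and registers it
 * once at init time.  The "foo_*" names below are hypothetical; see
 * ksoftirqd in kernel/softirq.c for a real in-tree user.
 *
 *	static DEFINE_PER_CPU(struct task_struct *, foo_task);
 *
 *	static int foo_should_run(unsigned int cpu)
 *	{
 *		return foo_has_work(cpu);	// hypothetical predicate
 *	}
 *
 *	static void foo_thread_fn(unsigned int cpu)
 *	{
 *		foo_do_work(cpu);		// hypothetical worker
 *	}
 *
 *	static struct smp_hotplug_thread foo_threads = {
 *		.store			= &foo_task,
 *		.thread_should_run	= foo_should_run,
 *		.thread_fn		= foo_thread_fn,
 *		.thread_comm		= "foo/%u",
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return smpboot_register_percpu_thread(&foo_threads);
 *	}
 *
 * Once registered, park/unpark across CPU hotplug is handled here; the
 * client only supplies the optional setup/cleanup/park/unpark callbacks
 * it actually needs.
 */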

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	cpus_read_lock();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If the CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success.  Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out, or -EAGAIN if it never reported its
 * death at all, so that the caller can take whatever action it
 * desires (such as retrying the online operation later).
 *
 * When CONFIG_HOTPLUG_CPU is disabled there is no offline path, so the
 * state is unconditionally reset to CPU_UP_PREPARE.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this -EBUSY return makes sense
		 * for systems where the outgoing CPUs take themselves
		 * offline, with no post-death manipulation required from
		 * a surviving CPU.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing.  Either way, the CPU has
		 * not finished dying, so it cannot be brought back up
		 * yet.
		 */
		return -EAGAIN;

	case CPU_UP_PREPARE:
		/*
		 * Timeout while waiting for the CPU to show up was
		 * recovered and everything is now fine.  Continue.
		 */
		return 0;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}
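
/*
 * Summary of cpu_check_up_prepare() return values by observed state:
 *
 *	CPU_POST_DEAD	-> 0       (previous offline completed normally)
 *	CPU_UP_PREPARE	-> 0       (a previous timeout was recovered)
 *	CPU_DEAD_FROZEN	-> -EBUSY  (died, but only after the wait timed out)
 *	CPU_BROKEN	-> -EAGAIN (never reported its death)
 *	anything else	-> -EIO
 */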

/*
 * Mark the specified CPU online.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" state is used when the surviving CPU
 * timed out, so that cpu_check_up_prepare() can later distinguish a
 * late-but-successful offline (-EBUSY) from a CPU that never reported
 * its death at all (CPU_BROKEN, -EAGAIN).
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
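
/*
 * Pairing sketch (illustrative only, not taken from any particular
 * architecture): the surviving CPU and the outgoing CPU use the two
 * helpers above roughly as follows during an offline operation.  The
 * arch_*() names are hypothetical placeholders.
 *
 *	// On the CPU tearing another CPU down:
 *	static void arch_cpu_die(unsigned int cpu)
 *	{
 *		if (!cpu_wait_death(cpu, 5))
 *			pr_err("CPU %u did not die\n", cpu);
 *	}
 *
 *	// On the outgoing CPU itself, at the end of its offline path:
 *	static void arch_cpu_play_dead(void)
 *	{
 *		(void)cpu_report_death();
 *		arch_halt_this_cpu();	// hypothetical low-power halt
 *	}
 *
 * cpu_wait_death() polls cpu_hotplug_state with an exponentially
 * growing sleep (10% longer each iteration) until the outgoing CPU
 * reports CPU_DEAD or the timeout expires.
 */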

#endif	/* #ifdef CONFIG_HOTPLUG_CPU */