0001 /* CPU control.
0002  * (C) 2001, 2002, 2003, 2004 Rusty Russell
0003  *
0004  * This code is licensed under the GPL.
0005  */
0006 #include <linux/sched/mm.h>
0007 #include <linux/proc_fs.h>
0008 #include <linux/smp.h>
0009 #include <linux/init.h>
0010 #include <linux/notifier.h>
0011 #include <linux/sched/signal.h>
0012 #include <linux/sched/hotplug.h>
0013 #include <linux/sched/isolation.h>
0014 #include <linux/sched/task.h>
0015 #include <linux/sched/smt.h>
0016 #include <linux/unistd.h>
0017 #include <linux/cpu.h>
0018 #include <linux/oom.h>
0019 #include <linux/rcupdate.h>
0020 #include <linux/export.h>
0021 #include <linux/bug.h>
0022 #include <linux/kthread.h>
0023 #include <linux/stop_machine.h>
0024 #include <linux/mutex.h>
0025 #include <linux/gfp.h>
0026 #include <linux/suspend.h>
0027 #include <linux/lockdep.h>
0028 #include <linux/tick.h>
0029 #include <linux/irq.h>
0030 #include <linux/nmi.h>
0031 #include <linux/smpboot.h>
0032 #include <linux/relay.h>
0033 #include <linux/slab.h>
0034 #include <linux/scs.h>
0035 #include <linux/percpu-rwsem.h>
0036 #include <linux/cpuset.h>
0037 #include <linux/random.h>
0038 #include <linux/cc_platform.h>
0039 
0040 #include <trace/events/power.h>
0041 #define CREATE_TRACE_POINTS
0042 #include <trace/events/cpuhp.h>
0043 
0044 #include "smpboot.h"
0045 
0046 /**
0047  * struct cpuhp_cpu_state - Per cpu hotplug state storage
0048  * @state:  The current cpu state
0049  * @target: The target state
0050  * @fail:   Current CPU hotplug callback state
0051  * @thread: Pointer to the hotplug thread
0052  * @should_run: Thread should execute
0053  * @rollback:   Perform a rollback
0054  * @single: Single callback invocation
0055  * @bringup:    Single callback bringup or teardown selector
0056  * @cpu:    CPU number
0057  * @node:   Remote CPU node; for multi-instance, do a
0058  *      single entry callback for install/remove
0059  * @last:   For multi-instance rollback, remember how far we got
0060  * @cb_state:   The state for a single callback (install/uninstall)
0061  * @result: Result of the operation
0062  * @done_up:    Signal completion to the issuer of the task for cpu-up
0063  * @done_down:  Signal completion to the issuer of the task for cpu-down
0064  */
0065 struct cpuhp_cpu_state {
0066     enum cpuhp_state    state;
0067     enum cpuhp_state    target;
0068     enum cpuhp_state    fail;
0069 #ifdef CONFIG_SMP
0070     struct task_struct  *thread;
0071     bool            should_run;
0072     bool            rollback;
0073     bool            single;
0074     bool            bringup;
0075     struct hlist_node   *node;
0076     struct hlist_node   *last;
0077     enum cpuhp_state    cb_state;
0078     int         result;
0079     struct completion   done_up;
0080     struct completion   done_down;
0081 #endif
0082 };
0083 
0084 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
0085     .fail = CPUHP_INVALID,
0086 };
0087 
0088 #ifdef CONFIG_SMP
0089 cpumask_t cpus_booted_once_mask;
0090 #endif
0091 
0092 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
0093 static struct lockdep_map cpuhp_state_up_map =
0094     STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
0095 static struct lockdep_map cpuhp_state_down_map =
0096     STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
0097 
0099 static inline void cpuhp_lock_acquire(bool bringup)
0100 {
0101     lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
0102 }
0103 
0104 static inline void cpuhp_lock_release(bool bringup)
0105 {
0106     lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
0107 }
0108 #else
0109 
0110 static inline void cpuhp_lock_acquire(bool bringup) { }
0111 static inline void cpuhp_lock_release(bool bringup) { }
0112 
0113 #endif
0114 
0115 /**
0116  * struct cpuhp_step - Hotplug state machine step
0117  * @name:   Name of the step
0118  * @startup:    Startup function of the step
0119  * @teardown:   Teardown function of the step
0120  * @cant_stop:  Bringup/teardown can't be stopped at this step
0121  * @multi_instance: State has multiple instances which get added afterwards
0122  */
0123 struct cpuhp_step {
0124     const char      *name;
0125     union {
0126         int     (*single)(unsigned int cpu);
0127         int     (*multi)(unsigned int cpu,
0128                      struct hlist_node *node);
0129     } startup;
0130     union {
0131         int     (*single)(unsigned int cpu);
0132         int     (*multi)(unsigned int cpu,
0133                      struct hlist_node *node);
0134     } teardown;
0135     /* private: */
0136     struct hlist_head   list;
0137     /* public: */
0138     bool            cant_stop;
0139     bool            multi_instance;
0140 };
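
/*
 * Editor's note -- illustrative sketch only, not part of kernel/cpu.c: the
 * startup/teardown slots above are normally filled through the public
 * cpuhp_setup_state() API. A hypothetical driver hooking into a dynamic
 * online slot might look like this:
 *
 *	static int mydrv_online(unsigned int cpu)
 *	{
 *		pr_info("mydrv: cpu%u came online\n", cpu);
 *		return 0;
 *	}
 *
 *	static int mydrv_offline(unsigned int cpu)
 *	{
 *		pr_info("mydrv: cpu%u going down\n", cpu);
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *				mydrv_online, mydrv_offline);
 *
 * For CPUHP_AP_ONLINE_DYN the return value (when >= 0) is the dynamically
 * allocated state, which is needed later for cpuhp_remove_state().
 */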
0141 
0142 static DEFINE_MUTEX(cpuhp_state_mutex);
0143 static struct cpuhp_step cpuhp_hp_states[];
0144 
0145 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
0146 {
0147     return cpuhp_hp_states + state;
0148 }
0149 
0150 static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
0151 {
0152     return bringup ? !step->startup.single : !step->teardown.single;
0153 }
0154 
0155 /**
0156  * cpuhp_invoke_callback - Invoke the callbacks for a given state
0157  * @cpu:    The cpu for which the callback should be invoked
0158  * @state:  The state to do callbacks for
0159  * @bringup:    True if the bringup callback should be invoked
0160  * @node:   For multi-instance, do a single entry callback for install/remove
0161  * @lastp:  For multi-instance rollback, remember how far we got
0162  *
0163  * Called from cpu hotplug and from the state register machinery.
0164  *
0165  * Return: %0 on success or a negative errno code
0166  */
0167 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
0168                  bool bringup, struct hlist_node *node,
0169                  struct hlist_node **lastp)
0170 {
0171     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
0172     struct cpuhp_step *step = cpuhp_get_step(state);
0173     int (*cbm)(unsigned int cpu, struct hlist_node *node);
0174     int (*cb)(unsigned int cpu);
0175     int ret, cnt;
0176 
0177     if (st->fail == state) {
0178         st->fail = CPUHP_INVALID;
0179         return -EAGAIN;
0180     }
0181 
0182     if (cpuhp_step_empty(bringup, step)) {
0183         WARN_ON_ONCE(1);
0184         return 0;
0185     }
0186 
0187     if (!step->multi_instance) {
0188         WARN_ON_ONCE(lastp && *lastp);
0189         cb = bringup ? step->startup.single : step->teardown.single;
0190 
0191         trace_cpuhp_enter(cpu, st->target, state, cb);
0192         ret = cb(cpu);
0193         trace_cpuhp_exit(cpu, st->state, state, ret);
0194         return ret;
0195     }
0196     cbm = bringup ? step->startup.multi : step->teardown.multi;
0197 
0198     /* Single invocation for instance add/remove */
0199     if (node) {
0200         WARN_ON_ONCE(lastp && *lastp);
0201         trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
0202         ret = cbm(cpu, node);
0203         trace_cpuhp_exit(cpu, st->state, state, ret);
0204         return ret;
0205     }
0206 
0207     /* State transition. Invoke on all instances */
0208     cnt = 0;
0209     hlist_for_each(node, &step->list) {
0210         if (lastp && node == *lastp)
0211             break;
0212 
0213         trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
0214         ret = cbm(cpu, node);
0215         trace_cpuhp_exit(cpu, st->state, state, ret);
0216         if (ret) {
0217             if (!lastp)
0218                 goto err;
0219 
0220             *lastp = node;
0221             return ret;
0222         }
0223         cnt++;
0224     }
0225     if (lastp)
0226         *lastp = NULL;
0227     return 0;
0228 err:
0229     /* Rollback the instances if one failed */
0230     cbm = !bringup ? step->startup.multi : step->teardown.multi;
0231     if (!cbm)
0232         return ret;
0233 
0234     hlist_for_each(node, &step->list) {
0235         if (!cnt--)
0236             break;
0237 
0238         trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
0239         ret = cbm(cpu, node);
0240         trace_cpuhp_exit(cpu, st->state, state, ret);
0241         /*
0242          * Rollback must not fail.
0243          */
0244         WARN_ON_ONCE(ret);
0245     }
0246     return ret;
0247 }
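
/*
 * Editor's note -- hedged example, not part of this file: the multi-instance
 * path above walks one hlist_node per registered instance. A hypothetical
 * user embeds that node in its per-object state and recovers the object with
 * container_of() in the callback:
 *
 *	struct mydev {
 *		struct hlist_node node;
 *		int id;
 *	};
 *
 *	static int mydev_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct mydev *dev = container_of(node, struct mydev, node);
 *
 *		pr_info("mydev%d: cpu%u online\n", dev->id, cpu);
 *		return 0;
 *	}
 *
 * The state is created once with cpuhp_setup_state_multi() and each object is
 * attached with cpuhp_state_add_instance(state, &dev->node), which invokes the
 * startup callback for that one instance on all online CPUs.
 */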
0248 
0249 #ifdef CONFIG_SMP
0250 static bool cpuhp_is_ap_state(enum cpuhp_state state)
0251 {
0252     /*
0253      * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
0254      * purposes as that state is handled explicitly in cpu_down.
0255      */
0256     return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
0257 }
0258 
0259 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
0260 {
0261     struct completion *done = bringup ? &st->done_up : &st->done_down;
0262     wait_for_completion(done);
0263 }
0264 
0265 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
0266 {
0267     struct completion *done = bringup ? &st->done_up : &st->done_down;
0268     complete(done);
0269 }
0270 
0271 /*
0272  * The former STARTING/DYING states run with IRQs disabled and must not fail.
0273  */
0274 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
0275 {
0276     return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
0277 }
0278 
0279 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
0280 static DEFINE_MUTEX(cpu_add_remove_lock);
0281 bool cpuhp_tasks_frozen;
0282 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
0283 
0284 /*
0285  * The following two APIs (cpu_maps_update_begin/done) must be used when
0286  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
0287  */
0288 void cpu_maps_update_begin(void)
0289 {
0290     mutex_lock(&cpu_add_remove_lock);
0291 }
0292 
0293 void cpu_maps_update_done(void)
0294 {
0295     mutex_unlock(&cpu_add_remove_lock);
0296 }
0297 
0298 /*
0299  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
0300  * Should always be manipulated under cpu_add_remove_lock
0301  */
0302 static int cpu_hotplug_disabled;
0303 
0304 #ifdef CONFIG_HOTPLUG_CPU
0305 
0306 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
0307 
0308 void cpus_read_lock(void)
0309 {
0310     percpu_down_read(&cpu_hotplug_lock);
0311 }
0312 EXPORT_SYMBOL_GPL(cpus_read_lock);
0313 
0314 int cpus_read_trylock(void)
0315 {
0316     return percpu_down_read_trylock(&cpu_hotplug_lock);
0317 }
0318 EXPORT_SYMBOL_GPL(cpus_read_trylock);
0319 
0320 void cpus_read_unlock(void)
0321 {
0322     percpu_up_read(&cpu_hotplug_lock);
0323 }
0324 EXPORT_SYMBOL_GPL(cpus_read_unlock);
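
/*
 * Editor's note -- illustrative sketch, not part of this file: a typical
 * (hypothetical) reader pins the CPU maps around an online-CPU walk so that
 * no CPU can come or go in between:
 *
 *	static void mydrv_walk_online(void)
 *	{
 *		unsigned int cpu;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			pr_info("mydrv: cpu%u is online\n", cpu);
 *		cpus_read_unlock();
 *	}
 */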
0325 
0326 void cpus_write_lock(void)
0327 {
0328     percpu_down_write(&cpu_hotplug_lock);
0329 }
0330 
0331 void cpus_write_unlock(void)
0332 {
0333     percpu_up_write(&cpu_hotplug_lock);
0334 }
0335 
0336 void lockdep_assert_cpus_held(void)
0337 {
0338     /*
0339      * We can't have hotplug operations before userspace starts running,
0340      * and some init codepaths will knowingly not take the hotplug lock.
0341      * This is all valid, so mute lockdep until it makes sense to report
0342      * unheld locks.
0343      */
0344     if (system_state < SYSTEM_RUNNING)
0345         return;
0346 
0347     percpu_rwsem_assert_held(&cpu_hotplug_lock);
0348 }
0349 
0350 #ifdef CONFIG_LOCKDEP
0351 int lockdep_is_cpus_held(void)
0352 {
0353     return percpu_rwsem_is_held(&cpu_hotplug_lock);
0354 }
0355 #endif
0356 
0357 static void lockdep_acquire_cpus_lock(void)
0358 {
0359     rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
0360 }
0361 
0362 static void lockdep_release_cpus_lock(void)
0363 {
0364     rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
0365 }
0366 
0367 /*
0368  * Wait for currently running CPU hotplug operations to complete (if any) and
0369  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
0370  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
0371  * hotplug path before performing hotplug operations. So acquiring that lock
0372  * guarantees mutual exclusion from any currently running hotplug operations.
0373  */
0374 void cpu_hotplug_disable(void)
0375 {
0376     cpu_maps_update_begin();
0377     cpu_hotplug_disabled++;
0378     cpu_maps_update_done();
0379 }
0380 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
0381 
0382 static void __cpu_hotplug_enable(void)
0383 {
0384     if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
0385         return;
0386     cpu_hotplug_disabled--;
0387 }
0388 
0389 void cpu_hotplug_enable(void)
0390 {
0391     cpu_maps_update_begin();
0392     __cpu_hotplug_enable();
0393     cpu_maps_update_done();
0394 }
0395 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
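
/*
 * Editor's note -- hedged sketch, not part of this file: callers that must
 * keep the set of online CPUs stable across a longer operation, and cannot
 * simply sit inside cpus_read_lock(), pair the two helpers above:
 *
 *	cpu_hotplug_disable();
 *	mydrv_do_hotplug_sensitive_work();	(hypothetical helper)
 *	cpu_hotplug_enable();
 *
 * While disabled, cpu_up()/cpu_down() requests (e.g. via sysfs) fail with
 * -EBUSY instead of blocking.
 */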
0396 
0397 #else
0398 
0399 static void lockdep_acquire_cpus_lock(void)
0400 {
0401 }
0402 
0403 static void lockdep_release_cpus_lock(void)
0404 {
0405 }
0406 
0407 #endif  /* CONFIG_HOTPLUG_CPU */
0408 
0409 /*
0410  * Architectures that need SMT-specific errata handling during SMT hotplug
0411  * should override this.
0412  */
0413 void __weak arch_smt_update(void) { }
0414 
0415 #ifdef CONFIG_HOTPLUG_SMT
0416 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
0417 
0418 void __init cpu_smt_disable(bool force)
0419 {
0420     if (!cpu_smt_possible())
0421         return;
0422 
0423     if (force) {
0424         pr_info("SMT: Force disabled\n");
0425         cpu_smt_control = CPU_SMT_FORCE_DISABLED;
0426     } else {
0427         pr_info("SMT: disabled\n");
0428         cpu_smt_control = CPU_SMT_DISABLED;
0429     }
0430 }
0431 
0432 /*
0433  * The decision whether SMT is supported can only be done after the full
0434  * CPU identification. Called from architecture code.
0435  */
0436 void __init cpu_smt_check_topology(void)
0437 {
0438     if (!topology_smt_supported())
0439         cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
0440 }
0441 
0442 static int __init smt_cmdline_disable(char *str)
0443 {
0444     cpu_smt_disable(str && !strcmp(str, "force"));
0445     return 0;
0446 }
0447 early_param("nosmt", smt_cmdline_disable);
0448 
0449 static inline bool cpu_smt_allowed(unsigned int cpu)
0450 {
0451     if (cpu_smt_control == CPU_SMT_ENABLED)
0452         return true;
0453 
0454     if (topology_is_primary_thread(cpu))
0455         return true;
0456 
0457     /*
0458      * On x86 it's required to boot all logical CPUs at least once so
0459      * that the init code can get a chance to set CR4.MCE on each
0460      * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
0461      * core will shut down the machine.
0462      */
0463     return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
0464 }
0465 
0466 /* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
0467 bool cpu_smt_possible(void)
0468 {
0469     return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
0470         cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
0471 }
0472 EXPORT_SYMBOL_GPL(cpu_smt_possible);
0473 #else
0474 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
0475 #endif
0476 
0477 static inline enum cpuhp_state
0478 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
0479 {
0480     enum cpuhp_state prev_state = st->state;
0481     bool bringup = st->state < target;
0482 
0483     st->rollback = false;
0484     st->last = NULL;
0485 
0486     st->target = target;
0487     st->single = false;
0488     st->bringup = bringup;
0489     if (cpu_dying(cpu) != !bringup)
0490         set_cpu_dying(cpu, !bringup);
0491 
0492     return prev_state;
0493 }
0494 
0495 static inline void
0496 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
0497           enum cpuhp_state prev_state)
0498 {
0499     bool bringup = !st->bringup;
0500 
0501     st->target = prev_state;
0502 
0503     /*
0504      * Already rolling back. No need to invert the bringup value or to change
0505      * the current state.
0506      */
0507     if (st->rollback)
0508         return;
0509 
0510     st->rollback = true;
0511 
0512     /*
0513      * If we have st->last we need to undo partial multi_instance of this
0514      * state first. Otherwise start undo at the previous state.
0515      */
0516     if (!st->last) {
0517         if (st->bringup)
0518             st->state--;
0519         else
0520             st->state++;
0521     }
0522 
0523     st->bringup = bringup;
0524     if (cpu_dying(cpu) != !bringup)
0525         set_cpu_dying(cpu, !bringup);
0526 }
0527 
0528 /* Regular hotplug invocation of the AP hotplug thread */
0529 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
0530 {
0531     if (!st->single && st->state == st->target)
0532         return;
0533 
0534     st->result = 0;
0535     /*
0536      * Make sure the above stores are visible before should_run becomes
0537      * true. Paired with the smp_mb() in cpuhp_thread_fun().
0538      */
0539     smp_mb();
0540     st->should_run = true;
0541     wake_up_process(st->thread);
0542     wait_for_ap_thread(st, st->bringup);
0543 }
0544 
0545 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
0546              enum cpuhp_state target)
0547 {
0548     enum cpuhp_state prev_state;
0549     int ret;
0550 
0551     prev_state = cpuhp_set_state(cpu, st, target);
0552     __cpuhp_kick_ap(st);
0553     if ((ret = st->result)) {
0554         cpuhp_reset_state(cpu, st, prev_state);
0555         __cpuhp_kick_ap(st);
0556     }
0557 
0558     return ret;
0559 }
0560 
0561 static int bringup_wait_for_ap(unsigned int cpu)
0562 {
0563     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
0564 
0565     /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
0566     wait_for_ap_thread(st, true);
0567     if (WARN_ON_ONCE((!cpu_online(cpu))))
0568         return -ECANCELED;
0569 
0570     /* Unpark the hotplug thread of the target cpu */
0571     kthread_unpark(st->thread);
0572 
0573     /*
0574      * SMT soft disabling on X86 requires bringing the CPU out of the
0575      * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
0576      * CPU marked itself as booted_once in notify_cpu_starting() so the
0577      * cpu_smt_allowed() check will now return false if this is not the
0578      * primary sibling.
0579      */
0580     if (!cpu_smt_allowed(cpu))
0581         return -ECANCELED;
0582 
0583     if (st->target <= CPUHP_AP_ONLINE_IDLE)
0584         return 0;
0585 
0586     return cpuhp_kick_ap(cpu, st, st->target);
0587 }
0588 
0589 static int bringup_cpu(unsigned int cpu)
0590 {
0591     struct task_struct *idle = idle_thread_get(cpu);
0592     int ret;
0593 
0594     /*
0595      * Reset stale stack state from the last time this CPU was online.
0596      */
0597     scs_task_reset(idle);
0598     kasan_unpoison_task_stack(idle);
0599 
0600     /*
0601      * Some architectures have to walk the irq descriptors to
0602      * setup the vector space for the cpu which comes online.
0603      * Prevent irq alloc/free across the bringup.
0604      */
0605     irq_lock_sparse();
0606 
0607     /* Arch-specific enabling code. */
0608     ret = __cpu_up(cpu, idle);
0609     irq_unlock_sparse();
0610     if (ret)
0611         return ret;
0612     return bringup_wait_for_ap(cpu);
0613 }
0614 
0615 static int finish_cpu(unsigned int cpu)
0616 {
0617     struct task_struct *idle = idle_thread_get(cpu);
0618     struct mm_struct *mm = idle->active_mm;
0619 
0620     /*
0621      * idle_task_exit() will have switched to &init_mm, now
0622      * clean up any remaining active_mm state.
0623      */
0624     if (mm != &init_mm)
0625         idle->active_mm = &init_mm;
0626     mmdrop(mm);
0627     return 0;
0628 }
0629 
0630 /*
0631  * Hotplug state machine related functions
0632  */
0633 
0634 /*
0635  * Get the next state to run. Empty ones will be skipped. Returns true if a
0636  * state must be run.
0637  *
0638  * st->state will be modified ahead of time, to match state_to_run, as if it
0639  * has already run.
0640  */
0641 static bool cpuhp_next_state(bool bringup,
0642                  enum cpuhp_state *state_to_run,
0643                  struct cpuhp_cpu_state *st,
0644                  enum cpuhp_state target)
0645 {
0646     do {
0647         if (bringup) {
0648             if (st->state >= target)
0649                 return false;
0650 
0651             *state_to_run = ++st->state;
0652         } else {
0653             if (st->state <= target)
0654                 return false;
0655 
0656             *state_to_run = st->state--;
0657         }
0658 
0659         if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
0660             break;
0661     } while (true);
0662 
0663     return true;
0664 }
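
/*
 * Editor's note -- worked example with made-up numbers: with st->state == 3,
 * target == 6 and bringup == true, successive calls hand back 4, 5 and 6,
 * silently consuming any of those states whose startup callback is NULL; once
 * st->state has reached the target, the next call returns false. On teardown
 * the walk runs in reverse, handing back the current state before
 * decrementing it, so the target state itself is not invoked.
 */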
0665 
0666 static int cpuhp_invoke_callback_range(bool bringup,
0667                        unsigned int cpu,
0668                        struct cpuhp_cpu_state *st,
0669                        enum cpuhp_state target)
0670 {
0671     enum cpuhp_state state;
0672     int err = 0;
0673 
0674     while (cpuhp_next_state(bringup, &state, st, target)) {
0675         err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
0676         if (err)
0677             break;
0678     }
0679 
0680     return err;
0681 }
0682 
0683 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
0684 {
0685     if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
0686         return true;
0687     /*
0688      * When CPU hotplug is disabled, then taking the CPU down is not
0689      * possible because takedown_cpu() and the architecture and
0690      * subsystem specific mechanisms are not available. So the CPU
0691      * which would be completely unplugged again needs to stay around
0692      * in the current state.
0693      */
0694     return st->state <= CPUHP_BRINGUP_CPU;
0695 }
0696 
0697 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
0698                   enum cpuhp_state target)
0699 {
0700     enum cpuhp_state prev_state = st->state;
0701     int ret = 0;
0702 
0703     ret = cpuhp_invoke_callback_range(true, cpu, st, target);
0704     if (ret) {
0705         pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
0706              ret, cpu, cpuhp_get_step(st->state)->name,
0707              st->state);
0708 
0709         cpuhp_reset_state(cpu, st, prev_state);
0710         if (can_rollback_cpu(st))
0711             WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
0712                                 prev_state));
0713     }
0714     return ret;
0715 }
0716 
0717 /*
0718  * The cpu hotplug threads manage the bringup and teardown of the cpus
0719  */
0720 static int cpuhp_should_run(unsigned int cpu)
0721 {
0722     struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
0723 
0724     return st->should_run;
0725 }
0726 
0727 /*
0728  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
0729  * callbacks when a state gets [un]installed at runtime.
0730  *
0731  * Each invocation of this function by the smpboot thread does a single AP
0732  * state callback.
0733  *
0734  * It has 3 modes of operation:
0735  *  - single: runs st->cb_state
0736  *  - up:     runs ++st->state, while st->state < st->target
0737  *  - down:   runs st->state--, while st->state > st->target
0738  *
0739  * When complete or on error, should_run is cleared and the completion is fired.
0740  */
0741 static void cpuhp_thread_fun(unsigned int cpu)
0742 {
0743     struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
0744     bool bringup = st->bringup;
0745     enum cpuhp_state state;
0746 
0747     if (WARN_ON_ONCE(!st->should_run))
0748         return;
0749 
0750     /*
0751      * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
0752      * that if we see ->should_run we also see the rest of the state.
0753      */
0754     smp_mb();
0755 
0756     /*
0757      * The BP holds the hotplug lock, but we're now running on the AP;
0758      * ensure that anybody asserting that the lock is held will actually
0759      * find it so.
0760      */
0761     lockdep_acquire_cpus_lock();
0762     cpuhp_lock_acquire(bringup);
0763 
0764     if (st->single) {
0765         state = st->cb_state;
0766         st->should_run = false;
0767     } else {
0768         st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
0769         if (!st->should_run)
0770             goto end;
0771     }
0772 
0773     WARN_ON_ONCE(!cpuhp_is_ap_state(state));
0774 
0775     if (cpuhp_is_atomic_state(state)) {
0776         local_irq_disable();
0777         st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
0778         local_irq_enable();
0779 
0780         /*
0781          * STARTING/DYING must not fail!
0782          */
0783         WARN_ON_ONCE(st->result);
0784     } else {
0785         st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
0786     }
0787 
0788     if (st->result) {
0789         /*
0790          * If we fail on a rollback, we're up a creek without a
0791          * paddle, no way forward, no way back. We lose, thanks for
0792          * playing.
0793          */
0794         WARN_ON_ONCE(st->rollback);
0795         st->should_run = false;
0796     }
0797 
0798 end:
0799     cpuhp_lock_release(bringup);
0800     lockdep_release_cpus_lock();
0801 
0802     if (!st->should_run)
0803         complete_ap_thread(st, bringup);
0804 }
0805 
0806 /* Invoke a single callback on a remote cpu */
0807 static int
0808 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
0809              struct hlist_node *node)
0810 {
0811     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
0812     int ret;
0813 
0814     if (!cpu_online(cpu))
0815         return 0;
0816 
0817     cpuhp_lock_acquire(false);
0818     cpuhp_lock_release(false);
0819 
0820     cpuhp_lock_acquire(true);
0821     cpuhp_lock_release(true);
0822 
0823     /*
0824      * If we are up and running, use the hotplug thread. For early calls
0825      * we invoke the thread function directly.
0826      */
0827     if (!st->thread)
0828         return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
0829 
0830     st->rollback = false;
0831     st->last = NULL;
0832 
0833     st->node = node;
0834     st->bringup = bringup;
0835     st->cb_state = state;
0836     st->single = true;
0837 
0838     __cpuhp_kick_ap(st);
0839 
0840     /*
0841      * If we failed and did a partial, do a rollback.
0842      */
0843     if ((ret = st->result) && st->last) {
0844         st->rollback = true;
0845         st->bringup = !bringup;
0846 
0847         __cpuhp_kick_ap(st);
0848     }
0849 
0850     /*
0851      * Clean up the leftovers so the next hotplug operation won't use stale
0852      * data.
0853      */
0854     st->node = st->last = NULL;
0855     return ret;
0856 }
0857 
0858 static int cpuhp_kick_ap_work(unsigned int cpu)
0859 {
0860     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
0861     enum cpuhp_state prev_state = st->state;
0862     int ret;
0863 
0864     cpuhp_lock_acquire(false);
0865     cpuhp_lock_release(false);
0866 
0867     cpuhp_lock_acquire(true);
0868     cpuhp_lock_release(true);
0869 
0870     trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
0871     ret = cpuhp_kick_ap(cpu, st, st->target);
0872     trace_cpuhp_exit(cpu, st->state, prev_state, ret);
0873 
0874     return ret;
0875 }
0876 
0877 static struct smp_hotplug_thread cpuhp_threads = {
0878     .store          = &cpuhp_state.thread,
0879     .thread_should_run  = cpuhp_should_run,
0880     .thread_fn      = cpuhp_thread_fun,
0881     .thread_comm        = "cpuhp/%u",
0882     .selfparking        = true,
0883 };
0884 
0885 static __init void cpuhp_init_state(void)
0886 {
0887     struct cpuhp_cpu_state *st;
0888     int cpu;
0889 
0890     for_each_possible_cpu(cpu) {
0891         st = per_cpu_ptr(&cpuhp_state, cpu);
0892         init_completion(&st->done_up);
0893         init_completion(&st->done_down);
0894     }
0895 }
0896 
0897 void __init cpuhp_threads_init(void)
0898 {
0899     cpuhp_init_state();
0900     BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
0901     kthread_unpark(this_cpu_read(cpuhp_state.thread));
0902 }
0903 
0904 /*
0906  * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
0907  * protected region.
0908  *
0909  * The operation is still serialized against concurrent CPU hotplug via
0910  * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
0911  * serialized against other hotplug related activity like adding or
0912  * removing of state callbacks and state instances, which invoke either the
0913  * startup or the teardown callback of the affected state.
0914  *
0915  * This is required for subsystems which are unfixable vs. CPU hotplug and
0916  * evade lock inversion problems by scheduling work which has to be
0917  * completed _before_ cpu_up()/_cpu_down() returns.
0918  *
0919  * Don't even think about adding anything to this for any new code or even
0920  * drivers. Its only purpose is to keep existing lock order trainwrecks
0921  * working.
0922  *
0923  * For cpu_down() there might be valid reasons to finish cleanups which are
0924  * not required to be done under cpu_hotplug_lock, but that's a different
0925  * story and would not be invoked via this.
0926  */
0927 static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
0928 {
0929     /*
0930      * cpusets delegate hotplug operations to a worker to "solve" the
0931      * lock order problems. Wait for the worker, but only if tasks are
0932      * _not_ frozen (suspend, hibernate) as that would wait forever.
0933      *
0934      * The wait is required because otherwise the hotplug operation
0935      * returns with inconsistent state, which could even be observed in
0936      * user space when a new CPU is brought up. The CPU plug uevent
0937      * would be delivered and user space reacting on it would fail to
0938      * move tasks to the newly plugged CPU up to the point where the
0939      * work has finished because up to that point the newly plugged CPU
0940      * is not assignable in cpusets/cgroups. On unplug that's not
0941      * necessarily a visible issue, but it is still inconsistent state,
0942      * which is the real problem which needs to be "fixed". This can't
0943      * prevent the transient state between scheduling the work and
0944      * returning from waiting for it.
0945      */
0946     if (!tasks_frozen)
0947         cpuset_wait_for_hotplug();
0948 }
0949 
0950 #ifdef CONFIG_HOTPLUG_CPU
0951 #ifndef arch_clear_mm_cpumask_cpu
0952 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
0953 #endif
0954 
0955 /**
0956  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
0957  * @cpu: a CPU id
0958  *
0959  * This function walks all processes, finds a valid mm struct for each one and
0960  * then clears a corresponding bit in mm's cpumask.  While this all sounds
0961  * trivial, there are various non-obvious corner cases, which this function
0962  * tries to solve in a safe manner.
0963  *
0964  * Also note that the function uses a somewhat relaxed locking scheme, so it may
0965  * be called only for an already offlined CPU.
0966  */
0967 void clear_tasks_mm_cpumask(int cpu)
0968 {
0969     struct task_struct *p;
0970 
0971     /*
0972      * This function is called after the cpu is taken down and marked
0973      * offline, so it's not like new tasks will ever get this cpu set in
0974      * their mm mask. -- Peter Zijlstra
0975      * Thus, we may use rcu_read_lock() here, instead of grabbing
0976      * full-fledged tasklist_lock.
0977      */
0978     WARN_ON(cpu_online(cpu));
0979     rcu_read_lock();
0980     for_each_process(p) {
0981         struct task_struct *t;
0982 
0983         /*
0984          * Main thread might exit, but other threads may still have
0985          * a valid mm. Find one.
0986          */
0987         t = find_lock_task_mm(p);
0988         if (!t)
0989             continue;
0990         arch_clear_mm_cpumask_cpu(cpu, t->mm);
0991         task_unlock(t);
0992     }
0993     rcu_read_unlock();
0994 }
0995 
0996 /* Take this CPU down. */
0997 static int take_cpu_down(void *_param)
0998 {
0999     struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1000     enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1001     int err, cpu = smp_processor_id();
1002     int ret;
1003 
1004     /* Ensure this CPU doesn't handle any more interrupts. */
1005     err = __cpu_disable();
1006     if (err < 0)
1007         return err;
1008 
1009     /*
1010      * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1011      * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1012      */
1013     WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1014 
1015     /* Invoke the former CPU_DYING callbacks */
1016     ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1017 
1018     /*
1019      * DYING must not fail!
1020      */
1021     WARN_ON_ONCE(ret);
1022 
1023     /* Give up timekeeping duties */
1024     tick_handover_do_timer();
1025     /* Remove CPU from timer broadcasting */
1026     tick_offline_cpu(cpu);
1027     /* Park the stopper thread */
1028     stop_machine_park(cpu);
1029     return 0;
1030 }
1031 
1032 static int takedown_cpu(unsigned int cpu)
1033 {
1034     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1035     int err;
1036 
1037     /* Park the smpboot threads */
1038     kthread_park(st->thread);
1039 
1040     /*
1041      * Prevent irq alloc/free while the dying cpu reorganizes the
1042      * interrupt affinities.
1043      */
1044     irq_lock_sparse();
1045 
1046     /*
1047      * So now all preempt/rcu users must observe !cpu_active().
1048      */
1049     err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1050     if (err) {
1051         /* CPU refused to die */
1052         irq_unlock_sparse();
1053         /* Unpark the hotplug thread so we can rollback there */
1054         kthread_unpark(st->thread);
1055         return err;
1056     }
1057     BUG_ON(cpu_online(cpu));
1058 
1059     /*
1060      * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1061      * all runnable tasks from the CPU, there's only the idle task left now
1062      * that the migration thread is done doing the stop_machine thing.
1063      *
1064      * Wait for the stop thread to go away.
1065      */
1066     wait_for_ap_thread(st, false);
1067     BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1068 
1069     /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1070     irq_unlock_sparse();
1071 
1072     hotplug_cpu__broadcast_tick_pull(cpu);
1073     /* This actually kills the CPU. */
1074     __cpu_die(cpu);
1075 
1076     tick_cleanup_dead_cpu(cpu);
1077     rcutree_migrate_callbacks(cpu);
1078     return 0;
1079 }
1080 
1081 static void cpuhp_complete_idle_dead(void *arg)
1082 {
1083     struct cpuhp_cpu_state *st = arg;
1084 
1085     complete_ap_thread(st, false);
1086 }
1087 
1088 void cpuhp_report_idle_dead(void)
1089 {
1090     struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1091 
1092     BUG_ON(st->state != CPUHP_AP_OFFLINE);
1093     rcu_report_dead(smp_processor_id());
1094     st->state = CPUHP_AP_IDLE_DEAD;
1095     /*
1096      * We cannot call complete after rcu_report_dead() so we delegate it
1097      * to an online cpu.
1098      */
1099     smp_call_function_single(cpumask_first(cpu_online_mask),
1100                  cpuhp_complete_idle_dead, st, 0);
1101 }
1102 
1103 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1104                 enum cpuhp_state target)
1105 {
1106     enum cpuhp_state prev_state = st->state;
1107     int ret = 0;
1108 
1109     ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1110     if (ret) {
1111         pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1112              ret, cpu, cpuhp_get_step(st->state)->name,
1113              st->state);
1114 
1115         cpuhp_reset_state(cpu, st, prev_state);
1116 
1117         if (st->state < prev_state)
1118             WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1119                                 prev_state));
1120     }
1121 
1122     return ret;
1123 }
1124 
1125 /* Requires cpu_add_remove_lock to be held */
1126 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1127                enum cpuhp_state target)
1128 {
1129     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1130     int prev_state, ret = 0;
1131 
1132     if (num_online_cpus() == 1)
1133         return -EBUSY;
1134 
1135     if (!cpu_present(cpu))
1136         return -EINVAL;
1137 
1138     cpus_write_lock();
1139 
1140     cpuhp_tasks_frozen = tasks_frozen;
1141 
1142     prev_state = cpuhp_set_state(cpu, st, target);
1143     /*
1144      * If the current CPU state is in the range of the AP hotplug thread,
1145      * then we need to kick the thread.
1146      */
1147     if (st->state > CPUHP_TEARDOWN_CPU) {
1148         st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1149         ret = cpuhp_kick_ap_work(cpu);
1150         /*
1151          * The AP side has done the error rollback already. Just
1152          * return the error code.
1153          */
1154         if (ret)
1155             goto out;
1156 
1157         /*
1158          * We might have stopped still in the range of the AP hotplug
1159          * thread. Nothing to do anymore.
1160          */
1161         if (st->state > CPUHP_TEARDOWN_CPU)
1162             goto out;
1163 
1164         st->target = target;
1165     }
1166     /*
1167      * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1168      * to do the further cleanups.
1169      */
1170     ret = cpuhp_down_callbacks(cpu, st, target);
1171     if (ret && st->state < prev_state) {
1172         if (st->state == CPUHP_TEARDOWN_CPU) {
1173             cpuhp_reset_state(cpu, st, prev_state);
1174             __cpuhp_kick_ap(st);
1175         } else {
1176             WARN(1, "DEAD callback error for CPU%d", cpu);
1177         }
1178     }
1179 
1180 out:
1181     cpus_write_unlock();
1182     /*
1183      * Do post unplug cleanup. This is still protected against
1184      * concurrent CPU hotplug via cpu_add_remove_lock.
1185      */
1186     lockup_detector_cleanup();
1187     arch_smt_update();
1188     cpu_up_down_serialize_trainwrecks(tasks_frozen);
1189     return ret;
1190 }
1191 
1192 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1193 {
1194     /*
1195      * If the platform does not support hotplug, report it explicitly to
1196      * differentiate it from a transient offlining failure.
1197      */
1198     if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
1199         return -EOPNOTSUPP;
1200     if (cpu_hotplug_disabled)
1201         return -EBUSY;
1202     return _cpu_down(cpu, 0, target);
1203 }
1204 
1205 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1206 {
1207     int err;
1208 
1209     cpu_maps_update_begin();
1210     err = cpu_down_maps_locked(cpu, target);
1211     cpu_maps_update_done();
1212     return err;
1213 }
1214 
1215 /**
1216  * cpu_device_down - Bring down a cpu device
1217  * @dev: Pointer to the cpu device to offline
1218  *
1219  * This function is meant to be used by device core cpu subsystem only.
1220  *
1221  * Other subsystems should use remove_cpu() instead.
1222  *
1223  * Return: %0 on success or a negative errno code
1224  */
1225 int cpu_device_down(struct device *dev)
1226 {
1227     return cpu_down(dev->id, CPUHP_OFFLINE);
1228 }
1229 
1230 int remove_cpu(unsigned int cpu)
1231 {
1232     int ret;
1233 
1234     lock_device_hotplug();
1235     ret = device_offline(get_cpu_device(cpu));
1236     unlock_device_hotplug();
1237 
1238     return ret;
1239 }
1240 EXPORT_SYMBOL_GPL(remove_cpu);
1241 
1242 void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1243 {
1244     unsigned int cpu;
1245     int error;
1246 
1247     cpu_maps_update_begin();
1248 
1249     /*
1250      * Make certain the cpu I'm about to reboot on is online.
1251      *
1252      * This is in line with what migrate_to_reboot_cpu() already does.
1253      */
1254     if (!cpu_online(primary_cpu))
1255         primary_cpu = cpumask_first(cpu_online_mask);
1256 
1257     for_each_online_cpu(cpu) {
1258         if (cpu == primary_cpu)
1259             continue;
1260 
1261         error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1262         if (error) {
1263             pr_err("Failed to offline CPU%d - error=%d",
1264                 cpu, error);
1265             break;
1266         }
1267     }
1268 
1269     /*
1270      * Ensure all but the reboot CPU are offline.
1271      */
1272     BUG_ON(num_online_cpus() > 1);
1273 
1274     /*
1275      * Make sure the CPUs won't be enabled by someone else after this
1276      * point. Kexec will reboot to a new kernel shortly, resetting
1277      * everything along the way.
1278      */
1279     cpu_hotplug_disabled++;
1280 
1281     cpu_maps_update_done();
1282 }
1283 
1284 #else
1285 #define takedown_cpu        NULL
1286 #endif /*CONFIG_HOTPLUG_CPU*/
1287 
1288 /**
1289  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1290  * @cpu: cpu that just started
1291  *
1292  * It must be called by the arch code on the new cpu, before the new cpu
1293  * enables interrupts and before the "boot" cpu returns from __cpu_up().
1294  */
1295 void notify_cpu_starting(unsigned int cpu)
1296 {
1297     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1298     enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1299     int ret;
1300 
1301     rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
1302     cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1303     ret = cpuhp_invoke_callback_range(true, cpu, st, target);
1304 
1305     /*
1306      * STARTING must not fail!
1307      */
1308     WARN_ON_ONCE(ret);
1309 }
1310 
1311 /*
1312  * Called from the idle task. Wake up the controlling task which brings the
1313  * hotplug thread of the upcoming CPU up and then delegates the rest of the
1314  * online bringup to the hotplug thread.
1315  */
1316 void cpuhp_online_idle(enum cpuhp_state state)
1317 {
1318     struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1319 
1320     /* Happens for the boot cpu */
1321     if (state != CPUHP_AP_ONLINE_IDLE)
1322         return;
1323 
1324     /*
1325      * Unpark the stopper thread before we start the idle loop (and start
1326      * scheduling); this ensures the stopper task is always available.
1327      */
1328     stop_machine_unpark(smp_processor_id());
1329 
1330     st->state = CPUHP_AP_ONLINE_IDLE;
1331     complete_ap_thread(st, true);
1332 }
1333 
1334 /* Requires cpu_add_remove_lock to be held */
1335 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1336 {
1337     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1338     struct task_struct *idle;
1339     int ret = 0;
1340 
1341     cpus_write_lock();
1342 
1343     if (!cpu_present(cpu)) {
1344         ret = -EINVAL;
1345         goto out;
1346     }
1347 
1348     /*
1349      * The caller of cpu_up() might have raced with another
1350      * caller. Nothing to do.
1351      */
1352     if (st->state >= target)
1353         goto out;
1354 
1355     if (st->state == CPUHP_OFFLINE) {
1356         /* Let it fail before we try to bring the cpu up */
1357         idle = idle_thread_get(cpu);
1358         if (IS_ERR(idle)) {
1359             ret = PTR_ERR(idle);
1360             goto out;
1361         }
1362     }
1363 
1364     cpuhp_tasks_frozen = tasks_frozen;
1365 
1366     cpuhp_set_state(cpu, st, target);
1367     /*
1368      * If the current CPU state is in the range of the AP hotplug thread,
1369      * then we need to kick the thread once more.
1370      */
1371     if (st->state > CPUHP_BRINGUP_CPU) {
1372         ret = cpuhp_kick_ap_work(cpu);
1373         /*
1374          * The AP side has done the error rollback already. Just
1375          * return the error code..
1376          */
1377         if (ret)
1378             goto out;
1379     }
1380 
1381     /*
1382      * Try to reach the target state. We max out on the BP at
1383      * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1384      * responsible for bringing it up to the target state.
1385      */
1386     target = min((int)target, CPUHP_BRINGUP_CPU);
1387     ret = cpuhp_up_callbacks(cpu, st, target);
1388 out:
1389     cpus_write_unlock();
1390     arch_smt_update();
1391     cpu_up_down_serialize_trainwrecks(tasks_frozen);
1392     return ret;
1393 }
1394 
1395 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1396 {
1397     int err = 0;
1398 
1399     if (!cpu_possible(cpu)) {
1400         pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1401                cpu);
1402 #if defined(CONFIG_IA64)
1403         pr_err("please check additional_cpus= boot parameter\n");
1404 #endif
1405         return -EINVAL;
1406     }
1407 
1408     err = try_online_node(cpu_to_node(cpu));
1409     if (err)
1410         return err;
1411 
1412     cpu_maps_update_begin();
1413 
1414     if (cpu_hotplug_disabled) {
1415         err = -EBUSY;
1416         goto out;
1417     }
1418     if (!cpu_smt_allowed(cpu)) {
1419         err = -EPERM;
1420         goto out;
1421     }
1422 
1423     err = _cpu_up(cpu, 0, target);
1424 out:
1425     cpu_maps_update_done();
1426     return err;
1427 }
1428 
1429 /**
1430  * cpu_device_up - Bring up a cpu device
1431  * @dev: Pointer to the cpu device to online
1432  *
1433  * This function is meant to be used by device core cpu subsystem only.
1434  *
1435  * Other subsystems should use add_cpu() instead.
1436  *
1437  * Return: %0 on success or a negative errno code
1438  */
1439 int cpu_device_up(struct device *dev)
1440 {
1441     return cpu_up(dev->id, CPUHP_ONLINE);
1442 }
1443 
1444 int add_cpu(unsigned int cpu)
1445 {
1446     int ret;
1447 
1448     lock_device_hotplug();
1449     ret = device_online(get_cpu_device(cpu));
1450     unlock_device_hotplug();
1451 
1452     return ret;
1453 }
1454 EXPORT_SYMBOL_GPL(add_cpu);
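
/*
 * Editor's note -- hedged sketch, not part of this file: subsystems outside
 * the cpu device core can take a CPU through a full offline/online round trip
 * with the two exported helpers (hypothetical wrapper):
 *
 *	static int mydrv_cycle_cpu(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		ret = remove_cpu(cpu);
 *		if (ret)
 *			return ret;
 *		return add_cpu(cpu);
 *	}
 */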
1455 
1456 /**
1457  * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1458  * @sleep_cpu: The cpu we hibernated on and should be brought up.
1459  *
1460  * On some architectures like arm64, we can hibernate on any CPU, but on
1461  * wake up the CPU we hibernated on might be offline as a side effect of
1462  * using maxcpus= for example.
1463  *
1464  * Return: %0 on success or a negative errno code
1465  */
1466 int bringup_hibernate_cpu(unsigned int sleep_cpu)
1467 {
1468     int ret;
1469 
1470     if (!cpu_online(sleep_cpu)) {
1471         pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1472         ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1473         if (ret) {
1474             pr_err("Failed to bring hibernate-CPU up!\n");
1475             return ret;
1476         }
1477     }
1478     return 0;
1479 }
1480 
1481 void bringup_nonboot_cpus(unsigned int setup_max_cpus)
1482 {
1483     unsigned int cpu;
1484 
1485     for_each_present_cpu(cpu) {
1486         if (num_online_cpus() >= setup_max_cpus)
1487             break;
1488         if (!cpu_online(cpu))
1489             cpu_up(cpu, CPUHP_ONLINE);
1490     }
1491 }
1492 
1493 #ifdef CONFIG_PM_SLEEP_SMP
1494 static cpumask_var_t frozen_cpus;
1495 
1496 int freeze_secondary_cpus(int primary)
1497 {
1498     int cpu, error = 0;
1499 
1500     cpu_maps_update_begin();
1501     if (primary == -1) {
1502         primary = cpumask_first(cpu_online_mask);
1503         if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1504             primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1505     } else {
1506         if (!cpu_online(primary))
1507             primary = cpumask_first(cpu_online_mask);
1508     }
1509 
1510     /*
1511      * We take down all of the non-boot CPUs in one shot to avoid races
1512      * with userspace trying to use CPU hotplug at the same time.
1513      */
1514     cpumask_clear(frozen_cpus);
1515 
1516     pr_info("Disabling non-boot CPUs ...\n");
1517     for_each_online_cpu(cpu) {
1518         if (cpu == primary)
1519             continue;
1520 
1521         if (pm_wakeup_pending()) {
1522             pr_info("Wakeup pending. Abort CPU freeze\n");
1523             error = -EBUSY;
1524             break;
1525         }
1526 
1527         trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1528         error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1529         trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1530         if (!error)
1531             cpumask_set_cpu(cpu, frozen_cpus);
1532         else {
1533             pr_err("Error taking CPU%d down: %d\n", cpu, error);
1534             break;
1535         }
1536     }
1537 
1538     if (!error)
1539         BUG_ON(num_online_cpus() > 1);
1540     else
1541         pr_err("Non-boot CPUs are not disabled\n");
1542 
1543     /*
1544      * Make sure the CPUs won't be enabled by someone else. We need to do
1545      * this even in case of failure as all freeze_secondary_cpus() users are
1546      * supposed to do thaw_secondary_cpus() on the failure path.
1547      */
1548     cpu_hotplug_disabled++;
1549 
1550     cpu_maps_update_done();
1551     return error;
1552 }
1553 
1554 void __weak arch_thaw_secondary_cpus_begin(void)
1555 {
1556 }
1557 
1558 void __weak arch_thaw_secondary_cpus_end(void)
1559 {
1560 }
1561 
1562 void thaw_secondary_cpus(void)
1563 {
1564     int cpu, error;
1565 
1566     /* Allow everyone to use the CPU hotplug again */
1567     cpu_maps_update_begin();
1568     __cpu_hotplug_enable();
1569     if (cpumask_empty(frozen_cpus))
1570         goto out;
1571 
1572     pr_info("Enabling non-boot CPUs ...\n");
1573 
1574     arch_thaw_secondary_cpus_begin();
1575 
1576     for_each_cpu(cpu, frozen_cpus) {
1577         trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1578         error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1579         trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1580         if (!error) {
1581             pr_info("CPU%d is up\n", cpu);
1582             continue;
1583         }
1584         pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1585     }
1586 
1587     arch_thaw_secondary_cpus_end();
1588 
1589     cpumask_clear(frozen_cpus);
1590 out:
1591     cpu_maps_update_done();
1592 }
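
/*
 * Editor's note -- illustrative sketch, not part of this file: the
 * suspend/hibernate core pairs the two helpers roughly like this
 * (simplified; enter_low_power_state() is a hypothetical placeholder):
 *
 *	error = freeze_secondary_cpus(-1);
 *	if (!error)
 *		error = enter_low_power_state();
 *	thaw_secondary_cpus();
 *
 * Passing -1 lets freeze_secondary_cpus() pick a housekeeping-capable primary
 * CPU itself. thaw_secondary_cpus() is called even when the freeze failed,
 * because freeze_secondary_cpus() bumps cpu_hotplug_disabled in either case
 * and the thaw path is what re-enables hotplug.
 */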
1593 
1594 static int __init alloc_frozen_cpus(void)
1595 {
1596     if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1597         return -ENOMEM;
1598     return 0;
1599 }
1600 core_initcall(alloc_frozen_cpus);
1601 
1602 /*
1603  * When callbacks for CPU hotplug notifications are being executed, we must
1604  * ensure that the state of the system with respect to the tasks being frozen
1605  * or not, as reported by the notification, remains unchanged *throughout the
1606  * duration* of the execution of the callbacks.
1607  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1608  *
1609  * This synchronization is implemented by mutually excluding regular CPU
1610  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1611  * Hibernate notifications.
1612  */
1613 static int
1614 cpu_hotplug_pm_callback(struct notifier_block *nb,
1615             unsigned long action, void *ptr)
1616 {
1617     switch (action) {
1618 
1619     case PM_SUSPEND_PREPARE:
1620     case PM_HIBERNATION_PREPARE:
1621         cpu_hotplug_disable();
1622         break;
1623 
1624     case PM_POST_SUSPEND:
1625     case PM_POST_HIBERNATION:
1626         cpu_hotplug_enable();
1627         break;
1628 
1629     default:
1630         return NOTIFY_DONE;
1631     }
1632 
1633     return NOTIFY_OK;
1634 }
1635 
1636 
1637 static int __init cpu_hotplug_pm_sync_init(void)
1638 {
1639     /*
1640      * cpu_hotplug_pm_callback has higher priority than x86's
1641      * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
1642      * having disabled cpu hotplug to avoid a cpu hotplug race.
1643      */
1644     pm_notifier(cpu_hotplug_pm_callback, 0);
1645     return 0;
1646 }
1647 core_initcall(cpu_hotplug_pm_sync_init);
1648 
1649 #endif /* CONFIG_PM_SLEEP_SMP */
1650 
1651 int __boot_cpu_id;
1652 
1653 #endif /* CONFIG_SMP */
1654 
1655 /* Boot processor state steps */
1656 static struct cpuhp_step cpuhp_hp_states[] = {
1657     [CPUHP_OFFLINE] = {
1658         .name           = "offline",
1659         .startup.single     = NULL,
1660         .teardown.single    = NULL,
1661     },
1662 #ifdef CONFIG_SMP
1663     [CPUHP_CREATE_THREADS]= {
1664         .name           = "threads:prepare",
1665         .startup.single     = smpboot_create_threads,
1666         .teardown.single    = NULL,
1667         .cant_stop      = true,
1668     },
1669     [CPUHP_PERF_PREPARE] = {
1670         .name           = "perf:prepare",
1671         .startup.single     = perf_event_init_cpu,
1672         .teardown.single    = perf_event_exit_cpu,
1673     },
1674     [CPUHP_RANDOM_PREPARE] = {
1675         .name           = "random:prepare",
1676         .startup.single     = random_prepare_cpu,
1677         .teardown.single    = NULL,
1678     },
1679     [CPUHP_WORKQUEUE_PREP] = {
1680         .name           = "workqueue:prepare",
1681         .startup.single     = workqueue_prepare_cpu,
1682         .teardown.single    = NULL,
1683     },
1684     [CPUHP_HRTIMERS_PREPARE] = {
1685         .name           = "hrtimers:prepare",
1686         .startup.single     = hrtimers_prepare_cpu,
1687         .teardown.single    = hrtimers_dead_cpu,
1688     },
1689     [CPUHP_SMPCFD_PREPARE] = {
1690         .name           = "smpcfd:prepare",
1691         .startup.single     = smpcfd_prepare_cpu,
1692         .teardown.single    = smpcfd_dead_cpu,
1693     },
1694     [CPUHP_RELAY_PREPARE] = {
1695         .name           = "relay:prepare",
1696         .startup.single     = relay_prepare_cpu,
1697         .teardown.single    = NULL,
1698     },
1699     [CPUHP_SLAB_PREPARE] = {
1700         .name           = "slab:prepare",
1701         .startup.single     = slab_prepare_cpu,
1702         .teardown.single    = slab_dead_cpu,
1703     },
1704     [CPUHP_RCUTREE_PREP] = {
1705         .name           = "RCU/tree:prepare",
1706         .startup.single     = rcutree_prepare_cpu,
1707         .teardown.single    = rcutree_dead_cpu,
1708     },
1709     /*
1710      * On the tear-down path, timers_dead_cpu() must be invoked
1711      * before blk_mq_queue_reinit_notify() from notify_dead(),
1712      * otherwise an RCU stall occurs.
1713      */
1714     [CPUHP_TIMERS_PREPARE] = {
1715         .name           = "timers:prepare",
1716         .startup.single     = timers_prepare_cpu,
1717         .teardown.single    = timers_dead_cpu,
1718     },
1719     /* Kicks the plugged cpu into life */
1720     [CPUHP_BRINGUP_CPU] = {
1721         .name           = "cpu:bringup",
1722         .startup.single     = bringup_cpu,
1723         .teardown.single    = finish_cpu,
1724         .cant_stop      = true,
1725     },
1726     /* Final state before CPU kills itself */
1727     [CPUHP_AP_IDLE_DEAD] = {
1728         .name           = "idle:dead",
1729     },
1730     /*
1731      * Last state before CPU enters the idle loop to die. Transient state
1732      * for synchronization.
1733      */
1734     [CPUHP_AP_OFFLINE] = {
1735         .name           = "ap:offline",
1736         .cant_stop      = true,
1737     },
1738     /* First state is scheduler control. Interrupts are disabled */
1739     [CPUHP_AP_SCHED_STARTING] = {
1740         .name           = "sched:starting",
1741         .startup.single     = sched_cpu_starting,
1742         .teardown.single    = sched_cpu_dying,
1743     },
1744     [CPUHP_AP_RCUTREE_DYING] = {
1745         .name           = "RCU/tree:dying",
1746         .startup.single     = NULL,
1747         .teardown.single    = rcutree_dying_cpu,
1748     },
1749     [CPUHP_AP_SMPCFD_DYING] = {
1750         .name           = "smpcfd:dying",
1751         .startup.single     = NULL,
1752         .teardown.single    = smpcfd_dying_cpu,
1753     },
1754     /* Entry state on starting. Interrupts enabled from here on. Transient
1755      * state for synchronization. */
1756     [CPUHP_AP_ONLINE] = {
1757         .name           = "ap:online",
1758     },
1759     /*
1760      * Handled on control processor until the plugged processor manages
1761      * this itself.
1762      */
1763     [CPUHP_TEARDOWN_CPU] = {
1764         .name           = "cpu:teardown",
1765         .startup.single     = NULL,
1766         .teardown.single    = takedown_cpu,
1767         .cant_stop      = true,
1768     },
1769 
1770     [CPUHP_AP_SCHED_WAIT_EMPTY] = {
1771         .name           = "sched:waitempty",
1772         .startup.single     = NULL,
1773         .teardown.single    = sched_cpu_wait_empty,
1774     },
1775 
1776     /* Handle smpboot threads park/unpark */
1777     [CPUHP_AP_SMPBOOT_THREADS] = {
1778         .name           = "smpboot/threads:online",
1779         .startup.single     = smpboot_unpark_threads,
1780         .teardown.single    = smpboot_park_threads,
1781     },
1782     [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1783         .name           = "irq/affinity:online",
1784         .startup.single     = irq_affinity_online_cpu,
1785         .teardown.single    = NULL,
1786     },
1787     [CPUHP_AP_PERF_ONLINE] = {
1788         .name           = "perf:online",
1789         .startup.single     = perf_event_init_cpu,
1790         .teardown.single    = perf_event_exit_cpu,
1791     },
1792     [CPUHP_AP_WATCHDOG_ONLINE] = {
1793         .name           = "lockup_detector:online",
1794         .startup.single     = lockup_detector_online_cpu,
1795         .teardown.single    = lockup_detector_offline_cpu,
1796     },
1797     [CPUHP_AP_WORKQUEUE_ONLINE] = {
1798         .name           = "workqueue:online",
1799         .startup.single     = workqueue_online_cpu,
1800         .teardown.single    = workqueue_offline_cpu,
1801     },
1802     [CPUHP_AP_RANDOM_ONLINE] = {
1803         .name           = "random:online",
1804         .startup.single     = random_online_cpu,
1805         .teardown.single    = NULL,
1806     },
1807     [CPUHP_AP_RCUTREE_ONLINE] = {
1808         .name           = "RCU/tree:online",
1809         .startup.single     = rcutree_online_cpu,
1810         .teardown.single    = rcutree_offline_cpu,
1811     },
1812 #endif
1813     /*
1814      * The dynamically registered state space is here
1815      */
1816 
1817 #ifdef CONFIG_SMP
1818     /* Last state is scheduler control setting the cpu active */
1819     [CPUHP_AP_ACTIVE] = {
1820         .name           = "sched:active",
1821         .startup.single     = sched_cpu_activate,
1822         .teardown.single    = sched_cpu_deactivate,
1823     },
1824 #endif
1825 
1826     /* CPU is fully up and running. */
1827     [CPUHP_ONLINE] = {
1828         .name           = "online",
1829         .startup.single     = NULL,
1830         .teardown.single    = NULL,
1831     },
1832 };
1833 
1834 /* Sanity check for callbacks */
1835 static int cpuhp_cb_check(enum cpuhp_state state)
1836 {
1837     if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1838         return -EINVAL;
1839     return 0;
1840 }
1841 
1842 /*
1843  * Returns a free slot in the requested dynamic range (online or prepare). The
1844  * states are protected by the cpuhp_state_mutex and an empty slot is identified
1845  * by having no name assigned.
1846  */
1847 static int cpuhp_reserve_state(enum cpuhp_state state)
1848 {
1849     enum cpuhp_state i, end;
1850     struct cpuhp_step *step;
1851 
1852     switch (state) {
1853     case CPUHP_AP_ONLINE_DYN:
1854         step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1855         end = CPUHP_AP_ONLINE_DYN_END;
1856         break;
1857     case CPUHP_BP_PREPARE_DYN:
1858         step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1859         end = CPUHP_BP_PREPARE_DYN_END;
1860         break;
1861     default:
1862         return -EINVAL;
1863     }
1864 
1865     for (i = state; i <= end; i++, step++) {
1866         if (!step->name)
1867             return i;
1868     }
1869     WARN(1, "No more dynamic states available for CPU hotplug\n");
1870     return -ENOSPC;
1871 }
1872 
1873 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1874                  int (*startup)(unsigned int cpu),
1875                  int (*teardown)(unsigned int cpu),
1876                  bool multi_instance)
1877 {
1878     /* (Un)Install the callbacks for further cpu hotplug operations */
1879     struct cpuhp_step *sp;
1880     int ret = 0;
1881 
1882     /*
1883      * If name is NULL, then the state gets removed.
1884      *
1885      * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1886      * the first allocation from these dynamic ranges, so the removal
1887      * would trigger a new allocation and clear the wrong (already
1888      * empty) state, leaving the callbacks of the to be cleared state
1889      * dangling, which causes wreckage on the next hotplug operation.
1890      */
1891     if (name && (state == CPUHP_AP_ONLINE_DYN ||
1892              state == CPUHP_BP_PREPARE_DYN)) {
1893         ret = cpuhp_reserve_state(state);
1894         if (ret < 0)
1895             return ret;
1896         state = ret;
1897     }
1898     sp = cpuhp_get_step(state);
1899     if (name && sp->name)
1900         return -EBUSY;
1901 
1902     sp->startup.single = startup;
1903     sp->teardown.single = teardown;
1904     sp->name = name;
1905     sp->multi_instance = multi_instance;
1906     INIT_HLIST_HEAD(&sp->list);
1907     return ret;
1908 }
1909 
1910 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1911 {
1912     return cpuhp_get_step(state)->teardown.single;
1913 }
1914 
1915 /*
1916  * Call the startup/teardown function for a step either on the AP or
1917  * on the current CPU.
1918  */
1919 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1920                 struct hlist_node *node)
1921 {
1922     struct cpuhp_step *sp = cpuhp_get_step(state);
1923     int ret;
1924 
1925     /*
1926      * If there's nothing to do, we're done.
1927      * Relies on the union for multi_instance.
1928      */
1929     if (cpuhp_step_empty(bringup, sp))
1930         return 0;
1931     /*
1932      * The non-AP-bound callbacks can fail on bringup. On teardown,
1933      * e.g. module removal, we crash for now.
1934      */
1935 #ifdef CONFIG_SMP
1936     if (cpuhp_is_ap_state(state))
1937         ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1938     else
1939         ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1940 #else
1941     ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1942 #endif
1943     BUG_ON(ret && !bringup);
1944     return ret;
1945 }
1946 
1947 /*
1948  * Called from __cpuhp_setup_state on a recoverable failure.
1949  *
1950  * Note: The teardown callbacks for rollback are not allowed to fail!
1951  */
1952 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1953                    struct hlist_node *node)
1954 {
1955     int cpu;
1956 
1957     /* Roll back the already executed steps on the other cpus */
1958     for_each_present_cpu(cpu) {
1959         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1960         int cpustate = st->state;
1961 
1962         if (cpu >= failedcpu)
1963             break;
1964 
1965         /* Did we invoke the startup call on that cpu ? */
1966         if (cpustate >= state)
1967             cpuhp_issue_call(cpu, state, false, node);
1968     }
1969 }
1970 
1971 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1972                       struct hlist_node *node,
1973                       bool invoke)
1974 {
1975     struct cpuhp_step *sp;
1976     int cpu;
1977     int ret;
1978 
1979     lockdep_assert_cpus_held();
1980 
1981     sp = cpuhp_get_step(state);
1982     if (sp->multi_instance == false)
1983         return -EINVAL;
1984 
1985     mutex_lock(&cpuhp_state_mutex);
1986 
1987     if (!invoke || !sp->startup.multi)
1988         goto add_node;
1989 
1990     /*
1991      * Try to call the startup callback for each present cpu
1992      * depending on the hotplug state of the cpu.
1993      */
1994     for_each_present_cpu(cpu) {
1995         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1996         int cpustate = st->state;
1997 
1998         if (cpustate < state)
1999             continue;
2000 
2001         ret = cpuhp_issue_call(cpu, state, true, node);
2002         if (ret) {
2003             if (sp->teardown.multi)
2004                 cpuhp_rollback_install(cpu, state, node);
2005             goto unlock;
2006         }
2007     }
2008 add_node:
2009     ret = 0;
2010     hlist_add_head(node, &sp->list);
2011 unlock:
2012     mutex_unlock(&cpuhp_state_mutex);
2013     return ret;
2014 }
2015 
2016 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2017                    bool invoke)
2018 {
2019     int ret;
2020 
2021     cpus_read_lock();
2022     ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2023     cpus_read_unlock();
2024     return ret;
2025 }
2026 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
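/*
 * Hedged usage sketch (not part of this file): a driver with several device
 * instances would register a multi-instance state (e.g. via the
 * cpuhp_setup_state_multi() wrapper in <linux/cpuhotplug.h>) and then hook
 * each instance in through the function exported above. All foo_* names
 * below are hypothetical.
 */
#if 0	/* illustrative sketch only */
struct foo_instance {
	struct hlist_node node;		/* handed to the hotplug core */
	/* per-device data ... */
};

/* Multi-instance callbacks get the cpu *and* the instance node. */
static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct foo_instance *inst = hlist_entry(node, struct foo_instance, node);

	/* bring up the per-cpu part of this instance */
	return 0;
}

static enum cpuhp_state foo_hp_state;	/* filled in at state registration */

static int foo_register_instance(struct foo_instance *inst)
{
	/* invoke=true: run foo_cpu_online() on all CPUs already >= the state */
	return __cpuhp_state_add_instance(foo_hp_state, &inst->node, true);
}
#endif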
2027 
2028 /**
2029  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
2030  * @state:      The state to set up
2031  * @name:       Name of the step
2032  * @invoke:     If true, the startup function is invoked for cpus where
2033  *          cpu state >= @state
2034  * @startup:        startup callback function
2035  * @teardown:       teardown callback function
2036  * @multi_instance: State is set up for multiple instances which get
2037  *          added afterwards.
2038  *
2039  * The caller needs to hold cpus read locked while calling this function.
2040  * Return:
2041  *   On success:
2042  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
2043  *      0 for all other states
2044  *   On failure: proper (negative) error code
2045  */
2046 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2047                    const char *name, bool invoke,
2048                    int (*startup)(unsigned int cpu),
2049                    int (*teardown)(unsigned int cpu),
2050                    bool multi_instance)
2051 {
2052     int cpu, ret = 0;
2053     bool dynstate;
2054 
2055     lockdep_assert_cpus_held();
2056 
2057     if (cpuhp_cb_check(state) || !name)
2058         return -EINVAL;
2059 
2060     mutex_lock(&cpuhp_state_mutex);
2061 
2062     ret = cpuhp_store_callbacks(state, name, startup, teardown,
2063                     multi_instance);
2064 
2065     dynstate = state == CPUHP_AP_ONLINE_DYN;
2066     if (ret > 0 && dynstate) {
2067         state = ret;
2068         ret = 0;
2069     }
2070 
2071     if (ret || !invoke || !startup)
2072         goto out;
2073 
2074     /*
2075      * Try to call the startup callback for each present cpu
2076      * depending on the hotplug state of the cpu.
2077      */
2078     for_each_present_cpu(cpu) {
2079         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2080         int cpustate = st->state;
2081 
2082         if (cpustate < state)
2083             continue;
2084 
2085         ret = cpuhp_issue_call(cpu, state, true, NULL);
2086         if (ret) {
2087             if (teardown)
2088                 cpuhp_rollback_install(cpu, state, NULL);
2089             cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2090             goto out;
2091         }
2092     }
2093 out:
2094     mutex_unlock(&cpuhp_state_mutex);
2095     /*
2096      * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2097      * dynamically allocated state in case of success.
2098      */
2099     if (!ret && dynstate)
2100         return state;
2101     return ret;
2102 }
2103 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2104 
2105 int __cpuhp_setup_state(enum cpuhp_state state,
2106             const char *name, bool invoke,
2107             int (*startup)(unsigned int cpu),
2108             int (*teardown)(unsigned int cpu),
2109             bool multi_instance)
2110 {
2111     int ret;
2112 
2113     cpus_read_lock();
2114     ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2115                          teardown, multi_instance);
2116     cpus_read_unlock();
2117     return ret;
2118 }
2119 EXPORT_SYMBOL(__cpuhp_setup_state);
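/*
 * Hedged usage sketch (not part of this file): typical registration of a
 * dynamically allocated "online" state through the export above. Most
 * callers go through the cpuhp_setup_state() convenience wrapper in
 * <linux/cpuhotplug.h>, which passes invoke=true and multi_instance=false
 * just like this. The foo_* names are hypothetical.
 */
#if 0	/* illustrative sketch only */
static enum cpuhp_state foo_hp_state;

static int foo_cpu_online(unsigned int cpu)
{
	/* set up the per-cpu resources of the subsystem */
	return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
	/* undo foo_cpu_online() */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/*
	 * For CPUHP_AP_ONLINE_DYN a free slot is reserved and its number
	 * returned, so it must be remembered for later removal.
	 */
	ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online", true,
				  foo_cpu_online, foo_cpu_offline, false);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;
	return 0;
}
#endif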
2120 
2121 int __cpuhp_state_remove_instance(enum cpuhp_state state,
2122                   struct hlist_node *node, bool invoke)
2123 {
2124     struct cpuhp_step *sp = cpuhp_get_step(state);
2125     int cpu;
2126 
2127     BUG_ON(cpuhp_cb_check(state));
2128 
2129     if (!sp->multi_instance)
2130         return -EINVAL;
2131 
2132     cpus_read_lock();
2133     mutex_lock(&cpuhp_state_mutex);
2134 
2135     if (!invoke || !cpuhp_get_teardown_cb(state))
2136         goto remove;
2137     /*
2138      * Call the teardown callback for each present cpu depending
2139      * on the hotplug state of the cpu. This function is not
2140      * allowed to fail currently!
2141      */
2142     for_each_present_cpu(cpu) {
2143         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2144         int cpustate = st->state;
2145 
2146         if (cpustate >= state)
2147             cpuhp_issue_call(cpu, state, false, node);
2148     }
2149 
2150 remove:
2151     hlist_del(node);
2152     mutex_unlock(&cpuhp_state_mutex);
2153     cpus_read_unlock();
2154 
2155     return 0;
2156 }
2157 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2158 
2159 /**
2160  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2161  * @state:  The state to remove
2162  * @invoke: If true, the teardown function is invoked for cpus where
2163  *      cpu state >= @state
2164  *
2165  * The caller needs to hold cpus read locked while calling this function.
2166  * The teardown callback is currently not allowed to fail. Think
2167  * about module removal!
2168  */
2169 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2170 {
2171     struct cpuhp_step *sp = cpuhp_get_step(state);
2172     int cpu;
2173 
2174     BUG_ON(cpuhp_cb_check(state));
2175 
2176     lockdep_assert_cpus_held();
2177 
2178     mutex_lock(&cpuhp_state_mutex);
2179     if (sp->multi_instance) {
2180         WARN(!hlist_empty(&sp->list),
2181              "Error: Removing state %d which has instances left.\n",
2182              state);
2183         goto remove;
2184     }
2185 
2186     if (!invoke || !cpuhp_get_teardown_cb(state))
2187         goto remove;
2188 
2189     /*
2190      * Call the teardown callback for each present cpu depending
2191      * on the hotplug state of the cpu. This function is not
2192      * allowed to fail currently!
2193      */
2194     for_each_present_cpu(cpu) {
2195         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2196         int cpustate = st->state;
2197 
2198         if (cpustate >= state)
2199             cpuhp_issue_call(cpu, state, false, NULL);
2200     }
2201 remove:
2202     cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2203     mutex_unlock(&cpuhp_state_mutex);
2204 }
2205 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2206 
2207 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2208 {
2209     cpus_read_lock();
2210     __cpuhp_remove_state_cpuslocked(state, invoke);
2211     cpus_read_unlock();
2212 }
2213 EXPORT_SYMBOL(__cpuhp_remove_state);
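/*
 * Hedged counterpart to the setup sketch above (not part of this file): on
 * module exit the dynamically allocated state is dropped again, which runs
 * the teardown callback on every CPU that reached the state. foo_hp_state
 * and foo_exit() are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void __exit foo_exit(void)
{
	__cpuhp_remove_state(foo_hp_state, true);
}
#endif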
2214 
2215 #ifdef CONFIG_HOTPLUG_SMT
2216 static void cpuhp_offline_cpu_device(unsigned int cpu)
2217 {
2218     struct device *dev = get_cpu_device(cpu);
2219 
2220     dev->offline = true;
2221     /* Tell user space about the state change */
2222     kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2223 }
2224 
2225 static void cpuhp_online_cpu_device(unsigned int cpu)
2226 {
2227     struct device *dev = get_cpu_device(cpu);
2228 
2229     dev->offline = false;
2230     /* Tell user space about the state change */
2231     kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2232 }
2233 
2234 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2235 {
2236     int cpu, ret = 0;
2237 
2238     cpu_maps_update_begin();
2239     for_each_online_cpu(cpu) {
2240         if (topology_is_primary_thread(cpu))
2241             continue;
2242         ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2243         if (ret)
2244             break;
2245         /*
2246          * As this needs to hold the cpu maps lock it's impossible
2247          * to call device_offline() because that ends up calling
2248          * cpu_down() which takes the cpu maps lock. The cpu maps lock
2249          * needs to be held as this might race against in-kernel
2250          * abusers of the hotplug machinery (thermal management).
2251          *
2252          * So nothing would update device:offline state. That would
2253          * leave the sysfs entry stale and prevent onlining after
2254          * smt control has been changed to 'off' again. This is
2255          * called under the sysfs hotplug lock, so it is properly
2256          * serialized against the regular offline usage.
2257          */
2258         cpuhp_offline_cpu_device(cpu);
2259     }
2260     if (!ret)
2261         cpu_smt_control = ctrlval;
2262     cpu_maps_update_done();
2263     return ret;
2264 }
2265 
2266 int cpuhp_smt_enable(void)
2267 {
2268     int cpu, ret = 0;
2269 
2270     cpu_maps_update_begin();
2271     cpu_smt_control = CPU_SMT_ENABLED;
2272     for_each_present_cpu(cpu) {
2273         /* Skip online CPUs and CPUs on offline nodes */
2274         if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2275             continue;
2276         ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2277         if (ret)
2278             break;
2279         /* See comment in cpuhp_smt_disable() */
2280         cpuhp_online_cpu_device(cpu);
2281     }
2282     cpu_maps_update_done();
2283     return ret;
2284 }
2285 #endif
2286 
2287 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2288 static ssize_t state_show(struct device *dev,
2289               struct device_attribute *attr, char *buf)
2290 {
2291     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2292 
2293     return sprintf(buf, "%d\n", st->state);
2294 }
2295 static DEVICE_ATTR_RO(state);
2296 
2297 static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2298                 const char *buf, size_t count)
2299 {
2300     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2301     struct cpuhp_step *sp;
2302     int target, ret;
2303 
2304     ret = kstrtoint(buf, 10, &target);
2305     if (ret)
2306         return ret;
2307 
2308 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2309     if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2310         return -EINVAL;
2311 #else
2312     if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2313         return -EINVAL;
2314 #endif
2315 
2316     ret = lock_device_hotplug_sysfs();
2317     if (ret)
2318         return ret;
2319 
2320     mutex_lock(&cpuhp_state_mutex);
2321     sp = cpuhp_get_step(target);
2322     ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2323     mutex_unlock(&cpuhp_state_mutex);
2324     if (ret)
2325         goto out;
2326 
2327     if (st->state < target)
2328         ret = cpu_up(dev->id, target);
2329     else
2330         ret = cpu_down(dev->id, target);
2331 out:
2332     unlock_device_hotplug();
2333     return ret ? ret : count;
2334 }
2335 
2336 static ssize_t target_show(struct device *dev,
2337                struct device_attribute *attr, char *buf)
2338 {
2339     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2340 
2341     return sprintf(buf, "%d\n", st->target);
2342 }
2343 static DEVICE_ATTR_RW(target);
2344 
2345 static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2346               const char *buf, size_t count)
2347 {
2348     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2349     struct cpuhp_step *sp;
2350     int fail, ret;
2351 
2352     ret = kstrtoint(buf, 10, &fail);
2353     if (ret)
2354         return ret;
2355 
2356     if (fail == CPUHP_INVALID) {
2357         st->fail = fail;
2358         return count;
2359     }
2360 
2361     if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2362         return -EINVAL;
2363 
2364     /*
2365      * Cannot fail STARTING/DYING callbacks.
2366      */
2367     if (cpuhp_is_atomic_state(fail))
2368         return -EINVAL;
2369 
2370     /*
2371      * DEAD callbacks cannot fail...
2372      * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2373      * triggers the STARTING callbacks, so a failure in this state would
2374      * hinder rollback.
2375      */
2376     if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2377         return -EINVAL;
2378 
2379     /*
2380      * Cannot fail anything that doesn't have callbacks.
2381      */
2382     mutex_lock(&cpuhp_state_mutex);
2383     sp = cpuhp_get_step(fail);
2384     if (!sp->startup.single && !sp->teardown.single)
2385         ret = -EINVAL;
2386     mutex_unlock(&cpuhp_state_mutex);
2387     if (ret)
2388         return ret;
2389 
2390     st->fail = fail;
2391 
2392     return count;
2393 }
2394 
2395 static ssize_t fail_show(struct device *dev,
2396              struct device_attribute *attr, char *buf)
2397 {
2398     struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2399 
2400     return sprintf(buf, "%d\n", st->fail);
2401 }
2402 
2403 static DEVICE_ATTR_RW(fail);
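/*
 * Hedged usage note: these attributes show up per CPU under
 * /sys/devices/system/cpu/cpuN/hotplug/. For example (numeric state values
 * are configuration dependent; the list can be read from
 * /sys/devices/system/cpu/hotplug/states, see below):
 *
 *   cat /sys/devices/system/cpu/cpu1/hotplug/state       # current state
 *   echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target     # take cpu1 down
 *
 * The "fail" attribute arms a forced callback failure at the given state for
 * the next transition; writing the value of CPUHP_INVALID clears it again.
 * It exists purely to exercise the rollback paths.
 */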
2404 
2405 static struct attribute *cpuhp_cpu_attrs[] = {
2406     &dev_attr_state.attr,
2407     &dev_attr_target.attr,
2408     &dev_attr_fail.attr,
2409     NULL
2410 };
2411 
2412 static const struct attribute_group cpuhp_cpu_attr_group = {
2413     .attrs = cpuhp_cpu_attrs,
2414     .name = "hotplug",
2415     NULL
2416 };
2417 
2418 static ssize_t states_show(struct device *dev,
2419                  struct device_attribute *attr, char *buf)
2420 {
2421     ssize_t cur, res = 0;
2422     int i;
2423 
2424     mutex_lock(&cpuhp_state_mutex);
2425     for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2426         struct cpuhp_step *sp = cpuhp_get_step(i);
2427 
2428         if (sp->name) {
2429             cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2430             buf += cur;
2431             res += cur;
2432         }
2433     }
2434     mutex_unlock(&cpuhp_state_mutex);
2435     return res;
2436 }
2437 static DEVICE_ATTR_RO(states);
2438 
2439 static struct attribute *cpuhp_cpu_root_attrs[] = {
2440     &dev_attr_states.attr,
2441     NULL
2442 };
2443 
2444 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2445     .attrs = cpuhp_cpu_root_attrs,
2446     .name = "hotplug",
2447     NULL
2448 };
2449 
2450 #ifdef CONFIG_HOTPLUG_SMT
2451 
2452 static ssize_t
2453 __store_smt_control(struct device *dev, struct device_attribute *attr,
2454             const char *buf, size_t count)
2455 {
2456     int ctrlval, ret;
2457 
2458     if (sysfs_streq(buf, "on"))
2459         ctrlval = CPU_SMT_ENABLED;
2460     else if (sysfs_streq(buf, "off"))
2461         ctrlval = CPU_SMT_DISABLED;
2462     else if (sysfs_streq(buf, "forceoff"))
2463         ctrlval = CPU_SMT_FORCE_DISABLED;
2464     else
2465         return -EINVAL;
2466 
2467     if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2468         return -EPERM;
2469 
2470     if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2471         return -ENODEV;
2472 
2473     ret = lock_device_hotplug_sysfs();
2474     if (ret)
2475         return ret;
2476 
2477     if (ctrlval != cpu_smt_control) {
2478         switch (ctrlval) {
2479         case CPU_SMT_ENABLED:
2480             ret = cpuhp_smt_enable();
2481             break;
2482         case CPU_SMT_DISABLED:
2483         case CPU_SMT_FORCE_DISABLED:
2484             ret = cpuhp_smt_disable(ctrlval);
2485             break;
2486         }
2487     }
2488 
2489     unlock_device_hotplug();
2490     return ret ? ret : count;
2491 }
2492 
2493 #else /* !CONFIG_HOTPLUG_SMT */
2494 static ssize_t
2495 __store_smt_control(struct device *dev, struct device_attribute *attr,
2496             const char *buf, size_t count)
2497 {
2498     return -ENODEV;
2499 }
2500 #endif /* CONFIG_HOTPLUG_SMT */
2501 
2502 static const char *smt_states[] = {
2503     [CPU_SMT_ENABLED]       = "on",
2504     [CPU_SMT_DISABLED]      = "off",
2505     [CPU_SMT_FORCE_DISABLED]    = "forceoff",
2506     [CPU_SMT_NOT_SUPPORTED]     = "notsupported",
2507     [CPU_SMT_NOT_IMPLEMENTED]   = "notimplemented",
2508 };
2509 
2510 static ssize_t control_show(struct device *dev,
2511                 struct device_attribute *attr, char *buf)
2512 {
2513     const char *state = smt_states[cpu_smt_control];
2514 
2515     return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2516 }
2517 
2518 static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2519                  const char *buf, size_t count)
2520 {
2521     return __store_smt_control(dev, attr, buf, count);
2522 }
2523 static DEVICE_ATTR_RW(control);
2524 
2525 static ssize_t active_show(struct device *dev,
2526                struct device_attribute *attr, char *buf)
2527 {
2528     return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2529 }
2530 static DEVICE_ATTR_RO(active);
2531 
2532 static struct attribute *cpuhp_smt_attrs[] = {
2533     &dev_attr_control.attr,
2534     &dev_attr_active.attr,
2535     NULL
2536 };
2537 
2538 static const struct attribute_group cpuhp_smt_attr_group = {
2539     .attrs = cpuhp_smt_attrs,
2540     .name = "smt",
2541     NULL
2542 };
2543 
2544 static int __init cpu_smt_sysfs_init(void)
2545 {
2546     return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2547                   &cpuhp_smt_attr_group);
2548 }
2549 
2550 static int __init cpuhp_sysfs_init(void)
2551 {
2552     int cpu, ret;
2553 
2554     ret = cpu_smt_sysfs_init();
2555     if (ret)
2556         return ret;
2557 
2558     ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2559                  &cpuhp_cpu_root_attr_group);
2560     if (ret)
2561         return ret;
2562 
2563     for_each_possible_cpu(cpu) {
2564         struct device *dev = get_cpu_device(cpu);
2565 
2566         if (!dev)
2567             continue;
2568         ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2569         if (ret)
2570             return ret;
2571     }
2572     return 0;
2573 }
2574 device_initcall(cpuhp_sysfs_init);
2575 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2576 
2577 /*
2578  * cpu_bit_bitmap[] is a special, "compressed" data structure that
2579  * represents, for every value of nr, the NR_CPUS-bit value 1 << nr.
2580  *
2581  * It is used by cpumask_of() to get a constant address to a CPU
2582  * mask value that has a single bit set only.
2583  */
2584 
2585 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2586 #define MASK_DECLARE_1(x)   [x+1][0] = (1UL << (x))
2587 #define MASK_DECLARE_2(x)   MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2588 #define MASK_DECLARE_4(x)   MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2589 #define MASK_DECLARE_8(x)   MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2590 
2591 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2592 
2593     MASK_DECLARE_8(0),  MASK_DECLARE_8(8),
2594     MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2595 #if BITS_PER_LONG > 32
2596     MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2597     MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2598 #endif
2599 };
2600 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
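/*
 * Hedged sketch of how the table is consumed (mirrors get_cpu_mask() in
 * <linux/cpumask.h>); shown only to illustrate the "back into row 0" trick
 * noted above:
 */
#if 0	/* illustrative sketch only */
static const struct cpumask *example_cpumask_of(unsigned int cpu)
{
	/* Row 1 + (cpu % BITS_PER_LONG) has exactly that bit set in word 0. */
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	/*
	 * Step back cpu / BITS_PER_LONG words so the set bit ends up in the
	 * word corresponding to @cpu; the zero words of the preceding rows
	 * (including the all-zero row 0) provide the padding.
	 */
	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif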
2601 
2602 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2603 EXPORT_SYMBOL(cpu_all_bits);
2604 
2605 #ifdef CONFIG_INIT_ALL_POSSIBLE
2606 struct cpumask __cpu_possible_mask __read_mostly
2607     = {CPU_BITS_ALL};
2608 #else
2609 struct cpumask __cpu_possible_mask __read_mostly;
2610 #endif
2611 EXPORT_SYMBOL(__cpu_possible_mask);
2612 
2613 struct cpumask __cpu_online_mask __read_mostly;
2614 EXPORT_SYMBOL(__cpu_online_mask);
2615 
2616 struct cpumask __cpu_present_mask __read_mostly;
2617 EXPORT_SYMBOL(__cpu_present_mask);
2618 
2619 struct cpumask __cpu_active_mask __read_mostly;
2620 EXPORT_SYMBOL(__cpu_active_mask);
2621 
2622 struct cpumask __cpu_dying_mask __read_mostly;
2623 EXPORT_SYMBOL(__cpu_dying_mask);
2624 
2625 atomic_t __num_online_cpus __read_mostly;
2626 EXPORT_SYMBOL(__num_online_cpus);
2627 
2628 void init_cpu_present(const struct cpumask *src)
2629 {
2630     cpumask_copy(&__cpu_present_mask, src);
2631 }
2632 
2633 void init_cpu_possible(const struct cpumask *src)
2634 {
2635     cpumask_copy(&__cpu_possible_mask, src);
2636 }
2637 
2638 void init_cpu_online(const struct cpumask *src)
2639 {
2640     cpumask_copy(&__cpu_online_mask, src);
2641 }
2642 
2643 void set_cpu_online(unsigned int cpu, bool online)
2644 {
2645     /*
2646      * atomic_inc/dec() is required to handle the horrid abuse of this
2647      * function by the reboot and kexec code which invoke it from
2648      * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2649      * regular CPU hotplug is properly serialized.
2650      *
2651      * Note, that the fact that __num_online_cpus is of type atomic_t
2652      * does not protect readers which are not serialized against
2653      * concurrent hotplug operations.
2654      */
2655     if (online) {
2656         if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2657             atomic_inc(&__num_online_cpus);
2658     } else {
2659         if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2660             atomic_dec(&__num_online_cpus);
2661     }
2662 }
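/*
 * Hedged note: readers such as num_online_cpus() in <linux/cpumask.h>
 * consume the counter roughly like this, which is why the comment above
 * stresses that they are not serialized against concurrent hotplug:
 */
#if 0	/* illustrative sketch only */
static inline unsigned int example_num_online_cpus(void)
{
	return atomic_read(&__num_online_cpus);
}
#endif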
2663 
2664 /*
2665  * Activate the first processor.
2666  */
2667 void __init boot_cpu_init(void)
2668 {
2669     int cpu = smp_processor_id();
2670 
2671     /* Mark the boot cpu "present", "online" etc. for the SMP and UP case */
2672     set_cpu_online(cpu, true);
2673     set_cpu_active(cpu, true);
2674     set_cpu_present(cpu, true);
2675     set_cpu_possible(cpu, true);
2676 
2677 #ifdef CONFIG_SMP
2678     __boot_cpu_id = cpu;
2679 #endif
2680 }
2681 
2682 /*
2683  * Must be called _AFTER_ setting up the per_cpu areas
2684  */
2685 void __init boot_cpu_hotplug_init(void)
2686 {
2687 #ifdef CONFIG_SMP
2688     cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2689 #endif
2690     this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2691 }
2692 
2693 /*
2694  * These are used for a global "mitigations=" cmdline option for toggling
2695  * optional CPU mitigations.
2696  */
2697 enum cpu_mitigations {
2698     CPU_MITIGATIONS_OFF,
2699     CPU_MITIGATIONS_AUTO,
2700     CPU_MITIGATIONS_AUTO_NOSMT,
2701 };
2702 
2703 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2704     CPU_MITIGATIONS_AUTO;
2705 
2706 static int __init mitigations_parse_cmdline(char *arg)
2707 {
2708     if (!strcmp(arg, "off"))
2709         cpu_mitigations = CPU_MITIGATIONS_OFF;
2710     else if (!strcmp(arg, "auto"))
2711         cpu_mitigations = CPU_MITIGATIONS_AUTO;
2712     else if (!strcmp(arg, "auto,nosmt"))
2713         cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2714     else
2715         pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2716             arg);
2717 
2718     return 0;
2719 }
2720 early_param("mitigations", mitigations_parse_cmdline);
2721 
2722 /* mitigations=off */
2723 bool cpu_mitigations_off(void)
2724 {
2725     return cpu_mitigations == CPU_MITIGATIONS_OFF;
2726 }
2727 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2728 
2729 /* mitigations=auto,nosmt */
2730 bool cpu_mitigations_auto_nosmt(void)
2731 {
2732     return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2733 }
2734 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
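/*
 * Hedged usage sketch (not part of this file): architecture mitigation
 * selection code typically consults these helpers early, roughly along the
 * lines of the hypothetical example below.
 */
#if 0	/* illustrative sketch only */
static void __init example_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return;		/* "mitigations=off": leave mitigations disabled */

	if (cpu_mitigations_auto_nosmt())
		pr_info("example: SMT should be disabled for full mitigation\n");

	/* ... enable the default mitigation ... */
}
#endif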