#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/list.h>
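
/*
 * stop_machine "Bogolock": stop the entire machine and disable
 * interrupts.  This is a very heavy lock, equivalent to grabbing every
 * spinlock (and more), so the "read" side of it is anything which
 * disables preemption.
 */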
typedef int (*cpu_stop_fn_t)(void *arg);

#ifdef CONFIG_SMP

struct cpu_stop_work {
	struct list_head	list;		/* cpu_stopper->works */
	cpu_stop_fn_t		fn;
	unsigned long		caller;		/* _RET_IP_ of the queueing caller */
	void			*arg;
	struct cpu_stop_done	*done;		/* completion/error tracking */
};
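
/*
 * stop_one_cpu() runs @fn from the per-CPU stopper task on @cpu and
 * waits for it to complete; stop_two_cpus() stops both CPUs together
 * while @fn runs.  stop_one_cpu_nowait() only queues the work
 * described by @work_buf and returns whether it was queued, without
 * waiting for completion.
 */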
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);

extern void print_stop_info(const char *log_lvl, struct task_struct *task);

#else	/* CONFIG_SMP */

#include <linux/workqueue.h>

struct cpu_stop_work {
	struct work_struct	work;
	cpu_stop_fn_t		fn;
	void			*arg;
};
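
/*
 * On UP the only CPU that can be "stopped" is the local one, so these
 * helpers run @fn directly with preemption disabled (stop_one_cpu())
 * or defer it to a workqueue (stop_one_cpu_nowait()).
 */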
static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	int ret = -ENOENT;

	preempt_disable();
	if (cpu == smp_processor_id())
		ret = fn(arg);
	preempt_enable();
	return ret;
}

static void stop_one_cpu_nowait_workfn(struct work_struct *work)
{
	struct cpu_stop_work *stwork =
		container_of(work, struct cpu_stop_work, work);

	preempt_disable();
	stwork->fn(stwork->arg);
	preempt_enable();
}

static inline bool stop_one_cpu_nowait(unsigned int cpu,
				       cpu_stop_fn_t fn, void *arg,
				       struct cpu_stop_work *work_buf)
{
	if (cpu == smp_processor_id()) {
		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
		work_buf->fn = fn;
		work_buf->arg = arg;
		schedule_work(&work_buf->work);
		return true;
	}

	return false;
}

static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }

#endif	/* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
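
/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts.  The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 *
 * Protects against CPU hotplug.
 */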
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
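
/*
 * Usage sketch (apply_patch() and patch_data are hypothetical, for
 * illustration only).  While apply_patch() runs, every other online
 * CPU spins with interrupts disabled:
 *
 *	static int apply_patch(void *data)
 *	{
 *		... patch code; no other CPU is executing ...
 *		return 0;
 *	}
 *
 *	ret = stop_machine(apply_patch, &patch_data, NULL);
 */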
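
/**
 * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Same as stop_machine(), but must be called from within a
 * cpus_read_lock() protected region; it avoids taking cpus_read_lock()
 * again.
 */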
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
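
/**
 * stop_core_cpuslocked: stop all threads on just one core
 * @cpu: any cpu in the targeted core
 * @fn: the function to run
 * @data: the data ptr for @fn()
 *
 * Same as above, but instead of every CPU, only the logical CPUs of a
 * single core are affected.
 *
 * Context: Must be called from within a cpus_read_lock() protected region.
 *
 * Return: 0 if all executions of @fn returned 0, any non zero return
 *         value if any returned non zero.
 */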
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
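
/*
 * Like stop_machine(), but callable from a CPU which is not (yet)
 * marked active, e.g. during early CPU bringup; @fn is executed
 * directly on the local CPU.
 */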
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus);
#else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
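
/*
 * With a single CPU there is no one else to stop: simply run @fn
 * locally with interrupts disabled.
 */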
static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
						   const struct cpumask *cpus)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = fn(data);
	local_irq_restore(flags);
	return ret;
}

static __always_inline int
stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	return stop_machine_cpuslocked(fn, data, cpus);
}

static __always_inline int
stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
			       const struct cpumask *cpus)
{
	return stop_machine(fn, data, cpus);
}

#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif	/* _LINUX_STOP_MACHINE */