/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
    CSD_FLAG_LOCK       = 0x01,
    CSD_FLAG_SYNCHRONOUS    = 0x02,
};

struct call_function_data {
    struct call_single_data __percpu *csd;
    cpumask_var_t       cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
    struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

    if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                     cpu_to_node(cpu)))
        return -ENOMEM;
    cfd->csd = alloc_percpu(struct call_single_data);
    if (!cfd->csd) {
        free_cpumask_var(cfd->cpumask);
        return -ENOMEM;
    }

    return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
    struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

    free_cpumask_var(cfd->cpumask);
    free_percpu(cfd->csd);
    return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
    /*
     * The IPIs for the smp-call-function callbacks queued by other
     * CPUs might arrive late, either due to hardware latencies or
     * because this CPU disabled interrupts (inside stop-machine)
     * before the IPIs were sent. So flush out any pending callbacks
     * explicitly (without waiting for the IPIs to arrive), to
     * ensure that the outgoing CPU doesn't go offline with work
     * still pending.
     */
    flush_smp_call_function_queue(false);
    return 0;
}

void __init call_function_init(void)
{
    int i;

    for_each_possible_cpu(i)
        init_llist_head(&per_cpu(call_single_queue, i));

    smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
    smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
    csd_lock_wait(csd);
    csd->flags |= CSD_FLAG_LOCK;

    /*
     * prevent CPU from reordering the above assignment
     * to ->flags with any subsequent assignments to other
     * fields of the specified call_single_data structure:
     */
    smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
    WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

    /*
     * ensure we're all done before releasing data:
     */
    smp_store_release(&csd->flags, 0);
}
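
/*
 * Illustrative sketch (not part of the original file): the intended csd
 * lock life cycle, mirroring what generic_exec_single() below does. The
 * sender claims the csd before publishing ->func/->info; whoever consumes
 * the csd (the IPI handler, or the sender itself on the local fast path)
 * releases it with csd_unlock() once the data may be reused. The helper
 * name is invented for the example.
 */
static void __maybe_unused csd_lock_lifecycle_sketch(struct call_single_data *csd,
                             smp_call_func_t func, void *info)
{
    csd_lock(csd);      /* wait out any previous user, then claim */
    csd->func = func;   /* safe to publish after csd_lock()'s smp_wmb() */
    csd->info = info;
    /*
     * The csd would now be queued for a remote CPU; that CPU calls
     * csd_unlock() when done. Here we unlock immediately, purely for
     * illustration.
     */
    csd_unlock(csd);
}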

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
                   smp_call_func_t func, void *info)
{
    if (cpu == smp_processor_id()) {
        unsigned long flags;

        /*
         * We can unlock early even for the synchronous on-stack case,
         * since we're doing this from the same CPU.
         */
        csd_unlock(csd);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        return 0;
    }

    if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
        csd_unlock(csd);
        return -ENXIO;
    }

    csd->func = func;
    csd->info = info;

    /*
     * The list addition should be visible before sending the IPI
     * handler locks the list to pull the entry off it because of
     * normal cache coherency rules implied by spinlocks.
     *
     * If IPIs can go out of order with respect to the cache coherency
     * protocol in an architecture, sufficient synchronisation should
     * be added to arch code to make it appear to obey cache coherency
     * WRT locking and barrier primitives. Generic code isn't really
     * equipped to do the right thing...
     */
    if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
        arch_send_call_function_single_ipi(cpu);

    return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
    flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *            offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
    struct llist_head *head;
    struct llist_node *entry;
    struct call_single_data *csd, *csd_next;
    static bool warned;

    WARN_ON(!irqs_disabled());

    head = this_cpu_ptr(&call_single_queue);
    entry = llist_del_all(head);
    entry = llist_reverse_order(entry);

    /* There shouldn't be any pending callbacks on an offline CPU. */
    if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
             !warned && !llist_empty(head))) {
        warned = true;
        WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

        /*
         * We don't have to use the _safe() variant here
         * because we are not invoking the IPI handlers yet.
         */
        llist_for_each_entry(csd, entry, llist)
            pr_warn("IPI callback %pS sent to offline CPU\n",
                csd->func);
    }

    llist_for_each_entry_safe(csd, csd_next, entry, llist) {
        smp_call_func_t func = csd->func;
        void *info = csd->info;

        /* Do we wait until *after* callback? */
        if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
            func(info);
            csd_unlock(csd);
        } else {
            csd_unlock(csd);
            func(info);
        }
    }

    /*
     * Handle irq works queued remotely by irq_work_queue_on().
     * Smp functions above are typically synchronous so they
     * better run first since some other CPUs may be busy waiting
     * for them.
     */
    irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                 int wait)
{
    struct call_single_data *csd;
    struct call_single_data csd_stack = {
        .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
    };
    int this_cpu;
    int err;

    /*
     * prevent preemption and reschedule on another processor,
     * as well as CPU removal
     */
    this_cpu = get_cpu();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow CPUs that are not yet online though, as no one else can
     * send smp call function interrupt to this cpu and as such deadlocks
     * can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
             && !oops_in_progress);

    csd = &csd_stack;
    if (!wait) {
        csd = this_cpu_ptr(&csd_data);
        csd_lock(csd);
    }

    err = generic_exec_single(cpu, csd, func, info);

    if (wait)
        csd_lock_wait(csd);

    put_cpu();

    return err;
}
EXPORT_SYMBOL(smp_call_function_single);
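
/*
 * Illustrative sketch (not part of the original file): a minimal user of
 * smp_call_function_single(). The callback and caller names are invented
 * for the example. With wait != 0 the call returns only after the callback
 * has finished on the target CPU.
 */
static void example_report_cpu(void *info)
{
    /* Runs in IPI context on the target CPU; must be fast and not sleep. */
    pr_info("running on CPU %d\n", smp_processor_id());
}

static int __maybe_unused example_single_call(int target_cpu)
{
    /* wait=1: blocks until example_report_cpu() has run on target_cpu. */
    return smp_call_function_single(target_cpu, example_report_cpu, NULL, 1);
}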

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
    int err = 0;

    preempt_disable();

    /* We could deadlock if we have to wait here with interrupts disabled! */
    if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
        csd_lock_wait(csd);

    csd->flags = CSD_FLAG_LOCK;
    smp_wmb();

    err = generic_exec_single(cpu, csd, csd->func, csd->info);
    preempt_enable();

    return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
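
/*
 * Illustrative sketch (not part of the original file): typical use of
 * smp_call_function_single_async() with a csd embedded in a caller-owned
 * object. The structure and names are invented for the example; the caller
 * must not reuse the csd before the previous call has completed.
 */
struct example_async_req {
    struct call_single_data csd;
    int payload;
};

static void example_async_handler(void *info)
{
    struct example_async_req *req = info;

    pr_info("async payload %d on CPU %d\n", req->payload, smp_processor_id());
}

static int __maybe_unused example_async_call(int target_cpu,
                         struct example_async_req *req)
{
    /* ->func and ->info must be set up before handing the csd over. */
    req->csd.func = example_async_handler;
    req->csd.info = req;
    return smp_call_function_single_async(target_cpu, &req->csd);
}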

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *  1) current cpu if in @mask
 *  2) any cpu of current node if in @mask
 *  3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
              smp_call_func_t func, void *info, int wait)
{
    unsigned int cpu;
    const struct cpumask *nodemask;
    int ret;

    /* Try for same CPU (cheapest) */
    cpu = get_cpu();
    if (cpumask_test_cpu(cpu, mask))
        goto call;

    /* Try for same node. */
    nodemask = cpumask_of_node(cpu_to_node(cpu));
    for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
         cpu = cpumask_next_and(cpu, nodemask, mask)) {
        if (cpu_online(cpu))
            goto call;
    }

    /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
    cpu = cpumask_any_and(mask, cpu_online_mask);
call:
    ret = smp_call_function_single(cpu, func, info, wait);
    put_cpu();
    return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
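
/*
 * Illustrative sketch (not part of the original file): running a callback
 * on whichever CPU of a caller-supplied mask is cheapest to reach,
 * preferring the local CPU and then the local node as documented above.
 * Reuses the example_report_cpu() callback from the earlier sketch.
 */
static int __maybe_unused example_call_any(const struct cpumask *candidates,
                       void *data)
{
    return smp_call_function_any(candidates, example_report_cpu, data, 1);
}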

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                smp_call_func_t func, void *info, bool wait)
{
    struct call_function_data *cfd;
    int cpu, next_cpu, this_cpu = smp_processor_id();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow CPUs that are not yet online though, as no one else can
     * send smp call function interrupt to this cpu and as such deadlocks
     * can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
             && !oops_in_progress && !early_boot_irqs_disabled);

    /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
    cpu = cpumask_first_and(mask, cpu_online_mask);
    if (cpu == this_cpu)
        cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

    /* No online cpus?  We're done. */
    if (cpu >= nr_cpu_ids)
        return;

    /* Do we have another CPU which isn't us? */
    next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
    if (next_cpu == this_cpu)
        next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

    /* Fastpath: do that cpu by itself. */
    if (next_cpu >= nr_cpu_ids) {
        smp_call_function_single(cpu, func, info, wait);
        return;
    }

    cfd = this_cpu_ptr(&cfd_data);

    cpumask_and(cfd->cpumask, mask, cpu_online_mask);
    cpumask_clear_cpu(this_cpu, cfd->cpumask);

    /* Some callers race with other cpus changing the passed mask */
    if (unlikely(!cpumask_weight(cfd->cpumask)))
        return;

    for_each_cpu(cpu, cfd->cpumask) {
        struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

        csd_lock(csd);
        if (wait)
            csd->flags |= CSD_FLAG_SYNCHRONOUS;
        csd->func = func;
        csd->info = info;
        llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
    }

    /* Send a message to all CPUs in the map */
    arch_send_call_function_ipi_mask(cfd->cpumask);

    if (wait) {
        for_each_cpu(cpu, cfd->cpumask) {
            struct call_single_data *csd;

            csd = per_cpu_ptr(cfd->csd, cpu);
            csd_lock_wait(csd);
        }
    }
}
EXPORT_SYMBOL(smp_call_function_many);
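
/*
 * Illustrative sketch (not part of the original file): invoking a callback
 * on every other online CPU in a mask. Preemption must be disabled by the
 * caller, as documented above smp_call_function_many(); here get_cpu()/
 * put_cpu() provide that. Reuses the example_report_cpu() callback.
 */
static void __maybe_unused example_call_many(const struct cpumask *targets)
{
    get_cpu();
    smp_call_function_many(targets, example_report_cpu, NULL, true);
    put_cpu();
}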

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
    preempt_disable();
    smp_call_function_many(cpu_online_mask, func, info, wait);
    preempt_enable();

    return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
    setup_max_cpus = 0;
    arch_disable_smp_support();

    return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
    int nr_cpus;

    get_option(&str, &nr_cpus);
    if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
        nr_cpu_ids = nr_cpus;

    return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
    get_option(&str, &setup_max_cpus);
    if (setup_max_cpus == 0)
        arch_disable_smp_support();

    return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
    nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
    int num_nodes, num_cpus;
    unsigned int cpu;

    idle_threads_init();
    cpuhp_threads_init();

    pr_info("Bringing up secondary CPUs ...\n");

    /* FIXME: This should be done in userspace --RR */
    for_each_present_cpu(cpu) {
        if (num_online_cpus() >= setup_max_cpus)
            break;
        if (!cpu_online(cpu))
            cpu_up(cpu);
    }

    num_nodes = num_online_nodes();
    num_cpus  = num_online_cpus();
    pr_info("Brought up %d node%s, %d CPU%s\n",
        num_nodes, (num_nodes > 1 ? "s" : ""),
        num_cpus,  (num_cpus  > 1 ? "s" : ""));

    /* Any cleanup work */
    smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
    unsigned long flags;
    int ret = 0;

    preempt_disable();
    ret = smp_call_function(func, info, wait);
    local_irq_save(flags);
    func(info);
    local_irq_restore(flags);
    preempt_enable();
    return ret;
}
EXPORT_SYMBOL(on_each_cpu);
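
/*
 * Illustrative sketch (not part of the original file): on_each_cpu() runs
 * the callback on every online CPU, including the local one, and with
 * wait != 0 does not return until all of them have finished. Reuses the
 * example_report_cpu() callback.
 */
static int __maybe_unused example_on_each_cpu(void)
{
    return on_each_cpu(example_report_cpu, NULL, 1);
}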

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
            void *info, bool wait)
{
    int cpu = get_cpu();

    smp_call_function_many(mask, func, info, wait);
    if (cpumask_test_cpu(cpu, mask)) {
        unsigned long flags;
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
    }
    put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *      the info parameter. The function is called
 *      with preemption disabled. The function should
 *      return a boolean value indicating whether to IPI
 *      the specified CPU.
 * @func:   The function to run on all applicable CPUs.
 *      This must be fast and non-blocking.
 * @info:   An arbitrary pointer to pass to both functions.
 * @wait:   If true, wait (atomically) until function has
 *      completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *      used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
            smp_call_func_t func, void *info, bool wait,
            gfp_t gfp_flags)
{
    cpumask_var_t cpus;
    int cpu, ret;

    might_sleep_if(gfpflags_allow_blocking(gfp_flags));

    if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
        preempt_disable();
        for_each_online_cpu(cpu)
            if (cond_func(cpu, info))
                cpumask_set_cpu(cpu, cpus);
        on_each_cpu_mask(cpus, func, info, wait);
        preempt_enable();
        free_cpumask_var(cpus);
    } else {
        /*
         * No free cpumask, bother. No matter, we'll
         * just have to IPI them one by one.
         */
        preempt_disable();
        for_each_online_cpu(cpu)
            if (cond_func(cpu, info)) {
                ret = smp_call_function_single(cpu, func,
                                info, wait);
                WARN_ON_ONCE(ret);
            }
        preempt_enable();
    }
}
EXPORT_SYMBOL(on_each_cpu_cond);
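
/*
 * Illustrative sketch (not part of the original file): using
 * on_each_cpu_cond() to IPI only the CPUs a predicate selects. The
 * predicate and its even-CPU rule are invented for the example; it reuses
 * the example_report_cpu() callback.
 */
static bool example_is_even_cpu(int cpu, void *info)
{
    /* Called with preemption disabled; decides whether to IPI @cpu. */
    return (cpu & 1) == 0;
}

static void __maybe_unused example_cond_call(void)
{
    on_each_cpu_cond(example_is_even_cpu, example_report_cpu, NULL,
             true, GFP_KERNEL);
}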

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
    /* Make sure the change is visible before we kick the cpus */
    smp_mb();
    smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break every CPU out of idle, including
 * CPUs that are idle-polling. Non-idle CPUs are left alone.
 */
void wake_up_all_idle_cpus(void)
{
    int cpu;

    preempt_disable();
    for_each_online_cpu(cpu) {
        if (cpu == smp_processor_id())
            continue;

        wake_up_if_idle(cpu);
    }
    preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
    struct work_struct  work;
    struct completion   done;
    int         (*func)(void *);
    void            *data;
    int         ret;
    int         cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
    struct smp_call_on_cpu_struct *sscs;

    sscs = container_of(work, struct smp_call_on_cpu_struct, work);
    if (sscs->cpu >= 0)
        hypervisor_pin_vcpu(sscs->cpu);
    sscs->ret = sscs->func(sscs->data);
    if (sscs->cpu >= 0)
        hypervisor_pin_vcpu(-1);

    complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
    struct smp_call_on_cpu_struct sscs = {
        .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
        .func = func,
        .data = par,
        .cpu  = phys ? cpu : -1,
    };

    INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
        return -ENXIO;

    queue_work_on(cpu, system_wq, &sscs.work);
    wait_for_completion(&sscs.done);

    return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
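
/*
 * Illustrative sketch (not part of the original file): unlike the IPI-based
 * helpers above, smp_call_on_cpu() runs the function from a workqueue on
 * the target CPU, so it may sleep and its return value is passed back to
 * the caller. The callback name is invented for the example.
 */
static int example_sleepable_fn(void *data)
{
    /* Runs from a CPU-bound kworker on the chosen CPU; may sleep. */
    might_sleep();
    return 0;
}

static int __maybe_unused example_call_on_cpu(unsigned int cpu)
{
    /* phys=false: no vcpu pinning requested. */
    return smp_call_on_cpu(cpu, example_sleepable_fn, NULL, false);
}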