Back to home page

LXR

 
 

    


0001 /*
0002  * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
0003  */
0004 
0005 #include <linux/interrupt.h>
0006 #include <linux/kernel.h>
0007 #include <linux/export.h>
0008 #include <linux/smp.h>
0009 #include <linux/hypervisor.h>
0010 
0011 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
0012                 int wait)
0013 {
0014     unsigned long flags;
0015 
0016     WARN_ON(cpu != 0);
0017 
0018     local_irq_save(flags);
0019     func(info);
0020     local_irq_restore(flags);
0021 
0022     return 0;
0023 }
0024 EXPORT_SYMBOL(smp_call_function_single);
0025 
0026 int smp_call_function_single_async(int cpu, struct call_single_data *csd)
0027 {
0028     unsigned long flags;
0029 
0030     local_irq_save(flags);
0031     csd->func(csd->info);
0032     local_irq_restore(flags);
0033     return 0;
0034 }
0035 EXPORT_SYMBOL(smp_call_function_single_async);
0036 
0037 int on_each_cpu(smp_call_func_t func, void *info, int wait)
0038 {
0039     unsigned long flags;
0040 
0041     local_irq_save(flags);
0042     func(info);
0043     local_irq_restore(flags);
0044     return 0;
0045 }
0046 EXPORT_SYMBOL(on_each_cpu);
0047 
0048 /*
0049  * Note we still need to test the mask even for UP
0050  * because we actually can get an empty mask from
0051  * code that on SMP might call us without the local
0052  * CPU in the mask.
0053  */
0054 void on_each_cpu_mask(const struct cpumask *mask,
0055               smp_call_func_t func, void *info, bool wait)
0056 {
0057     unsigned long flags;
0058 
0059     if (cpumask_test_cpu(0, mask)) {
0060         local_irq_save(flags);
0061         func(info);
0062         local_irq_restore(flags);
0063     }
0064 }
0065 EXPORT_SYMBOL(on_each_cpu_mask);
0066 
0067 /*
0068  * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
0070  */
0071 void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
0072               smp_call_func_t func, void *info, bool wait,
0073               gfp_t gfp_flags)
0074 {
0075     unsigned long flags;
0076 
0077     preempt_disable();
0078     if (cond_func(0, info)) {
0079         local_irq_save(flags);
0080         func(info);
0081         local_irq_restore(flags);
0082     }
0083     preempt_enable();
0084 }
0085 EXPORT_SYMBOL(on_each_cpu_cond);
0086 
0087 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
0088 {
0089     int ret;
0090 
0091     if (cpu != 0)
0092         return -ENXIO;
0093 
0094     if (phys)
0095         hypervisor_pin_vcpu(0);
0096     ret = func(par);
0097     if (phys)
0098         hypervisor_pin_vcpu(-1);
0099 
0100     return ret;
0101 }
0102 EXPORT_SYMBOL_GPL(smp_call_on_cpu);