// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Return: >= nr_cpumask_bits on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		next = nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
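
/*
 * For reference, the for_each_cpu_wrap() iterator in <linux/cpumask.h> is
 * built on top of this helper, roughly along these lines (sketch only,
 * the real macro may differ in detail):
 *
 *	for ((cpu) = cpumask_next_wrap((start) - 1, (mask), (start), false);
 *	     (cpu) < nr_cpumask_bits;
 *	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
 */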

#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is a nop
 * returning a constant 1 (in <linux/cpumask.h>).
 *
 * Return: %true if the allocation succeeded, %false otherwise.  In
 * addition, @mask will be NULL if the allocation fails.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
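
/*
 * Illustrative usage sketch (the @src mask is an assumed caller-provided
 * cpumask, not part of this file): a typical user allocates a temporary
 * cpumask, works on it, then frees it:
 *
 *	cpumask_var_t tmp;
 *
 *	if (!alloc_cpumask_var_node(&tmp, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *	cpumask_and(tmp, src, cpu_online_mask);
 *	...
 *	free_cpumask_var(tmp);
 */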

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is a nop
 * (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees the result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* Local NUMA node first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip cpus of the local node, handled above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
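
/*
 * Illustrative usage sketch (the @dev, @nvec and @vec names are assumed,
 * not part of this file): a driver spreading its queue interrupts across
 * CPUs close to its device might do:
 *
 *	for (i = 0; i < nvec; i++) {
 *		unsigned int cpu = cpumask_local_spread(i, dev_to_node(dev));
 *
 *		irq_set_affinity_hint(vec[i], cpumask_of(cpu));
 *	}
 */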

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &struct cpumask for intersection
 * @src2p: second &struct cpumask for intersection
 *
 * Iterated calls using the same @src1p and @src2p will be distributed within
 * their intersection.
 *
 * Return: >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
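
/*
 * Illustrative usage sketch (@allowed is an assumed caller-provided mask):
 * repeated calls with the same masks rotate through their intersection
 * instead of always returning its first CPU:
 *
 *	cpu = cpumask_any_and_distribute(cpu_online_mask, allowed);
 *	if (cpu >= nr_cpu_ids)
 *		return -EINVAL;
 */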

unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next(prev, srcp);
	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);