#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. the return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus are set in both masks.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
		if (cpumask_test_cpu(n, src2p))
			break;
	return n;
}
EXPORT_SYMBOL(cpumask_next_and);

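/*
 * Illustrative sketch, not part of the original file: walking the
 * intersection of two masks with cpumask_next_and().  The helper name
 * count_cpus_in_both() is hypothetical.
 */
static inline unsigned int count_cpus_in_both(const struct cpumask *a,
					      const struct cpumask *b)
{
	int cpu = -1;
	unsigned int count = 0;

	/* Start at -1 so the first call returns the first cpu set in both. */
	while ((cpu = cpumask_next_and(cpu, a, b)) < nr_cpu_ids)
		count++;
	return count;
}
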
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);

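/*
 * Illustrative sketch, not part of the original file: the common pattern
 * of picking a peer cpu other than the current one, e.g. to hand off
 * work.  The helper name pick_peer_cpu() is hypothetical.
 */
static unsigned int pick_peer_cpu(void)
{
	unsigned int cpu = get_cpu();	/* pin to this cpu while we look */
	unsigned int peer = cpumask_any_but(cpu_online_mask, cpu);

	put_cpu();
	/* A result >= nr_cpu_ids means this was the only online cpu. */
	return peer < nr_cpu_ids ? peer : cpu;
}
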
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or NUMA_NO_NODE for no preference
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so it does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

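/*
 * Illustrative sketch, not part of the original file: the canonical
 * cpumask_var_t lifecycle.  With CONFIG_CPUMASK_OFFSTACK=y this really
 * allocates; otherwise the alloc/free calls degenerate to no-ops on a
 * stack-resident mask.  The helper name count_online_in() is
 * hypothetical.
 */
static int count_online_in(const struct cpumask *interest)
{
	cpumask_var_t tmp;
	int n;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(tmp, interest, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}
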
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_virt_alloc(cpumask_size(), 0);
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu, with cpus on the local numa node first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a NUMA-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
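
/*
 * Illustrative sketch, not part of the original file: spreading per-queue
 * work across cpus, preferring those local to a device's numa node.  This
 * mirrors how drivers commonly pick one cpu per rx/tx queue; the helper
 * name assign_queue_cpus() is hypothetical.
 */
static void assign_queue_cpus(unsigned int nr_queues, int node,
			      unsigned int *queue_cpu)
{
	unsigned int q;

	/* Queue q lands on the q'th cpu, local node first, wrapping as needed. */
	for (q = 0; q < nr_queues; q++)
		queue_cpu[q] = cpumask_local_spread(q, node);
}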