// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
    unsigned int next;

again:
    next = cpumask_next(n, mask);

    if (wrap && n < start && next >= start) {
        /* Wrapped past @start: the iteration is complete. */
        return nr_cpumask_bits;

    } else if (next >= nr_cpumask_bits) {
        /* Ran off the end of the mask: wrap and rescan from cpu 0. */
        wrap = true;
        n = -1;
        goto again;
    }

    return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
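
/*
 * For reference, a hedged sketch of the iterator this helper backs:
 * for_each_cpu_wrap() in <linux/cpumask.h> looks essentially like the
 * macro below (renamed here to avoid clashing with the real one). The
 * first step passes wrap=false because @start itself may not be set in
 * @mask; later steps pass wrap=true so crossing @start ends the walk.
 */
#define example_for_each_cpu_wrap(cpu, mask, start)				\
    for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);		\
         (cpu) < nr_cpumask_bits;						\
         (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))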

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
    *mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
    if (!*mask) {
        printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
        dump_stack();
    }
#endif

    return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
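
/*
 * Hedged usage sketch; example_build_mask() is hypothetical. The usual
 * pattern is allocate, check, use, free. With CONFIG_CPUMASK_OFFSTACK=n
 * the allocation becomes a no-op that always succeeds, so callers can
 * write the same code either way.
 */
static int example_build_mask(int node)
{
    cpumask_var_t mask;

    if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, node))
        return -ENOMEM;

    cpumask_and(mask, cpumask_of_node(node), cpu_online_mask);
    /* ... use @mask ... */

    free_cpumask_var(mask);
    return 0;
}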

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
    *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
    if (!*mask)
        panic("%s: Failed to allocate %u bytes\n", __func__,
              cpumask_size());
}
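
/*
 * Hedged boot-time sketch; example_boot_mask and example_setup() are
 * hypothetical. Early callers allocate from memblock before the slab
 * allocator exists, then either keep the mask for the lifetime of the
 * system or release it with free_bootmem_cpumask_var() after setup.
 */
static cpumask_var_t example_boot_mask;

static void __init example_setup(void)
{
    alloc_bootmem_cpumask_var(&example_boot_mask);
    cpumask_copy(example_boot_mask, cpu_possible_mask);
}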

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
    kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
    memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu, with local NUMA cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a NUMA-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
    unsigned int cpu;

    /* Wrap: we always want a cpu. */
    i %= num_online_cpus();

    if (node == NUMA_NO_NODE) {
        for_each_cpu(cpu, cpu_online_mask)
            if (i-- == 0)
                return cpu;
    } else {
        /* NUMA first. */
        for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
            if (i-- == 0)
                return cpu;

        for_each_cpu(cpu, cpu_online_mask) {
            /* Skip NUMA nodes, done above. */
            if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
                continue;

            if (i-- == 0)
                return cpu;
        }
    }
    BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
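
/*
 * Hedged usage sketch; example_spread_queues() and its parameters are
 * hypothetical. A typical caller is a multi-queue driver spreading
 * per-queue work across CPUs, preferring CPUs on the device's NUMA
 * node before falling back to remote ones.
 */
static void example_spread_queues(unsigned int nr_queues, int node)
{
    unsigned int q;

    for (q = 0; q < nr_queues; q++) {
        unsigned int cpu = cpumask_local_spread(q, node);

        /* e.g. steer queue @q's interrupt or worker to @cpu here */
        pr_debug("queue %u -> cpu %u\n", q, cpu);
    }
}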

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &struct cpumask for intersection
 * @src2p: second &struct cpumask for intersection
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
                                        const struct cpumask *src2p)
{
    unsigned int next, prev;

    /* NOTE: our first selection will skip 0. */
    prev = __this_cpu_read(distribute_cpu_mask_prev);

    next = cpumask_next_and(prev, src1p, src2p);
    if (next >= nr_cpu_ids)
        next = cpumask_first_and(src1p, src2p);

    if (next < nr_cpu_ids)
        __this_cpu_write(distribute_cpu_mask_prev, next);

    return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
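
/*
 * Hedged usage sketch; example_pick_cpu() and its fallback policy are
 * hypothetical. Because the previous pick is remembered per-CPU,
 * repeated calls rotate through the intersection rather than always
 * returning its first set bit.
 */
static unsigned int example_pick_cpu(const struct cpumask *allowed)
{
    unsigned int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);

    /* An empty intersection yields >= nr_cpu_ids; fall back somewhere sane. */
    return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
}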

/**
 * cpumask_any_distribute - Return an arbitrary cpu from srcp
 * @srcp: &struct cpumask for selection
 *
 * Iterated calls using the same srcp will be distributed within @srcp.
 *
 * Returns >= nr_cpu_ids if @srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
    unsigned int next, prev;

    /* NOTE: our first selection will skip 0. */
    prev = __this_cpu_read(distribute_cpu_mask_prev);

    next = cpumask_next(prev, srcp);
    if (next >= nr_cpu_ids)
        next = cpumask_first(srcp);

    if (next < nr_cpu_ids)
        __this_cpu_write(distribute_cpu_mask_prev, next);

    return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);