/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. Otherwise, if local irqs are disabled
 * around the loop, we can deadlock, since retry() would keep comparing the
 * latest value of the mems_allowed seqcount against 0 while begin() still
 * sees cpusets_enabled() as false. The enabled -> disabled transition
 * should happen in the reverse order for the same reason (we want retry()
 * to stop looking at the real value of mems_allowed.sequence first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
    return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
    static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
    static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
    static_branch_dec_cpuslocked(&cpusets_enabled_key);
    static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This gets enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for those configurations, and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
    return static_branch_unlikely(&cpusets_insane_config_key);
}
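
/*
 * Illustrative sketch (not part of this header): a caller such as the page
 * allocator can use cpusets_insane_config() to gate extra validation, so a
 * sane configuration pays only a static-branch NOP. The exact check below
 * is an assumption for illustration; __GFP_HARDWALL marks allocations that
 * must be enforced against the cpuset:
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
 *		... re-check that the effective nodemask can actually
 *		    satisfy this allocation before proceeding ...
 *	}
 */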

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
    if (cpusets_enabled())
        return __cpuset_node_allowed(node, gfp_mask);
    return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    if (cpusets_enabled())
        return __cpuset_zone_allowed(z, gfp_mask);
    return true;
}
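
/*
 * Illustrative sketch (not part of this header): the typical caller is a
 * zonelist walk in the page allocator, which skips zones the current
 * task's cpuset does not permit. Modeled loosely on the fast path in
 * get_page_from_freelist(); ALLOC_CPUSET and the iteration variables are
 * assumptions here, not definitions from this file:
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
 *		if (cpusets_enabled() &&
 *		    (alloc_flags & ALLOC_CPUSET) &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */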

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                      const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()               \
    do {                            \
        if (cpuset_memory_pressure_enabled)     \
            __cpuset_memory_pressure_bump();    \
    } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                    struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
    return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
    return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
    if (!static_branch_unlikely(&cpusets_pre_enable_key))
        return 0;

    return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
    if (!static_branch_unlikely(&cpusets_enabled_key))
        return false;

    return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
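
/*
 * Illustrative sketch (not part of this header): the canonical caller
 * pattern is a seqcount retry loop around an allocation, so a failure
 * caused by a concurrent mems_allowed update is retried instead of being
 * reported to the caller. The allocation call is a stand-in here:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = ...allocate under current->mems_allowed...;
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */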

/*
 * Writer side of mems_allowed_seq. Interrupts are disabled across the
 * write so that a reader running in irq context on this CPU cannot spin
 * forever on a seqcount held by the interrupted writer.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
    unsigned long flags;

    task_lock(current);
    local_irq_save(flags);
    write_seqcount_begin(&current->mems_allowed_seq);
    current->mems_allowed = nodemask;
    write_seqcount_end(&current->mems_allowed_seq);
    local_irq_restore(flags);
    task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
    partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                       struct cpumask *mask)
{
    cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
    return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
    return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
    return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
    return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
    return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                         const struct task_struct *tsk2)
{
    return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                        struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
    return 0;
}

static inline int cpuset_slab_spread_node(void)
{
    return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
    return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
    return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
    return false;
}

static inline void rebuild_sched_domains(void)
{
    partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
    return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
    return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */