/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
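
/*
 * Illustration (not part of the upstream header): sd_flags.h is an
 * x-macro table, so each SD_FLAG(name, mflags) entry expands twice.
 * For a real entry such as SD_BALANCE_NEWIDLE, the two enums above
 * generate, in order:
 *
 *	__SD_BALANCE_NEWIDLE,				(its index)
 *	SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE,	(its bit)
 *
 * __SD_FLAG_CNT therefore counts the flags, and every SD_* value is a
 * distinct single bit suitable for OR-ing into sched_domain::flags.
 */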
#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif
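
/*
 * Usage sketch (illustrative, adapted from how kernel/sched/debug.c
 * decodes sched_domain::flags; details vary by kernel version):
 *
 *	unsigned long flags = sd->flags;
 *	unsigned int idx;
 *
 *	for_each_set_bit(idx, &flags, __SD_FLAG_CNT)
 *		pr_cont("%s ", sd_flag_debug[idx].name);
 *
 * i.e. the table is indexed by the __SD_* flag indexes generated above.
 */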
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
	int		nr_idle_scan;
};
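
/*
 * One sched_domain_shared instance is shared by all CPUs whose domain
 * spans the same CPUs (notably the LLC domain). Illustrative sketch of
 * how the idle-core hint is typically consumed (adapted from the
 * select_idle_sibling() path in kernel/sched/fair.c; exact code varies
 * by kernel version):
 *
 *	struct sched_domain_shared *sds;
 *
 *	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
 *	if (sds && READ_ONCE(sds->has_idle_cores))
 *		...	(worth scanning the LLC for a fully idle core)
 */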
struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;	/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
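
/*
 * Illustrative sketch (not part of this header): walking a CPU's
 * domain hierarchy and iterating the CPUs each level spans. Lookups
 * must run under rcu_read_lock(), as in the kernel/sched/ users:
 *
 *	struct sched_domain *sd;
 *	int cpu2;
 *
 *	rcu_read_lock();
 *	for_each_domain(cpu, sd) {
 *		for_each_cpu(cpu2, sched_domain_span(sd))
 *			...;
 *	}
 *	rcu_read_unlock();
 *
 * for_each_domain() is private to kernel/sched/sched.h; code outside
 * the scheduler would follow sd->parent directly instead.
 */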
extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
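
/*
 * Usage sketch (illustrative; cgroup cpusets are the main caller of
 * this API, see kernel/cgroup/cpuset.c). A single partition covering
 * one mask, with default attributes:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], some_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 *
 * (some_mask is a placeholder.) The scheduler keeps the new array and
 * frees the previously installed one, so callers do not call
 * free_sched_domains() on a successfully installed array.
 */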
bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
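
/*
 * Illustrative sketch of how an architecture overrides the default
 * topology (modelled on arch code such as arch/x86/kernel/smpboot.c;
 * exact tables vary by arch and kernel version). The array is walked
 * bottom-up and must be terminated by an empty entry:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * my_topology is a made-up name; the cpu_*_mask() helpers come from
 * <linux/topology.h>, the cpu_*_flags() helpers are defined above.
 */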
#else

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
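
/*
 * Illustrative note: an asymmetric-capacity architecture overrides the
 * hook by defining the macro before this header is seen, e.g. arm64's
 * asm/topology.h does:
 *
 *	#define arch_scale_cpu_capacity topology_get_cpu_scale
 *
 * where topology_get_cpu_scale() (see <linux/arch_topology.h>) returns
 * the per-CPU capacity, normalized so that the biggest CPU reads
 * SCHED_CAPACITY_SCALE (1024).
 */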
#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_thermal_pressure
static __always_inline
void arch_update_thermal_pressure(const struct cpumask *cpus,
				  unsigned long capped_frequency)
{ }
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */