/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
    #include <linux/sched/sd_flags.h>
    __SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
    #include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
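
/*
 * Illustration (added, not in the original header): <linux/sched/sd_flags.h>
 * is an x-macro list of SD_FLAG(name, mflags) entries, so the two
 * expansions above generate first an index enum and then a bit-mask enum.
 * For a hypothetical entry SD_FLAG(SD_FOO, SDF_SHARED_CHILD) this yields:
 *
 *	enum { __SD_FOO, ..., __SD_FLAG_CNT };
 *	enum { SD_FOO = 1 << __SD_FOO, ... };
 *
 * i.e. each flag's bit position matches its position in sd_flags.h.
 */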

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
    unsigned int meta_flags;
    char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
    return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
    return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
    return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
    return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
    int relax_domain_level;
};

#define SD_ATTR_INIT    (struct sched_domain_attr) {    \
    .relax_domain_level = -1,           \
}
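
/*
 * Example use (illustrative): SD_ATTR_INIT is a compound literal, so a
 * default attribute set is simply:
 *
 *	struct sched_domain_attr dattr = SD_ATTR_INIT;
 *
 * relax_domain_level == -1 means "no request made", i.e. the default
 * idle-balancing search depth is kept.
 */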

extern int sched_domain_level_max;

struct sched_group;

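/*
 * State shared by all sched_domain instances that span the same CPUs,
 * typically one instance per last-level-cache domain: a refcount, the
 * number of currently busy CPUs, and hints for the idle-core search.
 */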
struct sched_domain_shared {
    atomic_t    ref;
    atomic_t    nr_busy_cpus;
    int     has_idle_cores;
    int     nr_idle_scan;
};

struct sched_domain {
    /* These fields must be set up */
    struct sched_domain __rcu *parent;  /* NULL for the topmost domain */
    struct sched_domain __rcu *child;   /* NULL for the bottommost domain */
    struct sched_group *groups; /* the balancing groups of the domain */
    unsigned long min_interval; /* Minimum balance interval ms */
    unsigned long max_interval; /* Maximum balance interval ms */
    unsigned int busy_factor;   /* less balancing by factor if busy */
    unsigned int imbalance_pct; /* No balance until over watermark */
    unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */
    unsigned int imb_numa_nr;   /* Nr running tasks that allows a NUMA imbalance */

    int nohz_idle;          /* NOHZ IDLE status */
    int flags;          /* See SD_* */
    int level;

    /* Runtime fields. */
    unsigned long last_balance; /* init to jiffies. units in jiffies */
    unsigned int balance_interval;  /* initialise to 1. units in ms. */
    unsigned int nr_balance_failed; /* initialise to 0 */

    /* idle_balance() stats */
    u64 max_newidle_lb_cost;
    unsigned long last_decay_max_lb_cost;

    u64 avg_scan_cost;      /* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
    /* load_balance() stats */
    unsigned int lb_count[CPU_MAX_IDLE_TYPES];
    unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
    unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
    unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
    unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
    unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
    unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
    unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

    /* Active load balancing */
    unsigned int alb_count;
    unsigned int alb_failed;
    unsigned int alb_pushed;

    /* SD_BALANCE_EXEC stats */
    unsigned int sbe_count;
    unsigned int sbe_balanced;
    unsigned int sbe_pushed;

    /* SD_BALANCE_FORK stats */
    unsigned int sbf_count;
    unsigned int sbf_balanced;
    unsigned int sbf_pushed;

    /* try_to_wake_up() stats */
    unsigned int ttwu_wake_remote;
    unsigned int ttwu_move_affine;
    unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
    char *name;
#endif
    union {
        void *private;      /* used during construction */
        struct rcu_head rcu;    /* used during destruction */
    };
    struct sched_domain_shared *shared;

    unsigned int span_weight;
    /*
     * Span of all CPUs in this domain.
     *
     * NOTE: this field is variable length. (Allocated dynamically
     * by attaching extra space to the end of the structure,
     * depending on how many CPUs the kernel has booted up with)
     */
    unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
    return to_cpumask(sd->span);
}
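
/*
 * Example (illustrative sketch): iterate over every CPU covered by a
 * domain.  The sd pointer is assumed to be a valid sched_domain
 * obtained under rcu_read_lock():
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_info("domain spans CPU%d\n", cpu);
 */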

extern void partition_sched_domains_locked(int ndoms_new,
                       cpumask_var_t doms_new[],
                       struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
                    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
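
/*
 * Example (illustrative sketch, error handling elided): collapse the
 * system into one partition spanning all active CPUs:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	cpumask_copy(doms[0], cpu_active_mask);
 *	partition_sched_domains(1, doms, NULL);
 *
 * partition_sched_domains() takes ownership of doms, which is why the
 * sketch has no matching free_sched_domains() call.
 */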

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP    0x01

struct sd_data {
    struct sched_domain *__percpu *sd;
    struct sched_domain_shared *__percpu *sds;
    struct sched_group *__percpu *sg;
    struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
    sched_domain_mask_f mask;
    sched_domain_flags_f sd_flags;
    int         flags;
    int         numa_level;
    struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
    char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)     .name = #type
#else
# define SD_INIT_NAME(type)
#endif
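
/*
 * Example (illustrative, modelled on the default_topology table in
 * kernel/sched/topology.c): architecture code may install its own
 * topology table, terminated by an entry with a NULL mask.
 * my_topology is a hypothetical name:
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */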

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
                   struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
            struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
    return true;
}

#endif  /* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
    return SCHED_CAPACITY_SCALE;
}
#endif
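
/*
 * Worked example (illustrative numbers): SCHED_CAPACITY_SCALE is 1024,
 * so on an asymmetric system whose little cores peak at half the
 * performance of the big cores, an arch override would report:
 *
 *	big:     max_perf / max_perf        * 1024 = 1024
 *	little: (max_perf / 2) / max_perf   * 1024 =  512
 *
 * The generic fallback above treats all CPUs as equal and therefore
 * returns SCHED_CAPACITY_SCALE for every CPU.
 */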

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
    return 0;
}
#endif

#ifndef arch_update_thermal_pressure
static __always_inline
void arch_update_thermal_pressure(const struct cpumask *cpus,
                  unsigned long capped_frequency)
{ }
#endif

static inline int task_node(const struct task_struct *p)
{
    return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */