#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

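/* Current average of the thermal pressure signal tracked for this rq */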
static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

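/*
 * Maximum value a PELT *_sum can have reached given the time already
 * accumulated in the current 1024us period (period_contrib); used to
 * convert between *_sum and *_avg.
 */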
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return PELT_MIN_DIVIDER + avg->period_contrib;
}

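/*
 * Clear UTIL_AVG_UNCHANGED in util_est.enqueued to signal that the
 * entity's util_avg has been updated since util_est last sampled it.
 */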
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

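	/* Avoid the store below if the flag has already been cleared */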
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

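	/* Clear the flag to report that util_avg has been updated */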
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

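/* rq's PELT clock: clock_pelt minus the idle time lost while fully busy */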
static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

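/* The rq is idle: we can sync clock_pelt back to clock_task */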
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
	rq->clock_pelt = rq_clock_task(rq);

	u64_u32_store(rq->clock_idle, rq_clock(rq));

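	/* Paired with smp_rmb() in migrate_se_pelt_lag() */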
	smp_wmb();
	u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
}

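/*
 * clock_pelt advances at the same rate as clock_task while the CPU runs at
 * full compute capacity, but slower when it runs at a lower frequency or on
 * a lower-capacity CPU. This keeps the PELT signals invariant: the same
 * amount of work yields the same utilization whatever the current capacity.
 * The idle time "stolen" by running slower is given back when the rq goes
 * idle and clock_pelt is synced with clock_task.
 */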
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		_update_idle_rq_clock_pelt(rq);
		return;
	}

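	/*
	 * Scale the elapsed time to reflect the actual amount of computation
	 * done at the current CPU and frequency capacity.
	 */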
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}

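/*
 * When the rq becomes idle, check whether it has been fully busy and has
 * therefore lost idle time. The rq is considered fully used when the sum of
 * the cfs, rt and dl util_sum reaches the upper bound that can be
 * accumulated (derived from LOAD_AVG_MAX scaled by SCHED_CAPACITY_SHIFT,
 * ignoring the position in the current window for simplicity).
 */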
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

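	/*
	 * Reflecting lost idle time makes sense only if the rq would have
	 * been idle at max capacity. Once util_sum has reached its upper
	 * bound, the rq is treated as always running: the potential idle
	 * time is recorded as lost, relative to clock_task.
	 */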
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;

	_update_idle_rq_clock_pelt(rq);
}

#ifdef CONFIG_CFS_BANDWIDTH
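/*
 * Snapshot the cfs_rq's accumulated throttled time when it becomes idle;
 * U64_MAX marks a cfs_rq that is still throttled.
 */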
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	u64 throttled;

	if (unlikely(cfs_rq->throttle_count))
		throttled = U64_MAX;
	else
		throttled = cfs_rq->throttled_clock_pelt_time;

	u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
}

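/* rq's PELT clock with any time this cfs_rq has spent throttled removed */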
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
}
#else
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else /* !CONFIG_SMP */

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
#endif /* CONFIG_SMP */