#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;
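
/*
 * Expects runqueue lock to be held for atomicity of update
 */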
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}
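
/*
 * Expects runqueue lock to be held for atomicity of update
 */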
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
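
/*
 * The __schedstat_*() variants skip the static-branch test and are meant
 * for paths that have already checked schedstat_enabled(). A minimal
 * usage sketch (field names from struct sched_statistics):
 *
 *	if (schedstat_enabled()) {
 *		struct sched_statistics *stats = __schedstats_from_se(&p->se);
 *
 *		__schedstat_inc(stats->wait_count);
 *		__schedstat_add(stats->wait_sum, delta);
 *	}
 */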

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;
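
	/* Force schedstat enabled if a dependent tracepoint is active */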
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
# define schedstat_val(var)		0
# define schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)	do { } while (0)
# define __update_stats_wait_end(rq, p, stats)		do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)	do { } while (0)
# define check_schedstat_required()			do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
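/*
 * A group scheduling entity is not a task, so its statistics are
 * allocated immediately behind the entity itself; __schedstats_from_se()
 * below relies on this layout via container_of().
 */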
struct sched_entity_stats {
	struct sched_entity	se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}

#ifdef CONFIG_PSI
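/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */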
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (p->in_memstall)
		set |= TSK_MEMSTALL_RUNNING;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;
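
	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */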
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
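
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */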
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
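/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU; we call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU annuls the skew.
 */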
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}
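
/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */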
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
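
/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set.  It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */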
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}
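
/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (may also be a preemption).
 * Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */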
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}
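
/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */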
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
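	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */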
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */

#endif /* _KERNEL_STATS_H */