// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats)
{
	u64 wait_start, prev_wait_start;

	wait_start = rq_clock(rq);
	prev_wait_start = schedstat_val(stats->wait_start);

	if (p && likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(stats->wait_start, wait_start);
}

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats)
{
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

	if (p) {
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve the wait time accumulated so far, so
			 * that wait_start can be adjusted on the new
			 * runqueue and keep accumulating across migration.
			 */
			__schedstat_set(stats->wait_start, delta);

			return;
		}

		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
	__schedstat_set(stats->wait_start, 0);
}
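
/*
 * Illustrative sketch (not part of this file): how the two helpers above
 * carry a migrating task's wait time across runqueues. mock_wait_start() and
 * mock_wait_end() are hypothetical stand-ins that mirror the arithmetic of
 * __update_stats_wait_start()/__update_stats_wait_end(); the clock values in
 * main() are made up.
 */
#if 0 /* example only, never compiled */
#include <stdio.h>

typedef unsigned long long u64;

static u64 wait_start;	/* stands in for stats->wait_start */

static void mock_wait_start(u64 clock)
{
	/* subtract any wait preserved from before the migration */
	if (clock > wait_start)
		clock -= wait_start;
	wait_start = clock;
}

static u64 mock_wait_end(u64 clock, int migrating)
{
	u64 delta = clock - wait_start;

	if (migrating) {
		wait_start = delta;	/* preserve the accumulated wait */
		return 0;
	}
	return delta;			/* total observed wait */
}

int main(void)
{
	mock_wait_start(100);			 /* enqueued on CPU0 at t=100 */
	mock_wait_end(130, 1);			 /* migrates after waiting 30 */
	mock_wait_start(200);			 /* enqueued on CPU1 at t=200 */
	printf("%llu\n", mock_wait_end(250, 0)); /* prints 80: 30 + 50 */
	return 0;
}
#endif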

void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
					     (void *)get_wchan(p),
					     delta >> 20);
			}
			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}
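
/*
 * A note on the shifts above (illustration, not part of this file): delta is
 * in nanoseconds, and since 2^10 = 1024 ~= 10^3 and 2^20 ~= 10^6,
 * "delta >> 10" is a cheap microseconds-range estimate for
 * account_scheduler_latency() and "delta >> 20" a milliseconds-range
 * estimate for profile_hits(). A standalone check of that approximation:
 */
#if 0 /* example only, never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long long delta_ns = 5000000ULL;	/* 5 ms of blocking */

	printf("approx us: %llu\n", delta_ns >> 10);	/* 4882 (exact: 5000) */
	printf("approx ms: %llu\n", delta_ns >> 20);	/* 4    (exact: 5)    */
	return 0;
}
#endif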

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}
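
/*
 * Illustrative sketch (not part of this file): parsing one "cpuN ..." line
 * produced by show_schedstat() above, following the field order of its
 * format string (yld_count, a literal 0, sched_count, sched_goidle,
 * ttwu_count, ttwu_local, rq_cpu_time, run_delay, pcount). The sample line
 * uses made-up numbers.
 */
#if 0 /* example only, never compiled */
#include <stdio.h>

int main(void)
{
	const char *line =
		"cpu0 12 0 3456 789 1011 120 13141516 17181920 2122";
	int cpu;
	unsigned int yld, zero, sched, goidle, ttwu, ttwu_local;
	unsigned long long cpu_time, run_delay;
	unsigned long pcount;

	if (sscanf(line, "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		   &cpu, &yld, &zero, &sched, &goidle, &ttwu, &ttwu_local,
		   &cpu_time, &run_delay, &pcount) == 10)
		printf("cpu%d: run_delay %llu ns over %lu timeslices\n",
		       cpu, run_delay, pcount);
	return 0;
}
#endif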

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);
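
/*
 * Illustrative sketch (not part of this file): a minimal userspace consumer
 * of the /proc/schedstat file registered above. As the SCHEDSTAT_VERSION
 * comment suggests, tools should check the version header and adapt (or
 * abort) before trusting the field layout.
 */
#if 0 /* example only, never compiled */
#include <stdio.h>

int main(void)
{
	char line[512];
	unsigned int version = 0;
	FILE *fp = fopen("/proc/schedstat", "r");

	if (!fp) {
		perror("fopen /proc/schedstat");
		return 1;
	}
	if (!fgets(line, sizeof(line), fp) ||
	    sscanf(line, "version %u", &version) != 1 || version != 15) {
		fprintf(stderr, "unexpected schedstat version\n");
		fclose(fp);
		return 1;
	}
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);	/* timestamp + per-CPU/domain lines */
	fclose(fp);
	return 0;
}
#endif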