0001 // SPDX-License-Identifier: GPL-2.0
0002 #include "builtin.h"
0003 #include "perf.h"
0004 #include "perf-sys.h"
0005 
0006 #include "util/cpumap.h"
0007 #include "util/evlist.h"
0008 #include "util/evsel.h"
0009 #include "util/evsel_fprintf.h"
0010 #include "util/symbol.h"
0011 #include "util/thread.h"
0012 #include "util/header.h"
0013 #include "util/session.h"
0014 #include "util/tool.h"
0015 #include "util/cloexec.h"
0016 #include "util/thread_map.h"
0017 #include "util/color.h"
0018 #include "util/stat.h"
0019 #include "util/string2.h"
0020 #include "util/callchain.h"
0021 #include "util/time-utils.h"
0022 
0023 #include <subcmd/pager.h>
0024 #include <subcmd/parse-options.h>
0025 #include "util/trace-event.h"
0026 
0027 #include "util/debug.h"
0028 #include "util/event.h"
0029 
0030 #include <linux/kernel.h>
0031 #include <linux/log2.h>
0032 #include <linux/zalloc.h>
0033 #include <sys/prctl.h>
0034 #include <sys/resource.h>
0035 #include <inttypes.h>
0036 
0037 #include <errno.h>
0038 #include <semaphore.h>
0039 #include <pthread.h>
0040 #include <math.h>
0041 #include <api/fs/fs.h>
0042 #include <perf/cpumap.h>
0043 #include <linux/time64.h>
0044 #include <linux/err.h>
0045 
0046 #include <linux/ctype.h>
0047 
0048 #define PR_SET_NAME     15               /* Set process name */
0049 #define MAX_CPUS        4096
0050 #define COMM_LEN        20
0051 #define SYM_LEN         129
0052 #define MAX_PID         1024000
0053 
0054 static const char *cpu_list;
0055 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
0056 
0057 struct sched_atom;
0058 
0059 struct task_desc {
0060     unsigned long       nr;
0061     unsigned long       pid;
0062     char            comm[COMM_LEN];
0063 
0064     unsigned long       nr_events;
0065     unsigned long       curr_event;
0066     struct sched_atom   **atoms;
0067 
0068     pthread_t       thread;
0069     sem_t           sleep_sem;
0070 
0071     sem_t           ready_for_work;
0072     sem_t           work_done_sem;
0073 
0074     u64         cpu_usage;
0075 };
0076 
0077 enum sched_event_type {
0078     SCHED_EVENT_RUN,
0079     SCHED_EVENT_SLEEP,
0080     SCHED_EVENT_WAKEUP,
0081     SCHED_EVENT_MIGRATION,
0082 };
0083 
0084 struct sched_atom {
0085     enum sched_event_type   type;
0086     int         specific_wait;
0087     u64         timestamp;
0088     u64         duration;
0089     unsigned long       nr;
0090     sem_t           *wait_sem;
0091     struct task_desc    *wakee;
0092 };
0093 
0094 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
0095 
0096 /* task state bitmask, copied from include/linux/sched.h */
0097 #define TASK_RUNNING        0
0098 #define TASK_INTERRUPTIBLE  1
0099 #define TASK_UNINTERRUPTIBLE    2
0100 #define __TASK_STOPPED      4
0101 #define __TASK_TRACED       8
0102 /* in tsk->exit_state */
0103 #define EXIT_DEAD       16
0104 #define EXIT_ZOMBIE     32
0105 #define EXIT_TRACE      (EXIT_ZOMBIE | EXIT_DEAD)
0106 /* in tsk->state again */
0107 #define TASK_DEAD       64
0108 #define TASK_WAKEKILL       128
0109 #define TASK_WAKING     256
0110 #define TASK_PARKED     512
0111 
0112 enum thread_state {
0113     THREAD_SLEEPING = 0,
0114     THREAD_WAIT_CPU,
0115     THREAD_SCHED_IN,
0116     THREAD_IGNORE
0117 };
0118 
0119 struct work_atom {
0120     struct list_head    list;
0121     enum thread_state   state;
0122     u64         sched_out_time;
0123     u64         wake_up_time;
0124     u64         sched_in_time;
0125     u64         runtime;
0126 };
0127 
0128 struct work_atoms {
0129     struct list_head    work_list;
0130     struct thread       *thread;
0131     struct rb_node      node;
0132     u64         max_lat;
0133     u64         max_lat_start;
0134     u64         max_lat_end;
0135     u64         total_lat;
0136     u64         nb_atoms;
0137     u64         total_runtime;
0138     int         num_merged;
0139 };
0140 
0141 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
0142 
0143 struct perf_sched;
0144 
0145 struct trace_sched_handler {
0146     int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
0147                 struct perf_sample *sample, struct machine *machine);
0148 
0149     int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
0150                  struct perf_sample *sample, struct machine *machine);
0151 
0152     int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
0153                 struct perf_sample *sample, struct machine *machine);
0154 
0155     /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
0156     int (*fork_event)(struct perf_sched *sched, union perf_event *event,
0157               struct machine *machine);
0158 
0159     int (*migrate_task_event)(struct perf_sched *sched,
0160                   struct evsel *evsel,
0161                   struct perf_sample *sample,
0162                   struct machine *machine);
0163 };
0164 
0165 #define COLOR_PIDS PERF_COLOR_BLUE
0166 #define COLOR_CPUS PERF_COLOR_BG_RED
0167 
0168 struct perf_sched_map {
0169     DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
0170     struct perf_cpu     *comp_cpus;
0171     bool             comp;
0172     struct perf_thread_map *color_pids;
0173     const char      *color_pids_str;
0174     struct perf_cpu_map *color_cpus;
0175     const char      *color_cpus_str;
0176     struct perf_cpu_map *cpus;
0177     const char      *cpus_str;
0178 };
0179 
0180 struct perf_sched {
0181     struct perf_tool tool;
0182     const char   *sort_order;
0183     unsigned long    nr_tasks;
0184     struct task_desc **pid_to_task;
0185     struct task_desc **tasks;
0186     const struct trace_sched_handler *tp_handler;
0187     pthread_mutex_t  start_work_mutex;
0188     pthread_mutex_t  work_done_wait_mutex;
0189     int      profile_cpu;
0190 /*
0191  * Track the current task - that way we can know whether there are any
0192  * weird events, such as a task being switched away that is not current.
0193  */
0194     struct perf_cpu  max_cpu;
0195     u32      curr_pid[MAX_CPUS];
0196     struct thread    *curr_thread[MAX_CPUS];
0197     char         next_shortname1;
0198     char         next_shortname2;
0199     unsigned int     replay_repeat;
0200     unsigned long    nr_run_events;
0201     unsigned long    nr_sleep_events;
0202     unsigned long    nr_wakeup_events;
0203     unsigned long    nr_sleep_corrections;
0204     unsigned long    nr_run_events_optimized;
0205     unsigned long    targetless_wakeups;
0206     unsigned long    multitarget_wakeups;
0207     unsigned long    nr_runs;
0208     unsigned long    nr_timestamps;
0209     unsigned long    nr_unordered_timestamps;
0210     unsigned long    nr_context_switch_bugs;
0211     unsigned long    nr_events;
0212     unsigned long    nr_lost_chunks;
0213     unsigned long    nr_lost_events;
0214     u64      run_measurement_overhead;
0215     u64      sleep_measurement_overhead;
0216     u64      start_time;
0217     u64      cpu_usage;
0218     u64      runavg_cpu_usage;
0219     u64      parent_cpu_usage;
0220     u64      runavg_parent_cpu_usage;
0221     u64      sum_runtime;
0222     u64      sum_fluct;
0223     u64      run_avg;
0224     u64      all_runtime;
0225     u64      all_count;
0226     u64      cpu_last_switched[MAX_CPUS];
0227     struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
0228     struct list_head sort_list, cmp_pid;
0229     bool force;
0230     bool skip_merge;
0231     struct perf_sched_map map;
0232 
0233     /* options for timehist command */
0234     bool        summary;
0235     bool        summary_only;
0236     bool        idle_hist;
0237     bool        show_callchain;
0238     unsigned int    max_stack;
0239     bool        show_cpu_visual;
0240     bool        show_wakeups;
0241     bool        show_next;
0242     bool        show_migrations;
0243     bool        show_state;
0244     u64     skipped_samples;
0245     const char  *time_str;
0246     struct perf_time_interval ptime;
0247     struct perf_time_interval hist_time;
0248 };
0249 
0250 /* per thread run time data */
0251 struct thread_runtime {
0252     u64 last_time;      /* time of previous sched in/out event */
0253     u64 dt_run;         /* run time */
0254     u64 dt_sleep;       /* time between CPU access by sleep (off cpu) */
0255     u64 dt_iowait;      /* time between CPU access by iowait (off cpu) */
0256     u64 dt_preempt;     /* time between CPU access by preempt (off cpu) */
0257     u64 dt_delay;       /* time between wakeup and sched-in */
0258     u64 ready_to_run;   /* time of wakeup */
0259 
0260     struct stats run_stats;
0261     u64 total_run_time;
0262     u64 total_sleep_time;
0263     u64 total_iowait_time;
0264     u64 total_preempt_time;
0265     u64 total_delay_time;
0266 
0267     int last_state;
0268 
0269     char shortname[3];
0270     bool comm_changed;
0271 
0272     u64 migrations;
0273 };
0274 
0275 /* per event run time data */
0276 struct evsel_runtime {
0277     u64 *last_time; /* time this event was last seen per cpu */
0278     u32 ncpu;       /* highest cpu slot allocated */
0279 };
0280 
0281 /* per cpu idle time data */
0282 struct idle_thread_runtime {
0283     struct thread_runtime   tr;
0284     struct thread       *last_thread;
0285     struct rb_root_cached   sorted_root;
0286     struct callchain_root   callchain;
0287     struct callchain_cursor cursor;
0288 };
0289 
0290 /* track idle times per cpu */
0291 static struct thread **idle_threads;
0292 static int idle_max_cpu;
0293 static char idle_comm[] = "<idle>";
0294 
0295 static u64 get_nsecs(void)
0296 {
0297     struct timespec ts;
0298 
0299     clock_gettime(CLOCK_MONOTONIC, &ts);
0300 
0301     return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
0302 }
0303 
0304 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
0305 {
0306     u64 T0 = get_nsecs(), T1;
0307 
0308     do {
0309         T1 = get_nsecs();
0310     } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
0311 }
0312 
0313 static void sleep_nsecs(u64 nsecs)
0314 {
0315     struct timespec ts;
0316 
0317     ts.tv_nsec = nsecs % 999999999;
0318     ts.tv_sec = nsecs / 999999999;
0319 
0320     nanosleep(&ts, NULL);
0321 }
0322 
0323 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
0324 {
0325     u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
0326     int i;
0327 
0328     for (i = 0; i < 10; i++) {
0329         T0 = get_nsecs();
0330         burn_nsecs(sched, 0);
0331         T1 = get_nsecs();
0332         delta = T1-T0;
0333         min_delta = min(min_delta, delta);
0334     }
0335     sched->run_measurement_overhead = min_delta;
0336 
0337     printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
0338 }
0339 
0340 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
0341 {
0342     u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
0343     int i;
0344 
0345     for (i = 0; i < 10; i++) {
0346         T0 = get_nsecs();
0347         sleep_nsecs(10000);
0348         T1 = get_nsecs();
0349         delta = T1-T0;
0350         min_delta = min(min_delta, delta);
0351     }
0352     min_delta -= 10000;
0353     sched->sleep_measurement_overhead = min_delta;
0354 
0355     printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
0356 }
0357 
0358 static struct sched_atom *
0359 get_new_event(struct task_desc *task, u64 timestamp)
0360 {
0361     struct sched_atom *event = zalloc(sizeof(*event));
0362     unsigned long idx = task->nr_events;
0363     size_t size;
0364 
0365     event->timestamp = timestamp;
0366     event->nr = idx;
0367 
0368     task->nr_events++;
0369     size = sizeof(struct sched_atom *) * task->nr_events;
0370     task->atoms = realloc(task->atoms, size);
0371     BUG_ON(!task->atoms);
0372 
0373     task->atoms[idx] = event;
0374 
0375     return event;
0376 }
0377 
0378 static struct sched_atom *last_event(struct task_desc *task)
0379 {
0380     if (!task->nr_events)
0381         return NULL;
0382 
0383     return task->atoms[task->nr_events - 1];
0384 }
0385 
0386 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
0387                 u64 timestamp, u64 duration)
0388 {
0389     struct sched_atom *event, *curr_event = last_event(task);
0390 
0391     /*
0392      * optimize an existing RUN event by merging this one
0393      * to it:
0394      */
0395     if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
0396         sched->nr_run_events_optimized++;
0397         curr_event->duration += duration;
0398         return;
0399     }
0400 
0401     event = get_new_event(task, timestamp);
0402 
0403     event->type = SCHED_EVENT_RUN;
0404     event->duration = duration;
0405 
0406     sched->nr_run_events++;
0407 }
0408 
0409 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
0410                    u64 timestamp, struct task_desc *wakee)
0411 {
0412     struct sched_atom *event, *wakee_event;
0413 
0414     event = get_new_event(task, timestamp);
0415     event->type = SCHED_EVENT_WAKEUP;
0416     event->wakee = wakee;
0417 
0418     wakee_event = last_event(wakee);
0419     if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
0420         sched->targetless_wakeups++;
0421         return;
0422     }
0423     if (wakee_event->wait_sem) {
0424         sched->multitarget_wakeups++;
0425         return;
0426     }
0427 
0428     wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
0429     sem_init(wakee_event->wait_sem, 0, 0);
0430     wakee_event->specific_wait = 1;
0431     event->wait_sem = wakee_event->wait_sem;
0432 
0433     sched->nr_wakeup_events++;
0434 }
0435 
0436 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
0437                   u64 timestamp, u64 task_state __maybe_unused)
0438 {
0439     struct sched_atom *event = get_new_event(task, timestamp);
0440 
0441     event->type = SCHED_EVENT_SLEEP;
0442 
0443     sched->nr_sleep_events++;
0444 }
0445 
0446 static struct task_desc *register_pid(struct perf_sched *sched,
0447                       unsigned long pid, const char *comm)
0448 {
0449     struct task_desc *task;
0450     static int pid_max;
0451 
0452     if (sched->pid_to_task == NULL) {
0453         if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
0454             pid_max = MAX_PID;
0455         BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
0456     }
0457     if (pid >= (unsigned long)pid_max) {
0458         BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
0459             sizeof(struct task_desc *))) == NULL);
0460         while (pid >= (unsigned long)pid_max)
0461             sched->pid_to_task[pid_max++] = NULL;
0462     }
0463 
0464     task = sched->pid_to_task[pid];
0465 
0466     if (task)
0467         return task;
0468 
0469     task = zalloc(sizeof(*task));
0470     task->pid = pid;
0471     task->nr = sched->nr_tasks;
0472     strcpy(task->comm, comm);
0473     /*
0474      * every task starts in sleeping state - this gets ignored
0475      * if there's no wakeup pointing to this sleep state:
0476      */
0477     add_sched_event_sleep(sched, task, 0, 0);
0478 
0479     sched->pid_to_task[pid] = task;
0480     sched->nr_tasks++;
0481     sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
0482     BUG_ON(!sched->tasks);
0483     sched->tasks[task->nr] = task;
0484 
0485     if (verbose > 0)
0486         printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
0487 
0488     return task;
0489 }
0490 
0491 
0492 static void print_task_traces(struct perf_sched *sched)
0493 {
0494     struct task_desc *task;
0495     unsigned long i;
0496 
0497     for (i = 0; i < sched->nr_tasks; i++) {
0498         task = sched->tasks[i];
0499         printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
0500             task->nr, task->comm, task->pid, task->nr_events);
0501     }
0502 }
0503 
0504 static void add_cross_task_wakeups(struct perf_sched *sched)
0505 {
0506     struct task_desc *task1, *task2;
0507     unsigned long i, j;
0508 
0509     for (i = 0; i < sched->nr_tasks; i++) {
0510         task1 = sched->tasks[i];
0511         j = i + 1;
0512         if (j == sched->nr_tasks)
0513             j = 0;
0514         task2 = sched->tasks[j];
0515         add_sched_event_wakeup(sched, task1, 0, task2);
0516     }
0517 }
0518 
0519 static void perf_sched__process_event(struct perf_sched *sched,
0520                       struct sched_atom *atom)
0521 {
0522     int ret = 0;
0523 
0524     switch (atom->type) {
0525         case SCHED_EVENT_RUN:
0526             burn_nsecs(sched, atom->duration);
0527             break;
0528         case SCHED_EVENT_SLEEP:
0529             if (atom->wait_sem)
0530                 ret = sem_wait(atom->wait_sem);
0531             BUG_ON(ret);
0532             break;
0533         case SCHED_EVENT_WAKEUP:
0534             if (atom->wait_sem)
0535                 ret = sem_post(atom->wait_sem);
0536             BUG_ON(ret);
0537             break;
0538         case SCHED_EVENT_MIGRATION:
0539             break;
0540         default:
0541             BUG_ON(1);
0542     }
0543 }
0544 
0545 static u64 get_cpu_usage_nsec_parent(void)
0546 {
0547     struct rusage ru;
0548     u64 sum;
0549     int err;
0550 
0551     err = getrusage(RUSAGE_SELF, &ru);
0552     BUG_ON(err);
0553 
0554     sum =  ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
0555     sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
0556 
0557     return sum;
0558 }
0559 
0560 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
0561 {
0562     struct perf_event_attr attr;
0563     char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
0564     int fd;
0565     struct rlimit limit;
0566     bool need_privilege = false;
0567 
0568     memset(&attr, 0, sizeof(attr));
0569 
0570     attr.type = PERF_TYPE_SOFTWARE;
0571     attr.config = PERF_COUNT_SW_TASK_CLOCK;
0572 
0573 force_again:
0574     fd = sys_perf_event_open(&attr, 0, -1, -1,
0575                  perf_event_open_cloexec_flag());
0576 
0577     if (fd < 0) {
0578         if (errno == EMFILE) {
0579             if (sched->force) {
0580                 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
0581                 limit.rlim_cur += sched->nr_tasks - cur_task;
0582                 if (limit.rlim_cur > limit.rlim_max) {
0583                     limit.rlim_max = limit.rlim_cur;
0584                     need_privilege = true;
0585                 }
0586                 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
0587                     if (need_privilege && errno == EPERM)
0588                         strcpy(info, "Need privilege\n");
0589                 } else
0590                     goto force_again;
0591             } else
0592                 strcpy(info, "Have a try with -f option\n");
0593         }
0594         pr_err("Error: sys_perf_event_open() syscall returned "
0595                "with %d (%s)\n%s", fd,
0596                str_error_r(errno, sbuf, sizeof(sbuf)), info);
0597         exit(EXIT_FAILURE);
0598     }
0599     return fd;
0600 }
0601 
0602 static u64 get_cpu_usage_nsec_self(int fd)
0603 {
0604     u64 runtime;
0605     int ret;
0606 
0607     ret = read(fd, &runtime, sizeof(runtime));
0608     BUG_ON(ret != sizeof(runtime));
0609 
0610     return runtime;
0611 }
0612 
0613 struct sched_thread_parms {
0614     struct task_desc  *task;
0615     struct perf_sched *sched;
0616     int fd;
0617 };
0618 
0619 static void *thread_func(void *ctx)
0620 {
0621     struct sched_thread_parms *parms = ctx;
0622     struct task_desc *this_task = parms->task;
0623     struct perf_sched *sched = parms->sched;
0624     u64 cpu_usage_0, cpu_usage_1;
0625     unsigned long i, ret;
0626     char comm2[22];
0627     int fd = parms->fd;
0628 
0629     zfree(&parms);
0630 
0631     sprintf(comm2, ":%s", this_task->comm);
0632     prctl(PR_SET_NAME, comm2);
0633     if (fd < 0)
0634         return NULL;
0635 again:
0636     ret = sem_post(&this_task->ready_for_work);
0637     BUG_ON(ret);
0638     ret = pthread_mutex_lock(&sched->start_work_mutex);
0639     BUG_ON(ret);
0640     ret = pthread_mutex_unlock(&sched->start_work_mutex);
0641     BUG_ON(ret);
0642 
0643     cpu_usage_0 = get_cpu_usage_nsec_self(fd);
0644 
0645     for (i = 0; i < this_task->nr_events; i++) {
0646         this_task->curr_event = i;
0647         perf_sched__process_event(sched, this_task->atoms[i]);
0648     }
0649 
0650     cpu_usage_1 = get_cpu_usage_nsec_self(fd);
0651     this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
0652     ret = sem_post(&this_task->work_done_sem);
0653     BUG_ON(ret);
0654 
0655     ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
0656     BUG_ON(ret);
0657     ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
0658     BUG_ON(ret);
0659 
0660     goto again;
0661 }
0662 
0663 static void create_tasks(struct perf_sched *sched)
0664 {
0665     struct task_desc *task;
0666     pthread_attr_t attr;
0667     unsigned long i;
0668     int err;
0669 
0670     err = pthread_attr_init(&attr);
0671     BUG_ON(err);
0672     err = pthread_attr_setstacksize(&attr,
0673             (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
0674     BUG_ON(err);
0675     err = pthread_mutex_lock(&sched->start_work_mutex);
0676     BUG_ON(err);
0677     err = pthread_mutex_lock(&sched->work_done_wait_mutex);
0678     BUG_ON(err);
0679     for (i = 0; i < sched->nr_tasks; i++) {
0680         struct sched_thread_parms *parms = malloc(sizeof(*parms));
0681         BUG_ON(parms == NULL);
0682         parms->task = task = sched->tasks[i];
0683         parms->sched = sched;
0684         parms->fd = self_open_counters(sched, i);
0685         sem_init(&task->sleep_sem, 0, 0);
0686         sem_init(&task->ready_for_work, 0, 0);
0687         sem_init(&task->work_done_sem, 0, 0);
0688         task->curr_event = 0;
0689         err = pthread_create(&task->thread, &attr, thread_func, parms);
0690         BUG_ON(err);
0691     }
0692 }
0693 
0694 static void wait_for_tasks(struct perf_sched *sched)
0695 {
0696     u64 cpu_usage_0, cpu_usage_1;
0697     struct task_desc *task;
0698     unsigned long i, ret;
0699 
0700     sched->start_time = get_nsecs();
0701     sched->cpu_usage = 0;
0702     pthread_mutex_unlock(&sched->work_done_wait_mutex);
0703 
0704     for (i = 0; i < sched->nr_tasks; i++) {
0705         task = sched->tasks[i];
0706         ret = sem_wait(&task->ready_for_work);
0707         BUG_ON(ret);
0708         sem_init(&task->ready_for_work, 0, 0);
0709     }
0710     ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
0711     BUG_ON(ret);
0712 
0713     cpu_usage_0 = get_cpu_usage_nsec_parent();
0714 
0715     pthread_mutex_unlock(&sched->start_work_mutex);
0716 
0717     for (i = 0; i < sched->nr_tasks; i++) {
0718         task = sched->tasks[i];
0719         ret = sem_wait(&task->work_done_sem);
0720         BUG_ON(ret);
0721         sem_init(&task->work_done_sem, 0, 0);
0722         sched->cpu_usage += task->cpu_usage;
0723         task->cpu_usage = 0;
0724     }
0725 
0726     cpu_usage_1 = get_cpu_usage_nsec_parent();
0727     if (!sched->runavg_cpu_usage)
0728         sched->runavg_cpu_usage = sched->cpu_usage;
0729     sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
0730 
0731     sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
0732     if (!sched->runavg_parent_cpu_usage)
0733         sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
0734     sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
0735                      sched->parent_cpu_usage)/sched->replay_repeat;
0736 
0737     ret = pthread_mutex_lock(&sched->start_work_mutex);
0738     BUG_ON(ret);
0739 
0740     for (i = 0; i < sched->nr_tasks; i++) {
0741         task = sched->tasks[i];
0742         sem_init(&task->sleep_sem, 0, 0);
0743         task->curr_event = 0;
0744     }
0745 }
0746 
0747 static void run_one_test(struct perf_sched *sched)
0748 {
0749     u64 T0, T1, delta, avg_delta, fluct;
0750 
0751     T0 = get_nsecs();
0752     wait_for_tasks(sched);
0753     T1 = get_nsecs();
0754 
0755     delta = T1 - T0;
0756     sched->sum_runtime += delta;
0757     sched->nr_runs++;
0758 
0759     avg_delta = sched->sum_runtime / sched->nr_runs;
0760     if (delta < avg_delta)
0761         fluct = avg_delta - delta;
0762     else
0763         fluct = delta - avg_delta;
0764     sched->sum_fluct += fluct;
0765     if (!sched->run_avg)
0766         sched->run_avg = delta;
0767     sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
0768 
0769     printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
0770 
0771     printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
0772 
0773     printf("cpu: %0.2f / %0.2f",
0774         (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
0775 
0776 #if 0
0777     /*
0778      * rusage statistics done by the parent, these are less
0779      * accurate than the sched->sum_exec_runtime based statistics:
0780      */
0781     printf(" [%0.2f / %0.2f]",
0782         (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
0783         (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
0784 #endif
0785 
0786     printf("\n");
0787 
0788     if (sched->nr_sleep_corrections)
0789         printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
0790     sched->nr_sleep_corrections = 0;
0791 }
0792 
0793 static void test_calibrations(struct perf_sched *sched)
0794 {
0795     u64 T0, T1;
0796 
0797     T0 = get_nsecs();
0798     burn_nsecs(sched, NSEC_PER_MSEC);
0799     T1 = get_nsecs();
0800 
0801     printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
0802 
0803     T0 = get_nsecs();
0804     sleep_nsecs(NSEC_PER_MSEC);
0805     T1 = get_nsecs();
0806 
0807     printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
0808 }
0809 
0810 static int
0811 replay_wakeup_event(struct perf_sched *sched,
0812             struct evsel *evsel, struct perf_sample *sample,
0813             struct machine *machine __maybe_unused)
0814 {
0815     const char *comm = evsel__strval(evsel, sample, "comm");
0816     const u32 pid    = evsel__intval(evsel, sample, "pid");
0817     struct task_desc *waker, *wakee;
0818 
0819     if (verbose > 0) {
0820         printf("sched_wakeup event %p\n", evsel);
0821 
0822         printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
0823     }
0824 
0825     waker = register_pid(sched, sample->tid, "<unknown>");
0826     wakee = register_pid(sched, pid, comm);
0827 
0828     add_sched_event_wakeup(sched, waker, sample->time, wakee);
0829     return 0;
0830 }
0831 
0832 static int replay_switch_event(struct perf_sched *sched,
0833                    struct evsel *evsel,
0834                    struct perf_sample *sample,
0835                    struct machine *machine __maybe_unused)
0836 {
0837     const char *prev_comm  = evsel__strval(evsel, sample, "prev_comm"),
0838            *next_comm  = evsel__strval(evsel, sample, "next_comm");
0839     const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
0840           next_pid = evsel__intval(evsel, sample, "next_pid");
0841     const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
0842     struct task_desc *prev, __maybe_unused *next;
0843     u64 timestamp0, timestamp = sample->time;
0844     int cpu = sample->cpu;
0845     s64 delta;
0846 
0847     if (verbose > 0)
0848         printf("sched_switch event %p\n", evsel);
0849 
0850     if (cpu >= MAX_CPUS || cpu < 0)
0851         return 0;
0852 
0853     timestamp0 = sched->cpu_last_switched[cpu];
0854     if (timestamp0)
0855         delta = timestamp - timestamp0;
0856     else
0857         delta = 0;
0858 
0859     if (delta < 0) {
0860         pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
0861         return -1;
0862     }
0863 
0864     pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
0865          prev_comm, prev_pid, next_comm, next_pid, delta);
0866 
0867     prev = register_pid(sched, prev_pid, prev_comm);
0868     next = register_pid(sched, next_pid, next_comm);
0869 
0870     sched->cpu_last_switched[cpu] = timestamp;
0871 
0872     add_sched_event_run(sched, prev, timestamp, delta);
0873     add_sched_event_sleep(sched, prev, timestamp, prev_state);
0874 
0875     return 0;
0876 }
0877 
0878 static int replay_fork_event(struct perf_sched *sched,
0879                  union perf_event *event,
0880                  struct machine *machine)
0881 {
0882     struct thread *child, *parent;
0883 
0884     child = machine__findnew_thread(machine, event->fork.pid,
0885                     event->fork.tid);
0886     parent = machine__findnew_thread(machine, event->fork.ppid,
0887                      event->fork.ptid);
0888 
0889     if (child == NULL || parent == NULL) {
0890         pr_debug("thread does not exist on fork event: child %p, parent %p\n",
0891                  child, parent);
0892         goto out_put;
0893     }
0894 
0895     if (verbose > 0) {
0896         printf("fork event\n");
0897         printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
0898         printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
0899     }
0900 
0901     register_pid(sched, parent->tid, thread__comm_str(parent));
0902     register_pid(sched, child->tid, thread__comm_str(child));
0903 out_put:
0904     thread__put(child);
0905     thread__put(parent);
0906     return 0;
0907 }
0908 
0909 struct sort_dimension {
0910     const char      *name;
0911     sort_fn_t       cmp;
0912     struct list_head    list;
0913 };
0914 
0915 /*
0916  * handle runtime stats saved per thread
0917  */
0918 static struct thread_runtime *thread__init_runtime(struct thread *thread)
0919 {
0920     struct thread_runtime *r;
0921 
0922     r = zalloc(sizeof(struct thread_runtime));
0923     if (!r)
0924         return NULL;
0925 
0926     init_stats(&r->run_stats);
0927     thread__set_priv(thread, r);
0928 
0929     return r;
0930 }
0931 
0932 static struct thread_runtime *thread__get_runtime(struct thread *thread)
0933 {
0934     struct thread_runtime *tr;
0935 
0936     tr = thread__priv(thread);
0937     if (tr == NULL) {
0938         tr = thread__init_runtime(thread);
0939         if (tr == NULL)
0940             pr_debug("Failed to malloc memory for runtime data.\n");
0941     }
0942 
0943     return tr;
0944 }
0945 
0946 static int
0947 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
0948 {
0949     struct sort_dimension *sort;
0950     int ret = 0;
0951 
0952     BUG_ON(list_empty(list));
0953 
0954     list_for_each_entry(sort, list, list) {
0955         ret = sort->cmp(l, r);
0956         if (ret)
0957             return ret;
0958     }
0959 
0960     return ret;
0961 }
0962 
0963 static struct work_atoms *
0964 thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
0965              struct list_head *sort_list)
0966 {
0967     struct rb_node *node = root->rb_root.rb_node;
0968     struct work_atoms key = { .thread = thread };
0969 
0970     while (node) {
0971         struct work_atoms *atoms;
0972         int cmp;
0973 
0974         atoms = container_of(node, struct work_atoms, node);
0975 
0976         cmp = thread_lat_cmp(sort_list, &key, atoms);
0977         if (cmp > 0)
0978             node = node->rb_left;
0979         else if (cmp < 0)
0980             node = node->rb_right;
0981         else {
0982             BUG_ON(thread != atoms->thread);
0983             return atoms;
0984         }
0985     }
0986     return NULL;
0987 }
0988 
0989 static void
0990 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
0991              struct list_head *sort_list)
0992 {
0993     struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
0994     bool leftmost = true;
0995 
0996     while (*new) {
0997         struct work_atoms *this;
0998         int cmp;
0999 
1000         this = container_of(*new, struct work_atoms, node);
1001         parent = *new;
1002 
1003         cmp = thread_lat_cmp(sort_list, data, this);
1004 
1005         if (cmp > 0)
1006             new = &((*new)->rb_left);
1007         else {
1008             new = &((*new)->rb_right);
1009             leftmost = false;
1010         }
1011     }
1012 
1013     rb_link_node(&data->node, parent, new);
1014     rb_insert_color_cached(&data->node, root, leftmost);
1015 }
1016 
1017 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
1018 {
1019     struct work_atoms *atoms = zalloc(sizeof(*atoms));
1020     if (!atoms) {
1021         pr_err("No memory at %s\n", __func__);
1022         return -1;
1023     }
1024 
1025     atoms->thread = thread__get(thread);
1026     INIT_LIST_HEAD(&atoms->work_list);
1027     __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
1028     return 0;
1029 }
1030 
1031 static char sched_out_state(u64 prev_state)
1032 {
1033     const char *str = TASK_STATE_TO_CHAR_STR;
1034 
1035     return str[prev_state];
1036 }
1037 
1038 static int
1039 add_sched_out_event(struct work_atoms *atoms,
1040             char run_state,
1041             u64 timestamp)
1042 {
1043     struct work_atom *atom = zalloc(sizeof(*atom));
1044     if (!atom) {
1045         pr_err("No memory at %s", __func__);
1046         return -1;
1047     }
1048 
1049     atom->sched_out_time = timestamp;
1050 
1051     if (run_state == 'R') {
1052         atom->state = THREAD_WAIT_CPU;
1053         atom->wake_up_time = atom->sched_out_time;
1054     }
1055 
1056     list_add_tail(&atom->list, &atoms->work_list);
1057     return 0;
1058 }
1059 
1060 static void
1061 add_runtime_event(struct work_atoms *atoms, u64 delta,
1062           u64 timestamp __maybe_unused)
1063 {
1064     struct work_atom *atom;
1065 
1066     BUG_ON(list_empty(&atoms->work_list));
1067 
1068     atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1069 
1070     atom->runtime += delta;
1071     atoms->total_runtime += delta;
1072 }
1073 
1074 static void
1075 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
1076 {
1077     struct work_atom *atom;
1078     u64 delta;
1079 
1080     if (list_empty(&atoms->work_list))
1081         return;
1082 
1083     atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1084 
1085     if (atom->state != THREAD_WAIT_CPU)
1086         return;
1087 
1088     if (timestamp < atom->wake_up_time) {
1089         atom->state = THREAD_IGNORE;
1090         return;
1091     }
1092 
1093     atom->state = THREAD_SCHED_IN;
1094     atom->sched_in_time = timestamp;
1095 
1096     delta = atom->sched_in_time - atom->wake_up_time;
1097     atoms->total_lat += delta;
1098     if (delta > atoms->max_lat) {
1099         atoms->max_lat = delta;
1100         atoms->max_lat_start = atom->wake_up_time;
1101         atoms->max_lat_end = timestamp;
1102     }
1103     atoms->nb_atoms++;
1104 }
1105 
1106 static int latency_switch_event(struct perf_sched *sched,
1107                 struct evsel *evsel,
1108                 struct perf_sample *sample,
1109                 struct machine *machine)
1110 {
1111     const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1112           next_pid = evsel__intval(evsel, sample, "next_pid");
1113     const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
1114     struct work_atoms *out_events, *in_events;
1115     struct thread *sched_out, *sched_in;
1116     u64 timestamp0, timestamp = sample->time;
1117     int cpu = sample->cpu, err = -1;
1118     s64 delta;
1119 
1120     BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1121 
1122     timestamp0 = sched->cpu_last_switched[cpu];
1123     sched->cpu_last_switched[cpu] = timestamp;
1124     if (timestamp0)
1125         delta = timestamp - timestamp0;
1126     else
1127         delta = 0;
1128 
1129     if (delta < 0) {
1130         pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
1131         return -1;
1132     }
1133 
1134     sched_out = machine__findnew_thread(machine, -1, prev_pid);
1135     sched_in = machine__findnew_thread(machine, -1, next_pid);
1136     if (sched_out == NULL || sched_in == NULL)
1137         goto out_put;
1138 
1139     out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1140     if (!out_events) {
1141         if (thread_atoms_insert(sched, sched_out))
1142             goto out_put;
1143         out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1144         if (!out_events) {
1145             pr_err("out-event: Internal tree error");
1146             goto out_put;
1147         }
1148     }
1149     if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
1150         return -1;
1151 
1152     in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1153     if (!in_events) {
1154         if (thread_atoms_insert(sched, sched_in))
1155             goto out_put;
1156         in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1157         if (!in_events) {
1158             pr_err("in-event: Internal tree error");
1159             goto out_put;
1160         }
1161         /*
1162          * A task came in that we have not heard about yet;
1163          * add an initial atom in runnable state:
1164          */
1165         if (add_sched_out_event(in_events, 'R', timestamp))
1166             goto out_put;
1167     }
1168     add_sched_in_event(in_events, timestamp);
1169     err = 0;
1170 out_put:
1171     thread__put(sched_out);
1172     thread__put(sched_in);
1173     return err;
1174 }
1175 
1176 static int latency_runtime_event(struct perf_sched *sched,
1177                  struct evsel *evsel,
1178                  struct perf_sample *sample,
1179                  struct machine *machine)
1180 {
1181     const u32 pid      = evsel__intval(evsel, sample, "pid");
1182     const u64 runtime  = evsel__intval(evsel, sample, "runtime");
1183     struct thread *thread = machine__findnew_thread(machine, -1, pid);
1184     struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1185     u64 timestamp = sample->time;
1186     int cpu = sample->cpu, err = -1;
1187 
1188     if (thread == NULL)
1189         return -1;
1190 
1191     BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1192     if (!atoms) {
1193         if (thread_atoms_insert(sched, thread))
1194             goto out_put;
1195         atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1196         if (!atoms) {
1197             pr_err("in-event: Internal tree error");
1198             goto out_put;
1199         }
1200         if (add_sched_out_event(atoms, 'R', timestamp))
1201             goto out_put;
1202     }
1203 
1204     add_runtime_event(atoms, runtime, timestamp);
1205     err = 0;
1206 out_put:
1207     thread__put(thread);
1208     return err;
1209 }
1210 
1211 static int latency_wakeup_event(struct perf_sched *sched,
1212                 struct evsel *evsel,
1213                 struct perf_sample *sample,
1214                 struct machine *machine)
1215 {
1216     const u32 pid     = evsel__intval(evsel, sample, "pid");
1217     struct work_atoms *atoms;
1218     struct work_atom *atom;
1219     struct thread *wakee;
1220     u64 timestamp = sample->time;
1221     int err = -1;
1222 
1223     wakee = machine__findnew_thread(machine, -1, pid);
1224     if (wakee == NULL)
1225         return -1;
1226     atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1227     if (!atoms) {
1228         if (thread_atoms_insert(sched, wakee))
1229             goto out_put;
1230         atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1231         if (!atoms) {
1232             pr_err("wakeup-event: Internal tree error");
1233             goto out_put;
1234         }
1235         if (add_sched_out_event(atoms, 'S', timestamp))
1236             goto out_put;
1237     }
1238 
1239     BUG_ON(list_empty(&atoms->work_list));
1240 
1241     atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1242 
1243     /*
1244      * A wakeup event is not guaranteed to happen only while the
1245      * task is off the run queue; it may also occur while the task
1246      * is still on the run queue and the wakeup merely changes
1247      * ->state to TASK_RUNNING, so we should not set ->wake_up_time
1248      * when waking a task that is already on the run queue.
1249      *
1250      * You WILL be missing events if you've recorded only
1251      * one CPU, or are only looking at one, so don't
1252      * skip in this case.
1253      */
1254     if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1255         goto out_ok;
1256 
1257     sched->nr_timestamps++;
1258     if (atom->sched_out_time > timestamp) {
1259         sched->nr_unordered_timestamps++;
1260         goto out_ok;
1261     }
1262 
1263     atom->state = THREAD_WAIT_CPU;
1264     atom->wake_up_time = timestamp;
1265 out_ok:
1266     err = 0;
1267 out_put:
1268     thread__put(wakee);
1269     return err;
1270 }
1271 
1272 static int latency_migrate_task_event(struct perf_sched *sched,
1273                       struct evsel *evsel,
1274                       struct perf_sample *sample,
1275                       struct machine *machine)
1276 {
1277     const u32 pid = evsel__intval(evsel, sample, "pid");
1278     u64 timestamp = sample->time;
1279     struct work_atoms *atoms;
1280     struct work_atom *atom;
1281     struct thread *migrant;
1282     int err = -1;
1283 
1284     /*
1285      * Only need to worry about migration when profiling one CPU.
1286      */
1287     if (sched->profile_cpu == -1)
1288         return 0;
1289 
1290     migrant = machine__findnew_thread(machine, -1, pid);
1291     if (migrant == NULL)
1292         return -1;
1293     atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1294     if (!atoms) {
1295         if (thread_atoms_insert(sched, migrant))
1296             goto out_put;
1297         register_pid(sched, migrant->tid, thread__comm_str(migrant));
1298         atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1299         if (!atoms) {
1300             pr_err("migration-event: Internal tree error");
1301             goto out_put;
1302         }
1303         if (add_sched_out_event(atoms, 'R', timestamp))
1304             goto out_put;
1305     }
1306 
1307     BUG_ON(list_empty(&atoms->work_list));
1308 
1309     atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1310     atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1311 
1312     sched->nr_timestamps++;
1313 
1314     if (atom->sched_out_time > timestamp)
1315         sched->nr_unordered_timestamps++;
1316     err = 0;
1317 out_put:
1318     thread__put(migrant);
1319     return err;
1320 }
1321 
1322 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1323 {
1324     int i;
1325     int ret;
1326     u64 avg;
1327     char max_lat_start[32], max_lat_end[32];
1328 
1329     if (!work_list->nb_atoms)
1330         return;
1331     /*
1332      * Ignore idle threads:
1333      */
1334     if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1335         return;
1336 
1337     sched->all_runtime += work_list->total_runtime;
1338     sched->all_count   += work_list->nb_atoms;
1339 
1340     if (work_list->num_merged > 1)
1341         ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
1342     else
1343         ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1344 
1345     for (i = 0; i < 24 - ret; i++)
1346         printf(" ");
1347 
1348     avg = work_list->total_lat / work_list->nb_atoms;
1349     timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
1350     timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
1351 
1352     printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
1353           (double)work_list->total_runtime / NSEC_PER_MSEC,
1354          work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1355          (double)work_list->max_lat / NSEC_PER_MSEC,
1356          max_lat_start, max_lat_end);
1357 }
1358 
1359 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1360 {
1361     if (l->thread == r->thread)
1362         return 0;
1363     if (l->thread->tid < r->thread->tid)
1364         return -1;
1365     if (l->thread->tid > r->thread->tid)
1366         return 1;
1367     return (int)(l->thread - r->thread);
1368 }
1369 
1370 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1371 {
1372     u64 avgl, avgr;
1373 
1374     if (!l->nb_atoms)
1375         return -1;
1376 
1377     if (!r->nb_atoms)
1378         return 1;
1379 
1380     avgl = l->total_lat / l->nb_atoms;
1381     avgr = r->total_lat / r->nb_atoms;
1382 
1383     if (avgl < avgr)
1384         return -1;
1385     if (avgl > avgr)
1386         return 1;
1387 
1388     return 0;
1389 }
1390 
1391 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1392 {
1393     if (l->max_lat < r->max_lat)
1394         return -1;
1395     if (l->max_lat > r->max_lat)
1396         return 1;
1397 
1398     return 0;
1399 }
1400 
1401 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1402 {
1403     if (l->nb_atoms < r->nb_atoms)
1404         return -1;
1405     if (l->nb_atoms > r->nb_atoms)
1406         return 1;
1407 
1408     return 0;
1409 }
1410 
1411 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1412 {
1413     if (l->total_runtime < r->total_runtime)
1414         return -1;
1415     if (l->total_runtime > r->total_runtime)
1416         return 1;
1417 
1418     return 0;
1419 }
1420 
1421 static int sort_dimension__add(const char *tok, struct list_head *list)
1422 {
1423     size_t i;
1424     static struct sort_dimension avg_sort_dimension = {
1425         .name = "avg",
1426         .cmp  = avg_cmp,
1427     };
1428     static struct sort_dimension max_sort_dimension = {
1429         .name = "max",
1430         .cmp  = max_cmp,
1431     };
1432     static struct sort_dimension pid_sort_dimension = {
1433         .name = "pid",
1434         .cmp  = pid_cmp,
1435     };
1436     static struct sort_dimension runtime_sort_dimension = {
1437         .name = "runtime",
1438         .cmp  = runtime_cmp,
1439     };
1440     static struct sort_dimension switch_sort_dimension = {
1441         .name = "switch",
1442         .cmp  = switch_cmp,
1443     };
1444     struct sort_dimension *available_sorts[] = {
1445         &pid_sort_dimension,
1446         &avg_sort_dimension,
1447         &max_sort_dimension,
1448         &switch_sort_dimension,
1449         &runtime_sort_dimension,
1450     };
1451 
1452     for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1453         if (!strcmp(available_sorts[i]->name, tok)) {
1454             list_add_tail(&available_sorts[i]->list, list);
1455 
1456             return 0;
1457         }
1458     }
1459 
1460     return -1;
1461 }
1462 
1463 static void perf_sched__sort_lat(struct perf_sched *sched)
1464 {
1465     struct rb_node *node;
1466     struct rb_root_cached *root = &sched->atom_root;
1467 again:
1468     for (;;) {
1469         struct work_atoms *data;
1470         node = rb_first_cached(root);
1471         if (!node)
1472             break;
1473 
1474         rb_erase_cached(node, root);
1475         data = rb_entry(node, struct work_atoms, node);
1476         __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1477     }
1478     if (root == &sched->atom_root) {
1479         root = &sched->merged_atom_root;
1480         goto again;
1481     }
1482 }
1483 
1484 static int process_sched_wakeup_event(struct perf_tool *tool,
1485                       struct evsel *evsel,
1486                       struct perf_sample *sample,
1487                       struct machine *machine)
1488 {
1489     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1490 
1491     if (sched->tp_handler->wakeup_event)
1492         return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1493 
1494     return 0;
1495 }
1496 
1497 union map_priv {
1498     void    *ptr;
1499     bool     color;
1500 };
1501 
1502 static bool thread__has_color(struct thread *thread)
1503 {
1504     union map_priv priv = {
1505         .ptr = thread__priv(thread),
1506     };
1507 
1508     return priv.color;
1509 }
1510 
1511 static struct thread*
1512 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1513 {
1514     struct thread *thread = machine__findnew_thread(machine, pid, tid);
1515     union map_priv priv = {
1516         .color = false,
1517     };
1518 
1519     if (!sched->map.color_pids || !thread || thread__priv(thread))
1520         return thread;
1521 
1522     if (thread_map__has(sched->map.color_pids, tid))
1523         priv.color = true;
1524 
1525     thread__set_priv(thread, priv.ptr);
1526     return thread;
1527 }
1528 
1529 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1530                 struct perf_sample *sample, struct machine *machine)
1531 {
1532     const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
1533     struct thread *sched_in;
1534     struct thread_runtime *tr;
1535     int new_shortname;
1536     u64 timestamp0, timestamp = sample->time;
1537     s64 delta;
1538     int i;
1539     struct perf_cpu this_cpu = {
1540         .cpu = sample->cpu,
1541     };
1542     int cpus_nr;
1543     bool new_cpu = false;
1544     const char *color = PERF_COLOR_NORMAL;
1545     char stimestamp[32];
1546 
1547     BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
1548 
1549     if (this_cpu.cpu > sched->max_cpu.cpu)
1550         sched->max_cpu = this_cpu;
1551 
1552     if (sched->map.comp) {
1553         cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1554         if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
1555             sched->map.comp_cpus[cpus_nr++] = this_cpu;
1556             new_cpu = true;
1557         }
1558     } else
1559         cpus_nr = sched->max_cpu.cpu;
1560 
1561     timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
1562     sched->cpu_last_switched[this_cpu.cpu] = timestamp;
1563     if (timestamp0)
1564         delta = timestamp - timestamp0;
1565     else
1566         delta = 0;
1567 
1568     if (delta < 0) {
1569         pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
1570         return -1;
1571     }
1572 
1573     sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1574     if (sched_in == NULL)
1575         return -1;
1576 
1577     tr = thread__get_runtime(sched_in);
1578     if (tr == NULL) {
1579         thread__put(sched_in);
1580         return -1;
1581     }
1582 
1583     sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
1584 
1585     printf("  ");
1586 
1587     new_shortname = 0;
1588     if (!tr->shortname[0]) {
1589         if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1590             /*
1591              * Don't allocate a letter-number for swapper:0
1592              * as a shortname. Instead, we use '.' for it.
1593              */
1594             tr->shortname[0] = '.';
1595             tr->shortname[1] = ' ';
1596         } else {
1597             tr->shortname[0] = sched->next_shortname1;
1598             tr->shortname[1] = sched->next_shortname2;
1599 
1600             if (sched->next_shortname1 < 'Z') {
1601                 sched->next_shortname1++;
1602             } else {
1603                 sched->next_shortname1 = 'A';
1604                 if (sched->next_shortname2 < '9')
1605                     sched->next_shortname2++;
1606                 else
1607                     sched->next_shortname2 = '0';
1608             }
1609         }
1610         new_shortname = 1;
1611     }
1612 
1613     for (i = 0; i < cpus_nr; i++) {
1614         struct perf_cpu cpu = {
1615             .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
1616         };
1617         struct thread *curr_thread = sched->curr_thread[cpu.cpu];
1618         struct thread_runtime *curr_tr;
1619         const char *pid_color = color;
1620         const char *cpu_color = color;
1621 
1622         if (curr_thread && thread__has_color(curr_thread))
1623             pid_color = COLOR_PIDS;
1624 
1625         if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
1626             continue;
1627 
1628         if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
1629             cpu_color = COLOR_CPUS;
1630 
1631         if (cpu.cpu != this_cpu.cpu)
1632             color_fprintf(stdout, color, " ");
1633         else
1634             color_fprintf(stdout, cpu_color, "*");
1635 
1636         if (sched->curr_thread[cpu.cpu]) {
1637             curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
1638             if (curr_tr == NULL) {
1639                 thread__put(sched_in);
1640                 return -1;
1641             }
1642             color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
1643         } else
1644             color_fprintf(stdout, color, "   ");
1645     }
1646 
1647     if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
1648         goto out;
1649 
1650     timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1651     color_fprintf(stdout, color, "  %12s secs ", stimestamp);
1652     if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
1653         const char *pid_color = color;
1654 
1655         if (thread__has_color(sched_in))
1656             pid_color = COLOR_PIDS;
1657 
1658         color_fprintf(stdout, pid_color, "%s => %s:%d",
1659                tr->shortname, thread__comm_str(sched_in), sched_in->tid);
1660         tr->comm_changed = false;
1661     }
1662 
1663     if (sched->map.comp && new_cpu)
1664         color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
1665 
1666 out:
1667     color_fprintf(stdout, color, "\n");
1668 
1669     thread__put(sched_in);
1670 
1671     return 0;
1672 }
1673 
1674 static int process_sched_switch_event(struct perf_tool *tool,
1675                       struct evsel *evsel,
1676                       struct perf_sample *sample,
1677                       struct machine *machine)
1678 {
1679     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1680     int this_cpu = sample->cpu, err = 0;
1681     u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1682         next_pid = evsel__intval(evsel, sample, "next_pid");
1683 
1684     if (sched->curr_pid[this_cpu] != (u32)-1) {
1685         /*
1686          * Are we trying to switch away a PID that is
1687          * not current?
1688          */
1689         if (sched->curr_pid[this_cpu] != prev_pid)
1690             sched->nr_context_switch_bugs++;
1691     }
1692 
1693     if (sched->tp_handler->switch_event)
1694         err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1695 
1696     sched->curr_pid[this_cpu] = next_pid;
1697     return err;
1698 }
1699 
1700 static int process_sched_runtime_event(struct perf_tool *tool,
1701                        struct evsel *evsel,
1702                        struct perf_sample *sample,
1703                        struct machine *machine)
1704 {
1705     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1706 
1707     if (sched->tp_handler->runtime_event)
1708         return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1709 
1710     return 0;
1711 }
1712 
1713 static int perf_sched__process_fork_event(struct perf_tool *tool,
1714                       union perf_event *event,
1715                       struct perf_sample *sample,
1716                       struct machine *machine)
1717 {
1718     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1719 
1720     /* run the fork event through the perf machinery */
1721     perf_event__process_fork(tool, event, sample, machine);
1722 
1723     /* and then run additional processing needed for this command */
1724     if (sched->tp_handler->fork_event)
1725         return sched->tp_handler->fork_event(sched, event, machine);
1726 
1727     return 0;
1728 }
1729 
1730 static int process_sched_migrate_task_event(struct perf_tool *tool,
1731                         struct evsel *evsel,
1732                         struct perf_sample *sample,
1733                         struct machine *machine)
1734 {
1735     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1736 
1737     if (sched->tp_handler->migrate_task_event)
1738         return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1739 
1740     return 0;
1741 }
1742 
1743 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1744                   struct evsel *evsel,
1745                   struct perf_sample *sample,
1746                   struct machine *machine);
1747 
1748 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1749                          union perf_event *event __maybe_unused,
1750                          struct perf_sample *sample,
1751                          struct evsel *evsel,
1752                          struct machine *machine)
1753 {
1754     int err = 0;
1755 
1756     if (evsel->handler != NULL) {
1757         tracepoint_handler f = evsel->handler;
1758         err = f(tool, evsel, sample, machine);
1759     }
1760 
1761     return err;
1762 }
1763 
1764 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
1765                     union perf_event *event,
1766                     struct perf_sample *sample,
1767                     struct machine *machine)
1768 {
1769     struct thread *thread;
1770     struct thread_runtime *tr;
1771     int err;
1772 
1773     err = perf_event__process_comm(tool, event, sample, machine);
1774     if (err)
1775         return err;
1776 
1777     thread = machine__find_thread(machine, sample->pid, sample->tid);
1778     if (!thread) {
1779         pr_err("Internal error: can't find thread\n");
1780         return -1;
1781     }
1782 
1783     tr = thread__get_runtime(thread);
1784     if (tr == NULL) {
1785         thread__put(thread);
1786         return -1;
1787     }
1788 
1789     tr->comm_changed = true;
1790     thread__put(thread);
1791 
1792     return 0;
1793 }
1794 
1795 static int perf_sched__read_events(struct perf_sched *sched)
1796 {
1797     const struct evsel_str_handler handlers[] = {
1798         { "sched:sched_switch",       process_sched_switch_event, },
1799         { "sched:sched_stat_runtime", process_sched_runtime_event, },
1800         { "sched:sched_wakeup",       process_sched_wakeup_event, },
1801         { "sched:sched_wakeup_new",   process_sched_wakeup_event, },
1802         { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1803     };
1804     struct perf_session *session;
1805     struct perf_data data = {
1806         .path  = input_name,
1807         .mode  = PERF_DATA_MODE_READ,
1808         .force = sched->force,
1809     };
1810     int rc = -1;
1811 
1812     session = perf_session__new(&data, &sched->tool);
1813     if (IS_ERR(session)) {
1814         pr_debug("Error creating perf session");
1815         return PTR_ERR(session);
1816     }
1817 
1818     symbol__init(&session->header.env);
1819 
1820     if (perf_session__set_tracepoints_handlers(session, handlers))
1821         goto out_delete;
1822 
1823     if (perf_session__has_traces(session, "record -R")) {
1824         int err = perf_session__process_events(session);
1825         if (err) {
1826             pr_err("Failed to process events, error %d", err);
1827             goto out_delete;
1828         }
1829 
1830         sched->nr_events      = session->evlist->stats.nr_events[0];
1831         sched->nr_lost_events = session->evlist->stats.total_lost;
1832         sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1833     }
1834 
1835     rc = 0;
1836 out_delete:
1837     perf_session__delete(session);
1838     return rc;
1839 }
1840 
1841 /*
1842  * scheduling times are printed as msec.usec
1843  */
1844 static inline void print_sched_time(unsigned long long nsecs, int width)
1845 {
1846     unsigned long msecs;
1847     unsigned long usecs;
1848 
1849     msecs  = nsecs / NSEC_PER_MSEC;
1850     nsecs -= msecs * NSEC_PER_MSEC;
1851     usecs  = nsecs / NSEC_PER_USEC;
1852     printf("%*lu.%03lu ", width, msecs, usecs);
1853 }
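/*
 * Worked example (hypothetical value): print_sched_time(1234567, 6) splits
 * 1,234,567 ns into 1 msec and 234 usec and prints "     1.234 " - the msec
 * part right-aligned in a field of width 6, followed by the usec part as
 * three digits after the decimal point.
 */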
1854 
1855 /*
1856  * returns runtime data for event, allocating memory for it the
1857  * first time it is used.
1858  */
1859 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1860 {
1861     struct evsel_runtime *r = evsel->priv;
1862 
1863     if (r == NULL) {
1864         r = zalloc(sizeof(struct evsel_runtime));
1865         evsel->priv = r;
1866     }
1867 
1868     return r;
1869 }
1870 
1871 /*
1872  * save last time event was seen per cpu
1873  */
1874 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
1875 {
1876     struct evsel_runtime *r = evsel__get_runtime(evsel);
1877 
1878     if (r == NULL)
1879         return;
1880 
1881     if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1882         int i, n = __roundup_pow_of_two(cpu+1);
1883         void *p = r->last_time;
1884 
1885         p = realloc(r->last_time, n * sizeof(u64));
1886         if (!p)
1887             return;
1888 
1889         r->last_time = p;
1890         for (i = r->ncpu; i < n; ++i)
1891             r->last_time[i] = (u64) 0;
1892 
1893         r->ncpu = n;
1894     }
1895 
1896     r->last_time[cpu] = timestamp;
1897 }
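/*
 * Illustrative growth of the per-cpu array (hypothetical sizes): if r->ncpu
 * is 4 and a sample arrives for cpu 5, n becomes __roundup_pow_of_two(6) == 8,
 * the array is reallocated to 8 entries, entries 4..7 are zeroed, and
 * last_time[5] is set to the new timestamp.
 */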
1898 
1899 /* returns last time this event was seen on the given cpu */
1900 static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
1901 {
1902     struct evsel_runtime *r = evsel__get_runtime(evsel);
1903 
1904     if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1905         return 0;
1906 
1907     return r->last_time[cpu];
1908 }
1909 
1910 static int comm_width = 30;
1911 
1912 static char *timehist_get_commstr(struct thread *thread)
1913 {
1914     static char str[32];
1915     const char *comm = thread__comm_str(thread);
1916     pid_t tid = thread->tid;
1917     pid_t pid = thread->pid_;
1918     int n;
1919 
1920     if (pid == 0)
1921         n = scnprintf(str, sizeof(str), "%s", comm);
1922 
1923     else if (tid != pid)
1924         n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1925 
1926     else
1927         n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
1928 
1929     if (n > comm_width)
1930         comm_width = n;
1931 
1932     return str;
1933 }
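/*
 * Example strings this produces (hypothetical tasks): a thread of a
 * multi-threaded process with comm "firefox", tid 1300 and pid 1234 is shown
 * as "firefox[1300/1234]"; a single-threaded task as "bash[1234]"; the idle
 * task (pid 0) as just its comm.  comm_width grows to fit the longest string.
 */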
1934 
1935 static void timehist_header(struct perf_sched *sched)
1936 {
1937     u32 ncpus = sched->max_cpu.cpu + 1;
1938     u32 i, j;
1939 
1940     printf("%15s %6s ", "time", "cpu");
1941 
1942     if (sched->show_cpu_visual) {
1943         printf(" ");
1944         for (i = 0, j = 0; i < ncpus; ++i) {
1945             printf("%x", j++);
1946             if (j > 15)
1947                 j = 0;
1948         }
1949         printf(" ");
1950     }
1951 
1952     printf(" %-*s  %9s  %9s  %9s", comm_width,
1953         "task name", "wait time", "sch delay", "run time");
1954 
1955     if (sched->show_state)
1956         printf("  %s", "state");
1957 
1958     printf("\n");
1959 
1960     /*
1961      * units row
1962      */
1963     printf("%15s %-6s ", "", "");
1964 
1965     if (sched->show_cpu_visual)
1966         printf(" %*s ", ncpus, "");
1967 
1968     printf(" %-*s  %9s  %9s  %9s", comm_width,
1969            "[tid/pid]", "(msec)", "(msec)", "(msec)");
1970 
1971     if (sched->show_state)
1972         printf("  %5s", "");
1973 
1974     printf("\n");
1975 
1976     /*
1977      * separator
1978      */
1979     printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
1980 
1981     if (sched->show_cpu_visual)
1982         printf(" %.*s ", ncpus, graph_dotted_line);
1983 
1984     printf(" %.*s  %.9s  %.9s  %.9s", comm_width,
1985         graph_dotted_line, graph_dotted_line, graph_dotted_line,
1986         graph_dotted_line);
1987 
1988     if (sched->show_state)
1989         printf("  %.5s", graph_dotted_line);
1990 
1991     printf("\n");
1992 }
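/*
 * Sketch of the resulting header (column spacing approximate, assuming
 * neither --state nor the CPU visual is enabled):
 *
 *            time    cpu  task name                       wait time  sch delay   run time
 *                         [tid/pid]                          (msec)     (msec)     (msec)
 * --------------- ------  ------------------------------  ---------  ---------  ---------
 */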
1993 
1994 static char task_state_char(struct thread *thread, int state)
1995 {
1996     static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1997     unsigned bit = state ? ffs(state) : 0;
1998 
1999     /* 'I' for idle */
2000     if (thread->tid == 0)
2001         return 'I';
2002 
2003     return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
2004 }
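/*
 * Worked example: with TASK_STATE_TO_CHAR_STR == "RSDTtZXxKWP", a prev_state
 * of 0 (TASK_RUNNING) maps to 'R', 1 (TASK_INTERRUPTIBLE) to 'S' and
 * 2 (TASK_UNINTERRUPTIBLE) to 'D'; unknown bits fall back to '?', and tid 0
 * is always reported as 'I' for idle.
 */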
2005 
2006 static void timehist_print_sample(struct perf_sched *sched,
2007                   struct evsel *evsel,
2008                   struct perf_sample *sample,
2009                   struct addr_location *al,
2010                   struct thread *thread,
2011                   u64 t, int state)
2012 {
2013     struct thread_runtime *tr = thread__priv(thread);
2014     const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2015     const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2016     u32 max_cpus = sched->max_cpu.cpu + 1;
2017     char tstr[64];
2018     char nstr[30];
2019     u64 wait_time;
2020 
2021     if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2022         return;
2023 
2024     timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2025     printf("%15s [%04d] ", tstr, sample->cpu);
2026 
2027     if (sched->show_cpu_visual) {
2028         u32 i;
2029         char c;
2030 
2031         printf(" ");
2032         for (i = 0; i < max_cpus; ++i) {
2033             /* flag idle times with 'i'; others are sched events */
2034             if (i == sample->cpu)
2035                 c = (thread->tid == 0) ? 'i' : 's';
2036             else
2037                 c = ' ';
2038             printf("%c", c);
2039         }
2040         printf(" ");
2041     }
2042 
2043     printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2044 
2045     wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2046     print_sched_time(wait_time, 6);
2047 
2048     print_sched_time(tr->dt_delay, 6);
2049     print_sched_time(tr->dt_run, 6);
2050 
2051     if (sched->show_state)
2052         printf(" %5c ", task_state_char(thread, state));
2053 
2054     if (sched->show_next) {
2055         snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2056         printf(" %-*s", comm_width, nstr);
2057     }
2058 
2059     if (sched->show_wakeups && !sched->show_next)
2060         printf("  %-*s", comm_width, "");
2061 
2062     if (thread->tid == 0)
2063         goto out;
2064 
2065     if (sched->show_callchain)
2066         printf("  ");
2067 
2068     sample__fprintf_sym(sample, al, 0,
2069                 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2070                 EVSEL__PRINT_CALLCHAIN_ARROW |
2071                 EVSEL__PRINT_SKIP_IGNORED,
2072                 &callchain_cursor, symbol_conf.bt_stop_list,  stdout);
2073 
2074 out:
2075     printf("\n");
2076 }
2077 
2078 /*
2079  * Explanation of delta-time stats:
2080  *
2081  *            t = time of current schedule out event
2082  *        tprev = time of previous sched out event
2083  *                also time of schedule-in event for current task
2084  *    last_time = time of last sched change event for current task
2085  *                (i.e., time the process was last scheduled out)
2086  * ready_to_run = time of wakeup for current task
2087  *
2088  * -----|------------|------------|------------|------
2089  *    last         ready        tprev          t
2090  *    time         to run
2091  *
2092  *      |-------- dt_wait --------|
2093  *                   |- dt_delay -|-- dt_run --|
2094  *
2095  *   dt_run = run time of current task
2096  *  dt_wait = time between last schedule out event for task and tprev
2097  *            represents time spent off the cpu
2098  * dt_delay = time between wakeup and schedule-in of task
2099  */
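/*
 * Worked example with hypothetical timestamps (nanoseconds):
 *   last_time = 1000, ready_to_run = 1500, tprev = 1800, t = 2000
 * gives dt_run = t - tprev = 200, dt_delay = tprev - ready_to_run = 300,
 * and a wait of tprev - last_time = 800 that is accounted to dt_preempt,
 * dt_iowait or dt_sleep depending on the task's last_state.
 */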
2100 
2101 static void timehist_update_runtime_stats(struct thread_runtime *r,
2102                      u64 t, u64 tprev)
2103 {
2104     r->dt_delay   = 0;
2105     r->dt_sleep   = 0;
2106     r->dt_iowait  = 0;
2107     r->dt_preempt = 0;
2108     r->dt_run     = 0;
2109 
2110     if (tprev) {
2111         r->dt_run = t - tprev;
2112         if (r->ready_to_run) {
2113             if (r->ready_to_run > tprev)
2114                 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2115             else
2116                 r->dt_delay = tprev - r->ready_to_run;
2117         }
2118 
2119         if (r->last_time > tprev)
2120             pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2121         else if (r->last_time) {
2122             u64 dt_wait = tprev - r->last_time;
2123 
2124             if (r->last_state == TASK_RUNNING)
2125                 r->dt_preempt = dt_wait;
2126             else if (r->last_state == TASK_UNINTERRUPTIBLE)
2127                 r->dt_iowait = dt_wait;
2128             else
2129                 r->dt_sleep = dt_wait;
2130         }
2131     }
2132 
2133     update_stats(&r->run_stats, r->dt_run);
2134 
2135     r->total_run_time     += r->dt_run;
2136     r->total_delay_time   += r->dt_delay;
2137     r->total_sleep_time   += r->dt_sleep;
2138     r->total_iowait_time  += r->dt_iowait;
2139     r->total_preempt_time += r->dt_preempt;
2140 }
2141 
2142 static bool is_idle_sample(struct perf_sample *sample,
2143                struct evsel *evsel)
2144 {
2145     /* pid 0 == swapper == idle task */
2146     if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
2147         return evsel__intval(evsel, sample, "prev_pid") == 0;
2148 
2149     return sample->pid == 0;
2150 }
2151 
2152 static void save_task_callchain(struct perf_sched *sched,
2153                 struct perf_sample *sample,
2154                 struct evsel *evsel,
2155                 struct machine *machine)
2156 {
2157     struct callchain_cursor *cursor = &callchain_cursor;
2158     struct thread *thread;
2159 
2160     /* want main thread for process - has maps */
2161     thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2162     if (thread == NULL) {
2163         pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2164         return;
2165     }
2166 
2167     if (!sched->show_callchain || sample->callchain == NULL)
2168         return;
2169 
2170     if (thread__resolve_callchain(thread, cursor, evsel, sample,
2171                       NULL, NULL, sched->max_stack + 2) != 0) {
2172         if (verbose > 0)
2173             pr_err("Failed to resolve callchain. Skipping\n");
2174 
2175         return;
2176     }
2177 
2178     callchain_cursor_commit(cursor);
2179 
2180     while (true) {
2181         struct callchain_cursor_node *node;
2182         struct symbol *sym;
2183 
2184         node = callchain_cursor_current(cursor);
2185         if (node == NULL)
2186             break;
2187 
2188         sym = node->ms.sym;
2189         if (sym) {
2190             if (!strcmp(sym->name, "schedule") ||
2191                 !strcmp(sym->name, "__schedule") ||
2192                 !strcmp(sym->name, "preempt_schedule"))
2193                 sym->ignore = 1;
2194         }
2195 
2196         callchain_cursor_advance(cursor);
2197     }
2198 }
2199 
2200 static int init_idle_thread(struct thread *thread)
2201 {
2202     struct idle_thread_runtime *itr;
2203 
2204     thread__set_comm(thread, idle_comm, 0);
2205 
2206     itr = zalloc(sizeof(*itr));
2207     if (itr == NULL)
2208         return -ENOMEM;
2209 
2210     init_stats(&itr->tr.run_stats);
2211     callchain_init(&itr->callchain);
2212     callchain_cursor_reset(&itr->cursor);
2213     thread__set_priv(thread, itr);
2214 
2215     return 0;
2216 }
2217 
2218 /*
2219  * Track idle stats per cpu by maintaining a local thread
2220  * struct for the idle task on each cpu.
2221  */
2222 static int init_idle_threads(int ncpu)
2223 {
2224     int i, ret;
2225 
2226     idle_threads = zalloc(ncpu * sizeof(struct thread *));
2227     if (!idle_threads)
2228         return -ENOMEM;
2229 
2230     idle_max_cpu = ncpu;
2231 
2232     /* allocate the actual thread struct if needed */
2233     for (i = 0; i < ncpu; ++i) {
2234         idle_threads[i] = thread__new(0, 0);
2235         if (idle_threads[i] == NULL)
2236             return -ENOMEM;
2237 
2238         ret = init_idle_thread(idle_threads[i]);
2239         if (ret < 0)
2240             return ret;
2241     }
2242 
2243     return 0;
2244 }
2245 
2246 static void free_idle_threads(void)
2247 {
2248     int i;
2249 
2250     if (idle_threads == NULL)
2251         return;
2252 
2253     for (i = 0; i < idle_max_cpu; ++i) {
2254         if ((idle_threads[i]))
2255             thread__delete(idle_threads[i]);
2256     }
2257 
2258     free(idle_threads);
2259 }
2260 
2261 static struct thread *get_idle_thread(int cpu)
2262 {
2263     /*
2264      * expand/allocate array of pointers to local thread
2265      * structs if needed
2266      */
2267     if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2268         int i, j = __roundup_pow_of_two(cpu+1);
2269         void *p;
2270 
2271         p = realloc(idle_threads, j * sizeof(struct thread *));
2272         if (!p)
2273             return NULL;
2274 
2275         idle_threads = (struct thread **) p;
2276         for (i = idle_max_cpu; i < j; ++i)
2277             idle_threads[i] = NULL;
2278 
2279         idle_max_cpu = j;
2280     }
2281 
2282     /* allocate a new thread struct if needed */
2283     if (idle_threads[cpu] == NULL) {
2284         idle_threads[cpu] = thread__new(0, 0);
2285 
2286         if (idle_threads[cpu]) {
2287             if (init_idle_thread(idle_threads[cpu]) < 0)
2288                 return NULL;
2289         }
2290     }
2291 
2292     return idle_threads[cpu];
2293 }
2294 
2295 static void save_idle_callchain(struct perf_sched *sched,
2296                 struct idle_thread_runtime *itr,
2297                 struct perf_sample *sample)
2298 {
2299     if (!sched->show_callchain || sample->callchain == NULL)
2300         return;
2301 
2302     callchain_cursor__copy(&itr->cursor, &callchain_cursor);
2303 }
2304 
2305 static struct thread *timehist_get_thread(struct perf_sched *sched,
2306                       struct perf_sample *sample,
2307                       struct machine *machine,
2308                       struct evsel *evsel)
2309 {
2310     struct thread *thread;
2311 
2312     if (is_idle_sample(sample, evsel)) {
2313         thread = get_idle_thread(sample->cpu);
2314         if (thread == NULL)
2315             pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2316 
2317     } else {
2318         /* there were samples with tid 0 but non-zero pid */
2319         thread = machine__findnew_thread(machine, sample->pid,
2320                          sample->tid ?: sample->pid);
2321         if (thread == NULL) {
2322             pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2323                  sample->tid);
2324         }
2325 
2326         save_task_callchain(sched, sample, evsel, machine);
2327         if (sched->idle_hist) {
2328             struct thread *idle;
2329             struct idle_thread_runtime *itr;
2330 
2331             idle = get_idle_thread(sample->cpu);
2332             if (idle == NULL) {
2333                 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2334                 return NULL;
2335             }
2336 
2337             itr = thread__priv(idle);
2338             if (itr == NULL)
2339                 return NULL;
2340 
2341             itr->last_thread = thread;
2342 
2343             /* copy task callchain when switching to idle */
2344             if (evsel__intval(evsel, sample, "next_pid") == 0)
2345                 save_idle_callchain(sched, itr, sample);
2346         }
2347     }
2348 
2349     return thread;
2350 }
2351 
2352 static bool timehist_skip_sample(struct perf_sched *sched,
2353                  struct thread *thread,
2354                  struct evsel *evsel,
2355                  struct perf_sample *sample)
2356 {
2357     bool rc = false;
2358 
2359     if (thread__is_filtered(thread)) {
2360         rc = true;
2361         sched->skipped_samples++;
2362     }
2363 
2364     if (sched->idle_hist) {
2365         if (strcmp(evsel__name(evsel), "sched:sched_switch"))
2366             rc = true;
2367         else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2368              evsel__intval(evsel, sample, "next_pid") != 0)
2369             rc = true;
2370     }
2371 
2372     return rc;
2373 }
2374 
2375 static void timehist_print_wakeup_event(struct perf_sched *sched,
2376                     struct evsel *evsel,
2377                     struct perf_sample *sample,
2378                     struct machine *machine,
2379                     struct thread *awakened)
2380 {
2381     struct thread *thread;
2382     char tstr[64];
2383 
2384     thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2385     if (thread == NULL)
2386         return;
2387 
2388     /* show wakeup unless both the awakened task and the waker are filtered */
2389     if (timehist_skip_sample(sched, thread, evsel, sample) &&
2390         timehist_skip_sample(sched, awakened, evsel, sample)) {
2391         return;
2392     }
2393 
2394     timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2395     printf("%15s [%04d] ", tstr, sample->cpu);
2396     if (sched->show_cpu_visual)
2397         printf(" %*s ", sched->max_cpu.cpu + 1, "");
2398 
2399     printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2400 
2401     /* dt spacer */
2402     printf("  %9s  %9s  %9s ", "", "", "");
2403 
2404     printf("awakened: %s", timehist_get_commstr(awakened));
2405 
2406     printf("\n");
2407 }
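/*
 * Example (hypothetical tasks): a wakeup of "sleep" (tid 5678) performed by
 * "bash" (tid 1234) on CPU 2 is printed as the timestamp, "[0002]", bash's
 * comm string, three blank time columns (to stay aligned with the
 * wait/delay/run columns of ordinary samples) and "awakened: sleep[5678]".
 */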
2408 
2409 static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
2410                     union perf_event *event __maybe_unused,
2411                     struct evsel *evsel __maybe_unused,
2412                     struct perf_sample *sample __maybe_unused,
2413                     struct machine *machine __maybe_unused)
2414 {
2415     return 0;
2416 }
2417 
2418 static int timehist_sched_wakeup_event(struct perf_tool *tool,
2419                        union perf_event *event __maybe_unused,
2420                        struct evsel *evsel,
2421                        struct perf_sample *sample,
2422                        struct machine *machine)
2423 {
2424     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2425     struct thread *thread;
2426     struct thread_runtime *tr = NULL;
2427     /* want pid of awakened task not pid in sample */
2428     const u32 pid = evsel__intval(evsel, sample, "pid");
2429 
2430     thread = machine__findnew_thread(machine, 0, pid);
2431     if (thread == NULL)
2432         return -1;
2433 
2434     tr = thread__get_runtime(thread);
2435     if (tr == NULL)
2436         return -1;
2437 
2438     if (tr->ready_to_run == 0)
2439         tr->ready_to_run = sample->time;
2440 
2441     /* show wakeups if requested */
2442     if (sched->show_wakeups &&
2443         !perf_time__skip_sample(&sched->ptime, sample->time))
2444         timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2445 
2446     return 0;
2447 }
2448 
2449 static void timehist_print_migration_event(struct perf_sched *sched,
2450                     struct evsel *evsel,
2451                     struct perf_sample *sample,
2452                     struct machine *machine,
2453                     struct thread *migrated)
2454 {
2455     struct thread *thread;
2456     char tstr[64];
2457     u32 max_cpus;
2458     u32 ocpu, dcpu;
2459 
2460     if (sched->summary_only)
2461         return;
2462 
2463     max_cpus = sched->max_cpu.cpu + 1;
2464     ocpu = evsel__intval(evsel, sample, "orig_cpu");
2465     dcpu = evsel__intval(evsel, sample, "dest_cpu");
2466 
2467     thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2468     if (thread == NULL)
2469         return;
2470 
2471     if (timehist_skip_sample(sched, thread, evsel, sample) &&
2472         timehist_skip_sample(sched, migrated, evsel, sample)) {
2473         return;
2474     }
2475 
2476     timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2477     printf("%15s [%04d] ", tstr, sample->cpu);
2478 
2479     if (sched->show_cpu_visual) {
2480         u32 i;
2481         char c;
2482 
2483         printf("  ");
2484         for (i = 0; i < max_cpus; ++i) {
2485             c = (i == sample->cpu) ? 'm' : ' ';
2486             printf("%c", c);
2487         }
2488         printf("  ");
2489     }
2490 
2491     printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2492 
2493     /* dt spacer */
2494     printf("  %9s  %9s  %9s ", "", "", "");
2495 
2496     printf("migrated: %s", timehist_get_commstr(migrated));
2497     printf(" cpu %d => %d", ocpu, dcpu);
2498 
2499     printf("\n");
2500 }
2501 
2502 static int timehist_migrate_task_event(struct perf_tool *tool,
2503                        union perf_event *event __maybe_unused,
2504                        struct evsel *evsel,
2505                        struct perf_sample *sample,
2506                        struct machine *machine)
2507 {
2508     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2509     struct thread *thread;
2510     struct thread_runtime *tr = NULL;
2511     /* want pid of migrated task not pid in sample */
2512     const u32 pid = evsel__intval(evsel, sample, "pid");
2513 
2514     thread = machine__findnew_thread(machine, 0, pid);
2515     if (thread == NULL)
2516         return -1;
2517 
2518     tr = thread__get_runtime(thread);
2519     if (tr == NULL)
2520         return -1;
2521 
2522     tr->migrations++;
2523 
2524     /* show migrations if requested */
2525     timehist_print_migration_event(sched, evsel, sample, machine, thread);
2526 
2527     return 0;
2528 }
2529 
2530 static int timehist_sched_change_event(struct perf_tool *tool,
2531                        union perf_event *event,
2532                        struct evsel *evsel,
2533                        struct perf_sample *sample,
2534                        struct machine *machine)
2535 {
2536     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2537     struct perf_time_interval *ptime = &sched->ptime;
2538     struct addr_location al;
2539     struct thread *thread;
2540     struct thread_runtime *tr = NULL;
2541     u64 tprev, t = sample->time;
2542     int rc = 0;
2543     int state = evsel__intval(evsel, sample, "prev_state");
2544 
2545     if (machine__resolve(machine, &al, sample) < 0) {
2546         pr_err("problem processing %d event. skipping it\n",
2547                event->header.type);
2548         rc = -1;
2549         goto out;
2550     }
2551 
2552     thread = timehist_get_thread(sched, sample, machine, evsel);
2553     if (thread == NULL) {
2554         rc = -1;
2555         goto out;
2556     }
2557 
2558     if (timehist_skip_sample(sched, thread, evsel, sample))
2559         goto out;
2560 
2561     tr = thread__get_runtime(thread);
2562     if (tr == NULL) {
2563         rc = -1;
2564         goto out;
2565     }
2566 
2567     tprev = evsel__get_time(evsel, sample->cpu);
2568 
2569     /*
2570      * If start time given:
2571      * - sample time is before the window the user cares about - skip sample
2572      * - tprev is before the window - reset it to the start of the window
2573      */
2574     if (ptime->start && ptime->start > t)
2575         goto out;
2576 
2577     if (tprev && ptime->start > tprev)
2578         tprev = ptime->start;
2579 
2580     /*
2581      * If end time given:
2582      * - previous sched event is out of window - we are done
2583      * - sample time is beyond the window the user cares about - reset it
2584      *   to the end of the window to close out stats for the window of interest
2585      */
2586     if (ptime->end) {
2587         if (tprev > ptime->end)
2588             goto out;
2589 
2590         if (t > ptime->end)
2591             t = ptime->end;
2592     }
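    /*
     * Worked example (hypothetical times): with a window ending at 200 usec,
     * a sample at t = 250 usec whose tprev is 180 usec gets t clamped to 200
     * so the 180-200 interval is still accounted, while a sample whose tprev
     * is already past 200 is skipped entirely.
     */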
2593 
2594     if (!sched->idle_hist || thread->tid == 0) {
2595         if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2596             timehist_update_runtime_stats(tr, t, tprev);
2597 
2598         if (sched->idle_hist) {
2599             struct idle_thread_runtime *itr = (void *)tr;
2600             struct thread_runtime *last_tr;
2601 
2602             BUG_ON(thread->tid != 0);
2603 
2604             if (itr->last_thread == NULL)
2605                 goto out;
2606 
2607             /* add current idle time as last thread's runtime */
2608             last_tr = thread__get_runtime(itr->last_thread);
2609             if (last_tr == NULL)
2610                 goto out;
2611 
2612             timehist_update_runtime_stats(last_tr, t, tprev);
2613             /*
2614              * remove delta time of last thread as it's not updated
2615              * and otherwise it will show an invalid value next
2616              * time.  we only care about total run time and run stats.
2617              */
2618             last_tr->dt_run = 0;
2619             last_tr->dt_delay = 0;
2620             last_tr->dt_sleep = 0;
2621             last_tr->dt_iowait = 0;
2622             last_tr->dt_preempt = 0;
2623 
2624             if (itr->cursor.nr)
2625                 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2626 
2627             itr->last_thread = NULL;
2628         }
2629     }
2630 
2631     if (!sched->summary_only)
2632         timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2633 
2634 out:
2635     if (sched->hist_time.start == 0 && t >= ptime->start)
2636         sched->hist_time.start = t;
2637     if (ptime->end == 0 || t <= ptime->end)
2638         sched->hist_time.end = t;
2639 
2640     if (tr) {
2641         /* time of this sched_switch event becomes last time task seen */
2642         tr->last_time = sample->time;
2643 
2644         /* last state is used to determine where to account wait time */
2645         tr->last_state = state;
2646 
2647         /* sched out event for task so reset ready to run time */
2648         tr->ready_to_run = 0;
2649     }
2650 
2651     evsel__save_time(evsel, sample->time, sample->cpu);
2652 
2653     return rc;
2654 }
2655 
2656 static int timehist_sched_switch_event(struct perf_tool *tool,
2657                  union perf_event *event,
2658                  struct evsel *evsel,
2659                  struct perf_sample *sample,
2660                  struct machine *machine __maybe_unused)
2661 {
2662     return timehist_sched_change_event(tool, event, evsel, sample, machine);
2663 }
2664 
2665 static int process_lost(struct perf_tool *tool __maybe_unused,
2666             union perf_event *event,
2667             struct perf_sample *sample,
2668             struct machine *machine __maybe_unused)
2669 {
2670     char tstr[64];
2671 
2672     timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2673     printf("%15s ", tstr);
2674     printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2675 
2676     return 0;
2677 }
2678 
2679 
2680 static void print_thread_runtime(struct thread *t,
2681                  struct thread_runtime *r)
2682 {
2683     double mean = avg_stats(&r->run_stats);
2684     float stddev;
2685 
2686     printf("%*s   %5d  %9" PRIu64 " ",
2687            comm_width, timehist_get_commstr(t), t->ppid,
2688            (u64) r->run_stats.n);
2689 
2690     print_sched_time(r->total_run_time, 8);
2691     stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2692     print_sched_time(r->run_stats.min, 6);
2693     printf(" ");
2694     print_sched_time((u64) mean, 6);
2695     printf(" ");
2696     print_sched_time(r->run_stats.max, 6);
2697     printf("  ");
2698     printf("%5.2f", stddev);
2699     printf("   %5" PRIu64, r->migrations);
2700     printf("\n");
2701 }
2702 
2703 static void print_thread_waittime(struct thread *t,
2704                   struct thread_runtime *r)
2705 {
2706     printf("%*s   %5d  %9" PRIu64 " ",
2707            comm_width, timehist_get_commstr(t), t->ppid,
2708            (u64) r->run_stats.n);
2709 
2710     print_sched_time(r->total_run_time, 8);
2711     print_sched_time(r->total_sleep_time, 6);
2712     printf(" ");
2713     print_sched_time(r->total_iowait_time, 6);
2714     printf(" ");
2715     print_sched_time(r->total_preempt_time, 6);
2716     printf(" ");
2717     print_sched_time(r->total_delay_time, 6);
2718     printf("\n");
2719 }
2720 
2721 struct total_run_stats {
2722     struct perf_sched *sched;
2723     u64  sched_count;
2724     u64  task_count;
2725     u64  total_run_time;
2726 };
2727 
2728 static int __show_thread_runtime(struct thread *t, void *priv)
2729 {
2730     struct total_run_stats *stats = priv;
2731     struct thread_runtime *r;
2732 
2733     if (thread__is_filtered(t))
2734         return 0;
2735 
2736     r = thread__priv(t);
2737     if (r && r->run_stats.n) {
2738         stats->task_count++;
2739         stats->sched_count += r->run_stats.n;
2740         stats->total_run_time += r->total_run_time;
2741 
2742         if (stats->sched->show_state)
2743             print_thread_waittime(t, r);
2744         else
2745             print_thread_runtime(t, r);
2746     }
2747 
2748     return 0;
2749 }
2750 
2751 static int show_thread_runtime(struct thread *t, void *priv)
2752 {
2753     if (t->dead)
2754         return 0;
2755 
2756     return __show_thread_runtime(t, priv);
2757 }
2758 
2759 static int show_deadthread_runtime(struct thread *t, void *priv)
2760 {
2761     if (!t->dead)
2762         return 0;
2763 
2764     return __show_thread_runtime(t, priv);
2765 }
2766 
2767 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2768 {
2769     const char *sep = " <- ";
2770     struct callchain_list *chain;
2771     size_t ret = 0;
2772     char bf[1024];
2773     bool first;
2774 
2775     if (node == NULL)
2776         return 0;
2777 
2778     ret = callchain__fprintf_folded(fp, node->parent);
2779     first = (ret == 0);
2780 
2781     list_for_each_entry(chain, &node->val, list) {
2782         if (chain->ip >= PERF_CONTEXT_MAX)
2783             continue;
2784         if (chain->ms.sym && chain->ms.sym->ignore)
2785             continue;
2786         ret += fprintf(fp, "%s%s", first ? "" : sep,
2787                    callchain_list__sym_name(chain, bf, sizeof(bf),
2788                             false));
2789         first = false;
2790     }
2791 
2792     return ret;
2793 }
2794 
2795 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2796 {
2797     size_t ret = 0;
2798     FILE *fp = stdout;
2799     struct callchain_node *chain;
2800     struct rb_node *rb_node = rb_first_cached(root);
2801 
2802     printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
2803     printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
2804            graph_dotted_line);
2805 
2806     while (rb_node) {
2807         chain = rb_entry(rb_node, struct callchain_node, rb_node);
2808         rb_node = rb_next(rb_node);
2809 
2810         ret += fprintf(fp, "  ");
2811         print_sched_time(chain->hit, 12);
2812         ret += 16;  /* print_sched_time returns 2nd arg + 4 */
2813         ret += fprintf(fp, " %8d  ", chain->count);
2814         ret += callchain__fprintf_folded(fp, chain);
2815         ret += fprintf(fp, "\n");
2816     }
2817 
2818     return ret;
2819 }
2820 
2821 static void timehist_print_summary(struct perf_sched *sched,
2822                    struct perf_session *session)
2823 {
2824     struct machine *m = &session->machines.host;
2825     struct total_run_stats totals;
2826     u64 task_count;
2827     struct thread *t;
2828     struct thread_runtime *r;
2829     int i;
2830     u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2831 
2832     memset(&totals, 0, sizeof(totals));
2833     totals.sched = sched;
2834 
2835     if (sched->idle_hist) {
2836         printf("\nIdle-time summary\n");
2837         printf("%*s  parent  sched-out  ", comm_width, "comm");
2838         printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
2839     } else if (sched->show_state) {
2840         printf("\nWait-time summary\n");
2841         printf("%*s  parent   sched-in  ", comm_width, "comm");
2842         printf("   run-time      sleep      iowait     preempt       delay\n");
2843     } else {
2844         printf("\nRuntime summary\n");
2845         printf("%*s  parent   sched-in  ", comm_width, "comm");
2846         printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
2847     }
2848     printf("%*s            (count)  ", comm_width, "");
2849     printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
2850            sched->show_state ? "(msec)" : "%");
2851     printf("%.117s\n", graph_dotted_line);
2852 
2853     machine__for_each_thread(m, show_thread_runtime, &totals);
2854     task_count = totals.task_count;
2855     if (!task_count)
2856         printf("<no still running tasks>\n");
2857 
2858     printf("\nTerminated tasks:\n");
2859     machine__for_each_thread(m, show_deadthread_runtime, &totals);
2860     if (task_count == totals.task_count)
2861         printf("<no terminated tasks>\n");
2862 
2863     /* CPU idle stats not tracked when samples were skipped */
2864     if (sched->skipped_samples && !sched->idle_hist)
2865         return;
2866 
2867     printf("\nIdle stats:\n");
2868     for (i = 0; i < idle_max_cpu; ++i) {
2869         if (cpu_list && !test_bit(i, cpu_bitmap))
2870             continue;
2871 
2872         t = idle_threads[i];
2873         if (!t)
2874             continue;
2875 
2876         r = thread__priv(t);
2877         if (r && r->run_stats.n) {
2878             totals.sched_count += r->run_stats.n;
2879             printf("    CPU %2d idle for ", i);
2880             print_sched_time(r->total_run_time, 6);
2881             printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2882         } else
2883             printf("    CPU %2d idle entire time window\n", i);
2884     }
2885 
2886     if (sched->idle_hist && sched->show_callchain) {
2887         callchain_param.mode  = CHAIN_FOLDED;
2888         callchain_param.value = CCVAL_PERIOD;
2889 
2890         callchain_register_param(&callchain_param);
2891 
2892         printf("\nIdle stats by callchain:\n");
2893         for (i = 0; i < idle_max_cpu; ++i) {
2894             struct idle_thread_runtime *itr;
2895 
2896             t = idle_threads[i];
2897             if (!t)
2898                 continue;
2899 
2900             itr = thread__priv(t);
2901             if (itr == NULL)
2902                 continue;
2903 
2904             callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2905                          0, &callchain_param);
2906 
2907             printf("  CPU %2d:", i);
2908             print_sched_time(itr->tr.total_run_time, 6);
2909             printf(" msec\n");
2910             timehist_print_idlehist_callchain(&itr->sorted_root);
2911             printf("\n");
2912         }
2913     }
2914 
2915     printf("\n"
2916            "    Total number of unique tasks: %" PRIu64 "\n"
2917            "Total number of context switches: %" PRIu64 "\n",
2918            totals.task_count, totals.sched_count);
2919 
2920     printf("           Total run time (msec): ");
2921     print_sched_time(totals.total_run_time, 2);
2922     printf("\n");
2923 
2924     printf("    Total scheduling time (msec): ");
2925     print_sched_time(hist_time, 2);
2926     printf(" (x %d)\n", sched->max_cpu.cpu);
2927 }
2928 
2929 typedef int (*sched_handler)(struct perf_tool *tool,
2930               union perf_event *event,
2931               struct evsel *evsel,
2932               struct perf_sample *sample,
2933               struct machine *machine);
2934 
2935 static int perf_timehist__process_sample(struct perf_tool *tool,
2936                      union perf_event *event,
2937                      struct perf_sample *sample,
2938                      struct evsel *evsel,
2939                      struct machine *machine)
2940 {
2941     struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2942     int err = 0;
2943     struct perf_cpu this_cpu = {
2944         .cpu = sample->cpu,
2945     };
2946 
2947     if (this_cpu.cpu > sched->max_cpu.cpu)
2948         sched->max_cpu = this_cpu;
2949 
2950     if (evsel->handler != NULL) {
2951         sched_handler f = evsel->handler;
2952 
2953         err = f(tool, event, evsel, sample, machine);
2954     }
2955 
2956     return err;
2957 }
2958 
2959 static int timehist_check_attr(struct perf_sched *sched,
2960                    struct evlist *evlist)
2961 {
2962     struct evsel *evsel;
2963     struct evsel_runtime *er;
2964 
2965     list_for_each_entry(evsel, &evlist->core.entries, core.node) {
2966         er = evsel__get_runtime(evsel);
2967         if (er == NULL) {
2968             pr_err("Failed to allocate memory for evsel runtime data\n");
2969             return -1;
2970         }
2971 
2972         if (sched->show_callchain && !evsel__has_callchain(evsel)) {
2973             pr_info("Samples do not have callchains.\n");
2974             sched->show_callchain = 0;
2975             symbol_conf.use_callchain = 0;
2976         }
2977     }
2978 
2979     return 0;
2980 }
2981 
2982 static int perf_sched__timehist(struct perf_sched *sched)
2983 {
2984     struct evsel_str_handler handlers[] = {
2985         { "sched:sched_switch",       timehist_sched_switch_event, },
2986         { "sched:sched_wakeup",       timehist_sched_wakeup_event, },
2987         { "sched:sched_waking",       timehist_sched_wakeup_event, },
2988         { "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
2989     };
2990     const struct evsel_str_handler migrate_handlers[] = {
2991         { "sched:sched_migrate_task", timehist_migrate_task_event, },
2992     };
2993     struct perf_data data = {
2994         .path  = input_name,
2995         .mode  = PERF_DATA_MODE_READ,
2996         .force = sched->force,
2997     };
2998 
2999     struct perf_session *session;
3000     struct evlist *evlist;
3001     int err = -1;
3002 
3003     /*
3004      * event handlers for timehist option
3005      */
3006     sched->tool.sample   = perf_timehist__process_sample;
3007     sched->tool.mmap     = perf_event__process_mmap;
3008     sched->tool.comm     = perf_event__process_comm;
3009     sched->tool.exit     = perf_event__process_exit;
3010     sched->tool.fork     = perf_event__process_fork;
3011     sched->tool.lost     = process_lost;
3012     sched->tool.attr     = perf_event__process_attr;
3013     sched->tool.tracing_data = perf_event__process_tracing_data;
3014     sched->tool.build_id     = perf_event__process_build_id;
3015 
3016     sched->tool.ordered_events = true;
3017     sched->tool.ordering_requires_timestamps = true;
3018 
3019     symbol_conf.use_callchain = sched->show_callchain;
3020 
3021     session = perf_session__new(&data, &sched->tool);
3022     if (IS_ERR(session))
3023         return PTR_ERR(session);
3024 
3025     if (cpu_list) {
3026         err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3027         if (err < 0)
3028             goto out;
3029     }
3030 
3031     evlist = session->evlist;
3032 
3033     symbol__init(&session->header.env);
3034 
3035     if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3036         pr_err("Invalid time string\n");
3037         return -EINVAL;
3038     }
3039 
3040     if (timehist_check_attr(sched, evlist) != 0)
3041         goto out;
3042 
3043     setup_pager();
3044 
3045     /* prefer sched_waking if it is captured */
3046     if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3047         handlers[1].handler = timehist_sched_wakeup_ignore;
3048 
3049     /* setup per-evsel handlers */
3050     if (perf_session__set_tracepoints_handlers(session, handlers))
3051         goto out;
3052 
3053     /* sched_switch event at a minimum needs to exist */
3054     if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3055         pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3056         goto out;
3057     }
3058 
3059     if (sched->show_migrations &&
3060         perf_session__set_tracepoints_handlers(session, migrate_handlers))
3061         goto out;
3062 
3063     /* pre-allocate struct for per-CPU idle stats */
3064     sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3065     if (sched->max_cpu.cpu == 0)
3066         sched->max_cpu.cpu = 4;
3067     if (init_idle_threads(sched->max_cpu.cpu))
3068         goto out;
3069 
3070     /* summary_only implies summary option, but don't overwrite summary if set */
3071     if (sched->summary_only)
3072         sched->summary = sched->summary_only;
3073 
3074     if (!sched->summary_only)
3075         timehist_header(sched);
3076 
3077     err = perf_session__process_events(session);
3078     if (err) {
3079         pr_err("Failed to process events, error %d", err);
3080         goto out;
3081     }
3082 
3083     sched->nr_events      = evlist->stats.nr_events[0];
3084     sched->nr_lost_events = evlist->stats.total_lost;
3085     sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3086 
3087     if (sched->summary)
3088         timehist_print_summary(sched, session);
3089 
3090 out:
3091     free_idle_threads();
3092     perf_session__delete(session);
3093 
3094     return err;
3095 }
3096 
3097 
3098 static void print_bad_events(struct perf_sched *sched)
3099 {
3100     if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3101         printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3102             (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3103             sched->nr_unordered_timestamps, sched->nr_timestamps);
3104     }
3105     if (sched->nr_lost_events && sched->nr_events) {
3106         printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3107             (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3108             sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3109     }
3110     if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3111         printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
3112             (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3113             sched->nr_context_switch_bugs, sched->nr_timestamps);
3114         if (sched->nr_lost_events)
3115             printf(" (due to lost events?)");
3116         printf("\n");
3117     }
3118 }
3119 
3120 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3121 {
3122     struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3123     struct work_atoms *this;
3124     const char *comm = thread__comm_str(data->thread), *this_comm;
3125     bool leftmost = true;
3126 
3127     while (*new) {
3128         int cmp;
3129 
3130         this = container_of(*new, struct work_atoms, node);
3131         parent = *new;
3132 
3133         this_comm = thread__comm_str(this->thread);
3134         cmp = strcmp(comm, this_comm);
3135         if (cmp > 0) {
3136             new = &((*new)->rb_left);
3137         } else if (cmp < 0) {
3138             new = &((*new)->rb_right);
3139             leftmost = false;
3140         } else {
3141             this->num_merged++;
3142             this->total_runtime += data->total_runtime;
3143             this->nb_atoms += data->nb_atoms;
3144             this->total_lat += data->total_lat;
3145             list_splice(&data->work_list, &this->work_list);
3146             if (this->max_lat < data->max_lat) {
3147                 this->max_lat = data->max_lat;
3148                 this->max_lat_start = data->max_lat_start;
3149                 this->max_lat_end = data->max_lat_end;
3150             }
3151             zfree(&data);
3152             return;
3153         }
3154     }
3155 
3156     data->num_merged++;
3157     rb_link_node(&data->node, parent, new);
3158     rb_insert_color_cached(&data->node, root, leftmost);
3159 }
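/*
 * Illustrative merge (hypothetical threads): if two threads both have the
 * comm "kworker/0:1", the second work_atoms entry is folded into the first:
 * total_runtime, nb_atoms and total_lat are summed, the work_list entries
 * are spliced together, and the larger max_lat (with its start/end stamps)
 * is kept, so 'perf sched latency' reports one merged line per comm unless
 * -p/--pids is given.
 */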
3160 
3161 static void perf_sched__merge_lat(struct perf_sched *sched)
3162 {
3163     struct work_atoms *data;
3164     struct rb_node *node;
3165 
3166     if (sched->skip_merge)
3167         return;
3168 
3169     while ((node = rb_first_cached(&sched->atom_root))) {
3170         rb_erase_cached(node, &sched->atom_root);
3171         data = rb_entry(node, struct work_atoms, node);
3172         __merge_work_atoms(&sched->merged_atom_root, data);
3173     }
3174 }
3175 
3176 static int perf_sched__lat(struct perf_sched *sched)
3177 {
3178     struct rb_node *next;
3179 
3180     setup_pager();
3181 
3182     if (perf_sched__read_events(sched))
3183         return -1;
3184 
3185     perf_sched__merge_lat(sched);
3186     perf_sched__sort_lat(sched);
3187 
3188     printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3189     printf("  Task                  |   Runtime ms  | Switches | Avg delay ms    | Max delay ms    | Max delay start           | Max delay end          |\n");
3190     printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3191 
3192     next = rb_first_cached(&sched->sorted_atom_root);
3193 
3194     while (next) {
3195         struct work_atoms *work_list;
3196 
3197         work_list = rb_entry(next, struct work_atoms, node);
3198         output_lat_thread(sched, work_list);
3199         next = rb_next(next);
3200         thread__zput(work_list->thread);
3201     }
3202 
3203     printf(" -----------------------------------------------------------------------------------------------------------------\n");
3204     printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
3205         (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3206 
3207     printf(" ---------------------------------------------------\n");
3208 
3209     print_bad_events(sched);
3210     printf("\n");
3211 
3212     return 0;
3213 }
3214 
3215 static int setup_map_cpus(struct perf_sched *sched)
3216 {
3217     struct perf_cpu_map *map;
3218 
3219     sched->max_cpu.cpu  = sysconf(_SC_NPROCESSORS_CONF);
3220 
3221     if (sched->map.comp) {
3222         sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3223         if (!sched->map.comp_cpus)
3224             return -1;
3225     }
3226 
3227     if (!sched->map.cpus_str)
3228         return 0;
3229 
3230     map = perf_cpu_map__new(sched->map.cpus_str);
3231     if (!map) {
3232         pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3233         return -1;
3234     }
3235 
3236     sched->map.cpus = map;
3237     return 0;
3238 }
3239 
3240 static int setup_color_pids(struct perf_sched *sched)
3241 {
3242     struct perf_thread_map *map;
3243 
3244     if (!sched->map.color_pids_str)
3245         return 0;
3246 
3247     map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3248     if (!map) {
3249         pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3250         return -1;
3251     }
3252 
3253     sched->map.color_pids = map;
3254     return 0;
3255 }
3256 
3257 static int setup_color_cpus(struct perf_sched *sched)
3258 {
3259     struct perf_cpu_map *map;
3260 
3261     if (!sched->map.color_cpus_str)
3262         return 0;
3263 
3264     map = perf_cpu_map__new(sched->map.color_cpus_str);
3265     if (!map) {
3266         pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
3267         return -1;
3268     }
3269 
3270     sched->map.color_cpus = map;
3271     return 0;
3272 }
3273 
3274 static int perf_sched__map(struct perf_sched *sched)
3275 {
3276     if (setup_map_cpus(sched))
3277         return -1;
3278 
3279     if (setup_color_pids(sched))
3280         return -1;
3281 
3282     if (setup_color_cpus(sched))
3283         return -1;
3284 
3285     setup_pager();
3286     if (perf_sched__read_events(sched))
3287         return -1;
3288     print_bad_events(sched);
3289     return 0;
3290 }
3291 
3292 static int perf_sched__replay(struct perf_sched *sched)
3293 {
3294     unsigned long i;
3295 
3296     calibrate_run_measurement_overhead(sched);
3297     calibrate_sleep_measurement_overhead(sched);
3298 
3299     test_calibrations(sched);
3300 
3301     if (perf_sched__read_events(sched))
3302         return -1;
3303 
3304     printf("nr_run_events:        %ld\n", sched->nr_run_events);
3305     printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
3306     printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);
3307 
3308     if (sched->targetless_wakeups)
3309         printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
3310     if (sched->multitarget_wakeups)
3311         printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3312     if (sched->nr_run_events_optimized)
3313         printf("run atoms optimized: %ld\n",
3314             sched->nr_run_events_optimized);
3315 
3316     print_task_traces(sched);
3317     add_cross_task_wakeups(sched);
3318 
3319     create_tasks(sched);
3320     printf("------------------------------------------------------------\n");
3321     for (i = 0; i < sched->replay_repeat; i++)
3322         run_one_test(sched);
3323 
3324     return 0;
3325 }
3326 
3327 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3328               const char * const usage_msg[])
3329 {
3330     char *tmp, *tok, *str = strdup(sched->sort_order);
3331 
3332     for (tok = strtok_r(str, ", ", &tmp);
3333             tok; tok = strtok_r(NULL, ", ", &tmp)) {
3334         if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3335             usage_with_options_msg(usage_msg, options,
3336                     "Unknown --sort key: `%s'", tok);
3337         }
3338     }
3339 
3340     free(str);
3341 
3342     sort_dimension__add("pid", &sched->cmp_pid);
3343 }
3344 
3345 static bool schedstat_events_exposed(void)
3346 {
3347     /*
3348      * Select "sched:sched_stat_wait" event to check
3349      * whether schedstat tracepoints are exposed.
3350      */
3351     return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3352         false : true;
3353 }
3354 
3355 static int __cmd_record(int argc, const char **argv)
3356 {
3357     unsigned int rec_argc, i, j;
3358     char **rec_argv;
3359     const char **rec_argv_copy;
3360     const char * const record_args[] = {
3361         "record",
3362         "-a",
3363         "-R",
3364         "-m", "1024",
3365         "-c", "1",
3366         "-e", "sched:sched_switch",
3367         "-e", "sched:sched_stat_runtime",
3368         "-e", "sched:sched_process_fork",
3369         "-e", "sched:sched_wakeup_new",
3370         "-e", "sched:sched_migrate_task",
3371     };
3372 
3373     /*
3374      * The tracepoints trace_sched_stat_{wait, sleep, iowait}
3375      * are not exposed to userspace unless CONFIG_SCHEDSTATS is set.
3376      * To prevent "perf sched record" from failing, determine whether
3377      * the schedstat events are actually available before recording them.
3378      */
3379     const char * const schedstat_args[] = {
3380         "-e", "sched:sched_stat_wait",
3381         "-e", "sched:sched_stat_sleep",
3382         "-e", "sched:sched_stat_iowait",
3383     };
3384     unsigned int schedstat_argc = schedstat_events_exposed() ?
3385         ARRAY_SIZE(schedstat_args) : 0;
3386 
3387     struct tep_event *waking_event;
3388     int ret;
3389 
3390     /*
3391      * +2 for either "-e", "sched:sched_wakeup" or
3392      * "-e", "sched:sched_waking"
3393      */
3394     rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3395     rec_argv = calloc(rec_argc + 1, sizeof(char *));
3396     if (rec_argv == NULL)
3397         return -ENOMEM;
3398     rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3399     if (rec_argv_copy == NULL) {
3400         free(rec_argv);
3401         return -ENOMEM;
3402     }
3403 
3404     for (i = 0; i < ARRAY_SIZE(record_args); i++)
3405         rec_argv[i] = strdup(record_args[i]);
3406 
3407     rec_argv[i++] = strdup("-e");
3408     waking_event = trace_event__tp_format("sched", "sched_waking");
3409     if (!IS_ERR(waking_event))
3410         rec_argv[i++] = strdup("sched:sched_waking");
3411     else
3412         rec_argv[i++] = strdup("sched:sched_wakeup");
3413 
3414     for (j = 0; j < schedstat_argc; j++)
3415         rec_argv[i++] = strdup(schedstat_args[j]);
3416 
3417     for (j = 1; j < (unsigned int)argc; j++, i++)
3418         rec_argv[i] = strdup(argv[j]);
3419 
3420     BUG_ON(i != rec_argc);
3421 
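    /*
     * cmd_record() (via parse_options()) may modify the argv array it is
     * given, so pass a copy and keep the original pointers in rec_argv so
     * that every strdup()ed string can still be freed afterwards.
     */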
3422     memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3423     ret = cmd_record(rec_argc, rec_argv_copy);
3424 
3425     for (i = 0; i < rec_argc; i++)
3426         free(rec_argv[i]);
3427     free(rec_argv);
3428     free(rec_argv_copy);
3429 
3430     return ret;
3431 }
3432 
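/*
 * Entry point for 'perf sched': set up the shared perf_sched state and the
 * per-subcommand option tables, then dispatch to record, latency, map,
 * replay, script (an alias for 'perf script') or timehist.
 */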
3433 int cmd_sched(int argc, const char **argv)
3434 {
3435     static const char default_sort_order[] = "avg, max, switch, runtime";
3436     struct perf_sched sched = {
3437         .tool = {
3438             .sample      = perf_sched__process_tracepoint_sample,
3439             .comm        = perf_sched__process_comm,
3440             .namespaces  = perf_event__process_namespaces,
3441             .lost        = perf_event__process_lost,
3442             .fork        = perf_sched__process_fork_event,
3443             .ordered_events = true,
3444         },
3445         .cmp_pid          = LIST_HEAD_INIT(sched.cmp_pid),
3446         .sort_list        = LIST_HEAD_INIT(sched.sort_list),
3447         .start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
3448         .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
3449         .sort_order       = default_sort_order,
3450         .replay_repeat        = 10,
3451         .profile_cpu          = -1,
3452         .next_shortname1      = 'A',
3453         .next_shortname2      = '0',
3454         .skip_merge           = 0,
3455         .show_callchain       = 1,
3456         .max_stack            = 5,
3457     };
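    /* Options shared by the reporting subcommands via OPT_PARENT() below. */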
3458     const struct option sched_options[] = {
3459     OPT_STRING('i', "input", &input_name, "file",
3460             "input file name"),
3461     OPT_INCR('v', "verbose", &verbose,
3462             "be more verbose (show symbol address, etc)"),
3463     OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3464             "dump raw trace in ASCII"),
3465     OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3466     OPT_END()
3467     };
3468     const struct option latency_options[] = {
3469     OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3470            "sort by key(s): runtime, switch, avg, max"),
3471     OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3472             "CPU to profile on"),
3473     OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3474             "latency stats per pid instead of per comm"),
3475     OPT_PARENT(sched_options)
3476     };
3477     const struct option replay_options[] = {
3478     OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3479              "repeat the workload replay N times (-1: infinite)"),
3480     OPT_PARENT(sched_options)
3481     };
3482     const struct option map_options[] = {
3483     OPT_BOOLEAN(0, "compact", &sched.map.comp,
3484             "map output in compact mode"),
3485     OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3486            "highlight given pids in map"),
3487     OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3488                     "highlight given CPUs in map"),
3489     OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3490                     "display given CPUs in map"),
3491     OPT_PARENT(sched_options)
3492     };
3493     const struct option timehist_options[] = {
3494     OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3495            "file", "vmlinux pathname"),
3496     OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3497            "file", "kallsyms pathname"),
3498     OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3499             "Display call chains if present (default on)"),
3500     OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3501            "Maximum number of functions to display in a backtrace"),
3502     OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3503             "Look for files with symbols relative to this directory"),
3504     OPT_BOOLEAN('s', "summary", &sched.summary_only,
3505             "Show only the summary with statistics"),
3506     OPT_BOOLEAN('S', "with-summary", &sched.summary,
3507             "Show all events and the summary with statistics"),
3508     OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3509     OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3510     OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3511     OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3512     OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3513     OPT_STRING(0, "time", &sched.time_str, "str",
3514            "Time span for analysis (start,stop)"),
3515     OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3516     OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3517            "analyze events only for given process id(s)"),
3518     OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3519            "analyze events only for given thread id(s)"),
3520     OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3521     OPT_PARENT(sched_options)
3522     };
3523 
3524     const char * const latency_usage[] = {
3525         "perf sched latency [<options>]",
3526         NULL
3527     };
3528     const char * const replay_usage[] = {
3529         "perf sched replay [<options>]",
3530         NULL
3531     };
3532     const char * const map_usage[] = {
3533         "perf sched map [<options>]",
3534         NULL
3535     };
3536     const char * const timehist_usage[] = {
3537         "perf sched timehist [<options>]",
3538         NULL
3539     };
3540     const char *const sched_subcommands[] = { "record", "latency", "map",
3541                           "replay", "script",
3542                           "timehist", NULL };
3543     const char *sched_usage[] = {
3544         NULL,
3545         NULL
3546     };
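    /*
     * Per-subcommand tracepoint handlers: sched.tp_handler is pointed at
     * one of these so the common event processing code invokes the right
     * wakeup/switch/runtime/fork/migration callbacks for the chosen
     * subcommand.
     */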
3547     struct trace_sched_handler lat_ops  = {
3548         .wakeup_event       = latency_wakeup_event,
3549         .switch_event       = latency_switch_event,
3550         .runtime_event      = latency_runtime_event,
3551         .migrate_task_event = latency_migrate_task_event,
3552     };
3553     struct trace_sched_handler map_ops  = {
3554         .switch_event       = map_switch_event,
3555     };
3556     struct trace_sched_handler replay_ops  = {
3557         .wakeup_event       = replay_wakeup_event,
3558         .switch_event       = replay_switch_event,
3559         .fork_event     = replay_fork_event,
3560     };
3561     unsigned int i;
3562     int ret;
3563 
3564     for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
3565         sched.curr_pid[i] = -1;
3566 
3567     argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3568                     sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3569     if (!argc)
3570         usage_with_options(sched_usage, sched_options);
3571 
3572     /*
3573      * Aliased to 'perf script' for now:
3574      */
3575     if (!strcmp(argv[0], "script"))
3576         return cmd_script(argc, argv);
3577 
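    /*
     * The remaining subcommands may be abbreviated: any prefix of at least
     * three characters of "record", "latency" or "replay" is accepted
     * (e.g. "rec", "lat", "rep"), while "map" and "timehist" have to be
     * spelled out in full.
     */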
3578     if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
3579         return __cmd_record(argc, argv);
3580     } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
3581         sched.tp_handler = &lat_ops;
3582         if (argc > 1) {
3583             argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3584             if (argc)
3585                 usage_with_options(latency_usage, latency_options);
3586         }
3587         setup_sorting(&sched, latency_options, latency_usage);
3588         return perf_sched__lat(&sched);
3589     } else if (!strcmp(argv[0], "map")) {
3590         if (argc) {
3591             argc = parse_options(argc, argv, map_options, map_usage, 0);
3592             if (argc)
3593                 usage_with_options(map_usage, map_options);
3594         }
3595         sched.tp_handler = &map_ops;
3596         setup_sorting(&sched, latency_options, latency_usage);
3597         return perf_sched__map(&sched);
3598     } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
3599         sched.tp_handler = &replay_ops;
3600         if (argc) {
3601             argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3602             if (argc)
3603                 usage_with_options(replay_usage, replay_options);
3604         }
3605         return perf_sched__replay(&sched);
3606     } else if (!strcmp(argv[0], "timehist")) {
3607         if (argc) {
3608             argc = parse_options(argc, argv, timehist_options,
3609                          timehist_usage, 0);
3610             if (argc)
3611                 usage_with_options(timehist_usage, timehist_options);
3612         }
3613         if ((sched.show_wakeups || sched.show_next) &&
3614             sched.summary_only) {
3615             pr_err("Error: -s and -[n|w] are mutually exclusive.\n");
3616             parse_options_usage(timehist_usage, timehist_options, "s", true);
3617             if (sched.show_wakeups)
3618                 parse_options_usage(NULL, timehist_options, "w", true);
3619             if (sched.show_next)
3620                 parse_options_usage(NULL, timehist_options, "n", true);
3621             return -EINVAL;
3622         }
3623         ret = symbol__validate_sym_arguments();
3624         if (ret)
3625             return ret;
3626 
3627         return perf_sched__timehist(&sched);
3628     } else {
3629         usage_with_options(sched_usage, sched_options);
3630     }
3631 
3632     return 0;
3633 }