0001
0002 #undef TRACE_SYSTEM
0003 #define TRACE_SYSTEM sched
0004
0005 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
0006 #define _TRACE_SCHED_H
0007
0008 #include <linux/kthread.h>
0009 #include <linux/sched/numa_balancing.h>
0010 #include <linux/tracepoint.h>
0011 #include <linux/binfmts.h>
0012
0013
0014
0015
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
0034
0035
0036
0037
/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker:	pointer to the kthread_worker
 * @work:	pointer to struct kthread_work
 *
 * Records the work item, its callback (work->func) and the worker the
 * work was queued on, at the time the work is queued.
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
		__field( void *, worker)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
		__entry->worker = worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);
0086
0087
0088
0089
0090
0091
0092
/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work:	pointer to struct kthread_work
 *
 * Allows to track kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
0111
0112
0113
0114
0115
0116
0117
0118
/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:	pointer to struct kthread_work
 * @function:	pointer to the callback function
 *
 * Allows to track kthread work execution.  @function is passed as a
 * separate argument (instead of reading work->func) because the work
 * item may already have been freed or reused by its callback.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
0137
0138
0139
0140
/*
 * Tracepoint class for waking up a task: records comm, pid, prio and
 * the CPU the task is currently placed on (task_cpu()).  __perf_task()
 * marks @p as the task perf should attribute the sample to.
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
		__entry->target_cpu = task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);
0165
0166
0167
0168
0169
/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to
 * be called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
0173
0174
0175
0176
0177
/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
0181
0182
0183
0184
/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
0188
#ifdef CREATE_TRACE_POINTS
/*
 * Map the scheduler's prev_state into the value reported by the
 * sched_switch tracepoint.  Only compiled where the tracepoints are
 * instantiated (CREATE_TRACE_POINTS).
 */
static inline long __trace_sched_switch_state(bool preempt,
					      unsigned int prev_state,
					      struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	/* This helper must only be used on the task being switched out. */
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * A preempted task is reported as TASK_REPORT_MAX, a value above
	 * every real state bit, so the printk side can render it as "R+".
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __task_state_index() condenses (prev_state, exit_state) into a
	 * small index (0 meaning runnable — presumably; confirm against
	 * its definition).  Convert a non-zero index back into the single
	 * corresponding report flag bit for __print_flags().
	 */
	state = __task_state_index(prev_state, p->exit_state);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
0218
0219
0220
0221
/*
 * Tracepoint for task switches, performed by the scheduler:
 * records the outgoing task (comm/pid/prio and its reported state,
 * see __trace_sched_switch_state()) and the incoming task.
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next,
		 unsigned int prev_state),

	TP_ARGS(preempt, prev, next, prev_state),

	TP_STRUCT__entry(
		__array( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		/* Decode the state bits below TASK_REPORT_MAX; no bit set means runnable. */
		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ TASK_INTERRUPTIBLE, "S" },
				{ TASK_UNINTERRUPTIBLE, "D" },
				{ __TASK_STOPPED, "T" },
				{ __TASK_TRACED, "t" },
				{ EXIT_DEAD, "X" },
				{ EXIT_ZOMBIE, "Z" },
				{ TASK_PARKED, "P" },
				{ TASK_DEAD, "I" }) :
		  "R",

		/* TASK_REPORT_MAX set means the task was preempted: append "+". */
		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
0270
0271
0272
0273
/*
 * Tracepoint for task migrations: orig_cpu is the CPU the task is on at
 * the time of the event (task_cpu()), dest_cpu the requested destination.
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);
0300
/*
 * Tracepoint class for basic per-task events: records comm, pid and prio.
 */
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
0322
0323
0324
0325
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
0329
0330
0331
0332
/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
0336
0337
0338
0339
/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
0343
0344
0345
0346
/*
 * Tracepoint for a waiting task.  Note the asymmetry: comm and prio are
 * those of the *waiting* task (current), while pid is the pid being
 * waited on (pid_nr() of @pid).
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);
0368
0369
0370
0371
/*
 * Tracepoint for do_fork: records parent and child comm/pid pairs.
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);
0396
0397
0398
0399
/*
 * Tracepoint for exec:
 * @old_pid is the task's pid before the exec (pid may change when a
 * non-leader thread execs), @bprm supplies the executed filename.
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__field( pid_t, old_pid )
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);
0422
0423
/*
 * The sched_stat_* tracepoints only carry useful data when schedstats
 * accounting is compiled in; otherwise map them to the NOP variants so
 * they are compiled out.
 */
#ifdef CONFIG_SCHEDSTATS
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS
#else
#define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP
#define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP
#endif
0431
0432
0433
0434
0435
/*
 * Tracepoint class for scheduler delay accounting: records the task and
 * a delay in nanoseconds.  __perf_task()/__perf_count() mark the task to
 * attribute the sample to and the value perf should accumulate.
 */
DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);
0458
0459
0460
0461
0462
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
0466
0467
0468
0469
0470
/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see sched_stat_iowait below).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
0474
0475
0476
0477
0478
/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
0482
0483
0484
0485
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
0489
0490
0491
0492
0493
/*
 * Tracepoint class for accounting runtime (time the task is executing
 * on a CPU) together with its vruntime; both values are in nanoseconds.
 * __perf_count() tells perf to accumulate the runtime value.
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
		__entry->vruntime = vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);
0519
/* The single event of the sched_stat_runtime class, same name as the class. */
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));
0523
0524
0525
0526
0527
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority: oldprio is the task's current prio; newprio is the boosted
 * priority (min of the task's normal_prio and the PI donor's prio), or
 * just normal_prio when the boost is being removed (@pi_task == NULL).
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);
0555
#ifdef CONFIG_DETECT_HUNG_TASK
/*
 * Tracepoint emitted by the hung-task detector for a task it considers
 * hung; only available with CONFIG_DETECT_HUNG_TASK.
 */
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */
0574
0575
0576
0577
0578
/*
 * Tracepoint for a task being migrated between CPUs by NUMA balancing:
 * records pid/tgid/numa-group id plus source and destination CPU and
 * their NUMA node ids.
 */
TRACE_EVENT(sched_move_numa,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( pid_t, tgid )
		__field( pid_t, ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);
0610
/*
 * Tracepoint class for NUMA-balancing decisions involving a pair of
 * tasks/CPUs.  @dst_tsk may be NULL (dst_* pid/tgid/ngid then report 0)
 * and @dst_cpu may be negative (dst_nid then reports -1).
 */
DECLARE_EVENT_CLASS(sched_numa_pair_template,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, src_pid )
		__field( pid_t, src_tgid )
		__field( pid_t, src_ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( pid_t, dst_pid )
		__field( pid_t, dst_tgid )
		__field( pid_t, dst_ngid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
		__entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
		__entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);
0650
/*
 * NUMA balancing decided NOT to move the task (it "sticks" where it is).
 */
DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
0658
/*
 * NUMA balancing swapped the two tasks between their CPUs.
 */
DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
0666
0667
0668
0669
0670
/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int, cpu )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);
0687
0688
0689
0690
0691
0692
0693
/*
 * The following tracepoints are declared with DECLARE_TRACE() only, so
 * they carry no trace-event format and are not exported in tracefs;
 * they provide raw hook points for in-kernel consumers (presumably
 * modules/BPF — confirm against the tracepoint documentation).
 */

/* PELT load-tracking update hooks, per sched class / source: */
DECLARE_TRACE(pelt_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(pelt_rt_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_dl_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_thermal_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_irq_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

DECLARE_TRACE(pelt_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

/* Fired when a runqueue's CPU capacity is updated: */
DECLARE_TRACE(sched_cpu_capacity_tp,
	TP_PROTO(struct rq *rq),
	TP_ARGS(rq));

/* Fired when the root domain's overutilized state changes: */
DECLARE_TRACE(sched_overutilized_tp,
	TP_PROTO(struct root_domain *rd, bool overutilized),
	TP_ARGS(rd, overutilized));

/* Utilization-estimate (util_est) update hooks: */
DECLARE_TRACE(sched_util_est_cfs_tp,
	TP_PROTO(struct cfs_rq *cfs_rq),
	TP_ARGS(cfs_rq));

DECLARE_TRACE(sched_util_est_se_tp,
	TP_PROTO(struct sched_entity *se),
	TP_ARGS(se));

/* Fired when rq->nr_running changes by @change: */
DECLARE_TRACE(sched_update_nr_running_tp,
	TP_PROTO(struct rq *rq, int change),
	TP_ARGS(rq, change));
0737
0738 #endif
0739
0740
0741 #include <trace/define_trace.h>