// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static bool wakeup_rt;
static bool wakeup_dl;
static bool tracing_dl;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

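/*
 * Called when the "display-graph" trace option is toggled while a wakeup
 * tracer is active: restart the function (or function-graph) tracer in the
 * newly requested mode and clear the recorded max latency.
 */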
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

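/*
 * The task we are measuring may migrate while we wait for it to run;
 * track its current CPU so the function-trace prologue above only records
 * on that CPU.
 */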
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

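/*
 * Write a context-switch (TRACE_CTX) entry directly into the ring buffer.
 */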
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

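/*
 * Write a wakeup (TRACE_WAKE) entry directly into the ring buffer.
 */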
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

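/*
 * sched_switch probe: when the task we are waiting for is finally scheduled
 * in, compute the time from its wakeup to this switch and, if it beats the
 * current maximum (or exceeds tracing_thresh), record it as the new max.
 */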
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next,
			  unsigned int prev_state)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/* latency is the time from the wakeup up to this context switch */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

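/*
 * Forget the current wakeup target and reset the bookkeeping.
 * Callers hold wakeup_lock with interrupts disabled.
 */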
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = false;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

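/*
 * sched_wakeup probe: if the woken task outranks the one we are currently
 * tracking (per the active tracer's policy), make it the new wakeup target
 * and timestamp the start of the latency measurement.
 */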
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = true;
	else
		tracing_dl = false;

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

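/*
 * Hook up all the scheduler tracepoint probes and start the function
 * (or function-graph) tracer for this trace_array.
 */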
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe against
	 * a race where the CPU disables tracing even before the
	 * wakeup_task is set.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = true;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = true;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
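
/*
 * Typical usage from userspace, e.g. to measure the wakeup latency of
 * real-time tasks (paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/tracing_on
 *   cat /sys/kernel/tracing/tracing_max_latency   # worst latency seen, in usecs
 *   cat /sys/kernel/tracing/trace                 # snapshot of that worst case
 */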