// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

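/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed. Some other section
 * could have recorded a new maximum in the meantime (and disturbed
 * our measurement with serial console printouts etc.), in which
 * case the result is dropped rather than reported as a maximum.
 */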
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
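/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */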
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

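	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */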
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
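	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */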
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

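/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */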
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);

	trace_function(tr, ip, parent_ip, trace_ctx);

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	int ret;

	if (ftrace_graph_ignore_func(trace))
		return 0;
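	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */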
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	trace_ctx = tracing_gen_ctx_flags(flags);
	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_ctx = tracing_gen_ctx_flags(flags);
	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
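	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_TYPE_UNHANDLED
	 */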
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

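/*
 * Should this new latency be reported/recorded?
 */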
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	unsigned int trace_ctx;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	trace_ctx = tracing_gen_ctx();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

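	/* check if we are still the max latency */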
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
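	/* Skip 5 functions to get to the irq/preempt enable function */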
	__trace_stack(tr, trace_ctx, 5);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}

static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	trace_ctx = tracing_gen_ctx();
	__trace_function(tr, ip, parent_ip, trace_ctx);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

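/* start and stop critical timings used to measure the time in a critical section */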
void start_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

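	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */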
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

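	/* non overwrite screws up the latency tracers */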
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
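	/* without pause, we will produce garbage if another latency occurs */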
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
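	/* make sure that the tracer is visible */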
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

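	/* Only toplevel instance supports graph tracing */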
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
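/*
 * We are only interested in hardirq on/off events:
 */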
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace(preempt_count()) && irq_trace())
		start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */
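
/*
 * Usage sketch (not part of this file's code; assumes tracefs is mounted
 * at /sys/kernel/tracing and CONFIG_IRQSOFF_TRACER=y):
 *
 *	cd /sys/kernel/tracing
 *	echo 0 > tracing_max_latency
 *	echo irqsoff > current_tracer
 *	echo 1 > tracing_on
 *	[... run workload ...]
 *	echo 0 > tracing_on
 *	cat trace
 *
 * The trace then shows the longest interrupts-off critical section
 * observed while the tracer was active, ending at the point where
 * interrupts were re-enabled.
 */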

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		stop_critical_timing(a0, a1);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace(preempt_count()) && !irq_trace())
		start_critical_timing(a0, a1);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */