// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
    switch (entry->type) {
    case TRACE_FN:
    case TRACE_CTX:
    case TRACE_WAKE:
    case TRACE_STACK:
    case TRACE_PRINT:
    case TRACE_BRANCH:
    case TRACE_GRAPH_ENT:
    case TRACE_GRAPH_RET:
        return 1;
    }
    return 0;
}
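
/*
 * Note: every trace entry type that can land in this buffer must be
 * listed in the switch above, or the selftests will flag the buffer as
 * corrupted. A tracer introducing a new type (hypothetical name below)
 * would extend it accordingly:
 *
 *     case TRACE_MY_NEW_TYPE:
 *         return 1;
 */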

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
    struct ring_buffer_event *event;
    struct trace_entry *entry;
    unsigned int loops = 0;

    while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
        entry = ring_buffer_event_data(event);

        /*
         * The ring buffer holds at most trace_buf_size entries;
         * if we loop more times than that, something is wrong
         * with the ring buffer.
         */
        if (loops++ > trace_buf_size) {
            printk(KERN_CONT ".. bad ring buffer ");
            goto failed;
        }
        if (!trace_valid_entry(entry)) {
            printk(KERN_CONT ".. invalid entry %d ",
                entry->type);
            goto failed;
        }
    }
    return 0;

 failed:
    /* disable tracing */
    tracing_disabled = 1;
    printk(KERN_CONT ".. corrupted trace buffer .. ");
    return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
    unsigned long flags, cnt = 0;
    int cpu, ret = 0;

    /* Don't allow flipping of max traces now */
    local_irq_save(flags);
    arch_spin_lock(&buf->tr->max_lock);

    cnt = ring_buffer_entries(buf->buffer);

    /*
     * trace_test_buffer_cpu() runs a while loop to consume all data.
     * If the calling tracer is broken, and is constantly filling
     * the buffer, this will run forever, and hard lock the box.
     * We disable the ring buffer while we do this test to prevent
     * a hard lock up.
     */
    tracing_off();
    for_each_possible_cpu(cpu) {
        ret = trace_test_buffer_cpu(buf, cpu);
        if (ret)
            break;
    }
    tracing_on();
    arch_spin_unlock(&buf->tr->max_lock);
    local_irq_restore(flags);

    if (count)
        *count = cnt;

    return ret;
}
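
/*
 * Sketch of how the selftests below use trace_test_buffer(): quiesce
 * tracing first, then validate and count what was recorded (a hedged
 * pseudo-usage mirroring the tests in this file, not a new test):
 *
 *     tracing_stop();
 *     ret = trace_test_buffer(&tr->array_buffer, &count);
 *     tracing_start();
 *     if (!ret && !count)
 *         ret = -1;  // tracer ran but recorded nothing
 */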

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
    printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
        trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                        unsigned long pip,
                        struct ftrace_ops *op,
                        struct ftrace_regs *fregs)
{
    trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                        unsigned long pip,
                        struct ftrace_ops *op,
                        struct ftrace_regs *fregs)
{
    trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                        unsigned long pip,
                        struct ftrace_ops *op,
                        struct ftrace_regs *fregs)
{
    trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                        unsigned long pip,
                        struct ftrace_ops *op,
                        struct ftrace_regs *fregs)
{
    trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                     unsigned long pip,
                     struct ftrace_ops *op,
                     struct ftrace_regs *fregs)
{
    trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
    .func           = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
    .func           = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
    .func           = trace_selftest_test_probe3_func,
};
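
/*
 * The probes above follow the minimal ftrace_ops pattern used throughout
 * this file. A hedged sketch with hypothetical names, mirroring the
 * filter/register/unregister calls made in trace_selftest_ops() below:
 *
 *     static void my_probe(unsigned long ip, unsigned long parent_ip,
 *                          struct ftrace_ops *op, struct ftrace_regs *fregs)
 *     {
 *         // runs for every traced function matching the filter
 *     }
 *     static struct ftrace_ops my_ops = { .func = my_probe };
 *
 *     ftrace_set_filter(&my_ops, "my_func", strlen("my_func"), 1);
 *     register_ftrace_function(&my_ops);
 *     ...
 *     unregister_ftrace_function(&my_ops);
 */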

static void print_counts(void)
{
    printk("(%d %d %d %d %d) ",
           trace_selftest_test_probe1_cnt,
           trace_selftest_test_probe2_cnt,
           trace_selftest_test_probe3_cnt,
           trace_selftest_test_global_cnt,
           trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
    trace_selftest_test_probe1_cnt = 0;
    trace_selftest_test_probe2_cnt = 0;
    trace_selftest_test_probe3_cnt = 0;
    trace_selftest_test_global_cnt = 0;
    trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
    int save_ftrace_enabled = ftrace_enabled;
    struct ftrace_ops *dyn_ops;
    char *func1_name;
    char *func2_name;
    int len1;
    int len2;
    int ret = -1;

    printk(KERN_CONT "PASSED\n");
    pr_info("Testing dynamic ftrace ops #%d: ", cnt);

    ftrace_enabled = 1;
    reset_counts();

    /* Handle PPC64 '.' name */
    func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
    func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
    len1 = strlen(func1_name);
    len2 = strlen(func2_name);

    /*
     * Probe 1 will trace function 1.
     * Probe 2 will trace function 2.
     * Probe 3 will trace functions 1 and 2.
     */
    ftrace_set_filter(&test_probe1, func1_name, len1, 1);
    ftrace_set_filter(&test_probe2, func2_name, len2, 1);
    ftrace_set_filter(&test_probe3, func1_name, len1, 1);
    ftrace_set_filter(&test_probe3, func2_name, len2, 0);

    register_ftrace_function(&test_probe1);
    register_ftrace_function(&test_probe2);
    register_ftrace_function(&test_probe3);
    /* First time we are running with main function */
    if (cnt > 1) {
        ftrace_init_array_ops(tr, trace_selftest_test_global_func);
        register_ftrace_function(tr->ops);
    }

    DYN_FTRACE_TEST_NAME();

    print_counts();

    if (trace_selftest_test_probe1_cnt != 1)
        goto out;
    if (trace_selftest_test_probe2_cnt != 0)
        goto out;
    if (trace_selftest_test_probe3_cnt != 1)
        goto out;
    if (cnt > 1) {
        if (trace_selftest_test_global_cnt == 0)
            goto out;
    }

    DYN_FTRACE_TEST_NAME2();

    print_counts();

    if (trace_selftest_test_probe1_cnt != 1)
        goto out;
    if (trace_selftest_test_probe2_cnt != 1)
        goto out;
    if (trace_selftest_test_probe3_cnt != 2)
        goto out;

    /* Add a dynamic probe */
    dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
    if (!dyn_ops) {
        printk("MEMORY ERROR ");
        goto out;
    }

    dyn_ops->func = trace_selftest_test_dyn_func;

    register_ftrace_function(dyn_ops);

    trace_selftest_test_global_cnt = 0;

    DYN_FTRACE_TEST_NAME();

    print_counts();

    if (trace_selftest_test_probe1_cnt != 2)
        goto out_free;
    if (trace_selftest_test_probe2_cnt != 1)
        goto out_free;
    if (trace_selftest_test_probe3_cnt != 3)
        goto out_free;
    if (cnt > 1) {
        if (trace_selftest_test_global_cnt == 0)
            goto out_free;
    }
    if (trace_selftest_test_dyn_cnt == 0)
        goto out_free;

    DYN_FTRACE_TEST_NAME2();

    print_counts();

    if (trace_selftest_test_probe1_cnt != 2)
        goto out_free;
    if (trace_selftest_test_probe2_cnt != 2)
        goto out_free;
    if (trace_selftest_test_probe3_cnt != 4)
        goto out_free;

    /* Remove trace function from probe 3 */
    func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
    len1 = strlen(func1_name);

    ftrace_set_filter(&test_probe3, func1_name, len1, 0);

    DYN_FTRACE_TEST_NAME();

    print_counts();

    if (trace_selftest_test_probe1_cnt != 3)
        goto out_free;
    if (trace_selftest_test_probe2_cnt != 2)
        goto out_free;
    if (trace_selftest_test_probe3_cnt != 4)
        goto out_free;
    if (cnt > 1) {
        if (trace_selftest_test_global_cnt == 0)
            goto out_free;
    }
    if (trace_selftest_test_dyn_cnt == 0)
        goto out_free;

    DYN_FTRACE_TEST_NAME2();

    print_counts();

    if (trace_selftest_test_probe1_cnt != 3)
        goto out_free;
    if (trace_selftest_test_probe2_cnt != 3)
        goto out_free;
    if (trace_selftest_test_probe3_cnt != 5)
        goto out_free;

    ret = 0;
 out_free:
    unregister_ftrace_function(dyn_ops);
    kfree(dyn_ops);

 out:
    /* Purposely unregister in the same order */
    unregister_ftrace_function(&test_probe1);
    unregister_ftrace_function(&test_probe2);
    unregister_ftrace_function(&test_probe3);
    if (cnt > 1)
        unregister_ftrace_function(tr->ops);
    ftrace_reset_array_ops(tr);

    /* Make sure everything is off */
    reset_counts();
    DYN_FTRACE_TEST_NAME();
    DYN_FTRACE_TEST_NAME();

    if (trace_selftest_test_probe1_cnt ||
        trace_selftest_test_probe2_cnt ||
        trace_selftest_test_probe3_cnt ||
        trace_selftest_test_global_cnt ||
        trace_selftest_test_dyn_cnt)
        ret = -1;

    ftrace_enabled = save_ftrace_enabled;

    return ret;
}
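
/*
 * For reference, the counts asserted above as each call site fires,
 * derived from the filters set at the top of the test ('-' means the
 * dynamic probe is not yet registered):
 *
 *     call                                 probe1  probe2  probe3  dyn
 *     DYN_FTRACE_TEST_NAME()                 1       0       1     -
 *     DYN_FTRACE_TEST_NAME2()                1       1       2     -
 *     DYN_FTRACE_TEST_NAME()  (dyn added)    2       1       3     >0
 *     DYN_FTRACE_TEST_NAME2()                2       2       4     >0
 *     DYN_FTRACE_TEST_NAME()  (probe3 cut)   3       2       4     >0
 *     DYN_FTRACE_TEST_NAME2()                3       3       5     >0
 */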

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                          struct trace_array *tr,
                          int (*func)(void))
{
    int save_ftrace_enabled = ftrace_enabled;
    unsigned long count;
    char *func_name;
    int ret;

    /* The ftrace test PASSED */
    printk(KERN_CONT "PASSED\n");
    pr_info("Testing dynamic ftrace: ");

    /* enable tracing, and record the filter function */
    ftrace_enabled = 1;

    /* passed in by parameter to keep gcc from optimizing it away */
    func();

    /*
     * Some archs *cough*PowerPC*cough* add characters to the
     * start of the function names. We simply put a '*' to
     * accommodate them.
     */
    func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

    /* filter only on our function */
    ftrace_set_global_filter(func_name, strlen(func_name), 1);

    /* enable tracing */
    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        goto out;
    }

    /* Sleep for 1/10 of a second */
    msleep(100);

    /* we should have nothing in the buffer */
    ret = trace_test_buffer(&tr->array_buffer, &count);
    if (ret)
        goto out;

    if (count) {
        ret = -1;
        printk(KERN_CONT ".. filter did not filter .. ");
        goto out;
    }

    /* call our function again */
    func();

    /* sleep again */
    msleep(100);

    /* stop the tracing. */
    tracing_stop();
    ftrace_enabled = 0;

    /* check the trace buffer */
    ret = trace_test_buffer(&tr->array_buffer, &count);

    ftrace_enabled = 1;
    tracing_start();

    /* we should only have one item */
    if (!ret && count != 1) {
        trace->reset(tr);
        printk(KERN_CONT ".. filter failed count=%ld ..", count);
        ret = -1;
        goto out;
    }

    /* Test the ops with global tracing running */
    ret = trace_selftest_ops(tr, 1);
    trace->reset(tr);

 out:
    ftrace_enabled = save_ftrace_enabled;

    /* Enable tracing on all functions again */
    ftrace_set_global_filter(NULL, 0, 1);

    /* Test the ops with global tracing off */
    if (!ret)
        ret = trace_selftest_ops(tr, 2);

    return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                           unsigned long pip,
                           struct ftrace_ops *op,
                           struct ftrace_regs *fregs)
{
    /*
     * This function is registered with the FTRACE_OPS_FL_RECURSION
     * flag, so the ftrace infrastructure should provide the recursion
     * protection. If not, this will crash the kernel!
     */
    if (trace_selftest_recursion_cnt++ > 10)
        return;
    DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                            unsigned long pip,
                            struct ftrace_ops *op,
                            struct ftrace_regs *fregs)
{
    /*
     * We said we would provide our own recursion protection. By
     * calling this function again, we should recurse back into this
     * function and count again. But this only happens if the arch
     * supports all of the ftrace features and nothing else is using
     * the function tracing utility.
     */
    if (trace_selftest_recursion_cnt++)
        return;
    DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
    .func           = trace_selftest_test_recursion_func,
    .flags          = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
    .func           = trace_selftest_test_recursion_safe_func,
};
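
/*
 * The two ops above differ only in who handles recursion: with
 * FTRACE_OPS_FL_RECURSION set, the ftrace infrastructure guards the
 * callback against reentry; without it, the callback must tolerate
 * reentry itself, as trace_selftest_test_recursion_safe_func() does by
 * bailing out once the count is non-zero. Sketch (hypothetical names):
 *
 *     static struct ftrace_ops my_ops = {
 *         .func  = my_probe,
 *         .flags = FTRACE_OPS_FL_RECURSION,  // let ftrace guard reentry
 *     };
 */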

static int
trace_selftest_function_recursion(void)
{
    int save_ftrace_enabled = ftrace_enabled;
    char *func_name;
    int len;
    int ret;

    /* The previous test PASSED */
    pr_cont("PASSED\n");
    pr_info("Testing ftrace recursion: ");

    /* enable tracing, and record the filter function */
    ftrace_enabled = 1;

    /* Handle PPC64 '.' name */
    func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
    len = strlen(func_name);

    ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
    if (ret) {
        pr_cont("*Could not set filter* ");
        goto out;
    }

    ret = register_ftrace_function(&test_rec_probe);
    if (ret) {
        pr_cont("*could not register callback* ");
        goto out;
    }

    DYN_FTRACE_TEST_NAME();

    unregister_ftrace_function(&test_rec_probe);

    ret = -1;
    /*
     * Recursion allows for transitions between contexts,
     * and may call the callback twice.
     */
    if (trace_selftest_recursion_cnt != 1 &&
        trace_selftest_recursion_cnt != 2) {
        pr_cont("*callback not called once (or twice) (%d)* ",
            trace_selftest_recursion_cnt);
        goto out;
    }

    trace_selftest_recursion_cnt = 1;

    pr_cont("PASSED\n");
    pr_info("Testing ftrace recursion safe: ");

    ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
    if (ret) {
        pr_cont("*Could not set filter* ");
        goto out;
    }

    ret = register_ftrace_function(&test_recsafe_probe);
    if (ret) {
        pr_cont("*could not register callback* ");
        goto out;
    }

    DYN_FTRACE_TEST_NAME();

    unregister_ftrace_function(&test_recsafe_probe);

    ret = -1;
    if (trace_selftest_recursion_cnt != 2) {
        pr_cont("*callback not called expected 2 times (%d)* ",
            trace_selftest_recursion_cnt);
        goto out;
    }

    ret = 0;
out:
    ftrace_enabled = save_ftrace_enabled;

    return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
    TRACE_SELFTEST_REGS_START,
    TRACE_SELFTEST_REGS_FOUND,
    TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                      unsigned long pip,
                      struct ftrace_ops *op,
                      struct ftrace_regs *fregs)
{
    struct pt_regs *regs = ftrace_get_regs(fregs);

    if (regs)
        trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
    else
        trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
    .func       = trace_selftest_test_regs_func,
    .flags      = FTRACE_OPS_FL_SAVE_REGS,
};
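
/*
 * Flag semantics exercised by the test below: FTRACE_OPS_FL_SAVE_REGS
 * makes registration fail on arches without
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS, while adding
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED lets registration succeed and
 * hands the callback NULL regs via ftrace_get_regs(). A hedged sketch
 * of a callback coping with both cases:
 *
 *     struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *     if (regs)
 *         ip = instruction_pointer(regs);  // full register state saved
 *     // else: the arch did not save regs for this call
 */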

static int
trace_selftest_function_regs(void)
{
    int save_ftrace_enabled = ftrace_enabled;
    char *func_name;
    int len;
    int ret;
    int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
    supported = 1;
#endif

    /* The previous test PASSED */
    pr_cont("PASSED\n");
    pr_info("Testing ftrace regs%s: ",
        !supported ? "(no arch support)" : "");

    /* enable tracing, and record the filter function */
    ftrace_enabled = 1;

    /* Handle PPC64 '.' name */
    func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
    len = strlen(func_name);

    ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
    /*
     * If DYNAMIC_FTRACE is not set, then we just trace all functions.
     * This test really doesn't care.
     */
    if (ret && ret != -ENODEV) {
        pr_cont("*Could not set filter* ");
        goto out;
    }

    ret = register_ftrace_function(&test_regs_probe);
    /*
     * Now if the arch does not support passing regs, then this should
     * have failed.
     */
    if (!supported) {
        if (!ret) {
            pr_cont("*registered save-regs without arch support* ");
            goto out;
        }
        test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
        ret = register_ftrace_function(&test_regs_probe);
    }
    if (ret) {
        pr_cont("*could not register callback* ");
        goto out;
    }

    DYN_FTRACE_TEST_NAME();

    unregister_ftrace_function(&test_regs_probe);

    ret = -1;

    switch (trace_selftest_regs_stat) {
    case TRACE_SELFTEST_REGS_START:
        pr_cont("*callback never called* ");
        goto out;

    case TRACE_SELFTEST_REGS_FOUND:
        if (supported)
            break;
        pr_cont("*callback received regs without arch support* ");
        goto out;

    case TRACE_SELFTEST_REGS_NOT_FOUND:
        if (!supported)
            break;
        pr_cont("*callback received NULL regs* ");
        goto out;
    }

    ret = 0;
out:
    ftrace_enabled = save_ftrace_enabled;

    return ret;
}

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
    int save_ftrace_enabled = ftrace_enabled;
    unsigned long count;
    int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
    if (ftrace_filter_param) {
        printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
        return 0;
    }
#endif

    /* make sure msleep has been recorded */
    msleep(1);

    /* start the tracing */
    ftrace_enabled = 1;

    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        goto out;
    }

    /* Sleep for 1/10 of a second */
    msleep(100);
    /* stop the tracing. */
    tracing_stop();
    ftrace_enabled = 0;

    /* check the trace buffer */
    ret = trace_test_buffer(&tr->array_buffer, &count);

    ftrace_enabled = 1;
    trace->reset(tr);
    tracing_start();

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
        goto out;
    }

    ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                             DYN_FTRACE_TEST_NAME);
    if (ret)
        goto out;

    ret = trace_selftest_function_recursion();
    if (ret)
        goto out;

    ret = trace_selftest_function_regs();
 out:
    ftrace_enabled = save_ftrace_enabled;

    /* kill ftrace totally if we failed */
    if (ret)
        ftrace_kill();

    return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
    /* This is harmlessly racy, we want to approximately detect a hang */
    if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
        ftrace_graph_stop();
        printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
        if (ftrace_dump_on_oops) {
            ftrace_dump(DUMP_ALL);
            /* ftrace_dump() disables tracing */
            tracing_on();
        }
        return 0;
    }

    return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
    .entryfunc      = &trace_graph_entry_watchdog,
    .retfunc        = &trace_graph_return,
};
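
/*
 * A hedged sketch of the registration pattern driven below, with
 * hypothetical handler names (the entry handler's return value decides
 * whether that function entry gets traced; the return handler fires
 * when the function returns):
 *
 *     static struct fgraph_ops my_gops = {
 *         .entryfunc = my_graph_entry,   // int (*)(struct ftrace_graph_ent *)
 *         .retfunc   = my_graph_return,  // void (*)(struct ftrace_graph_ret *)
 *     };
 *
 *     ret = register_ftrace_graph(&my_gops);
 *     ...
 *     unregister_ftrace_graph(&my_gops);
 */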

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
noinline __noclone static void trace_direct_tramp(void) { }
#endif
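
/*
 * register_ftrace_direct() attaches a custom trampoline straight to a
 * function's ftrace call site. The test below registers the (empty)
 * trace_direct_tramp() above together with the graph tracer and checks
 * that graph entries are still recorded, mirroring:
 *
 *     register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
 *                            (unsigned long) trace_direct_tramp);
 */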

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                    struct trace_array *tr)
{
    int ret;
    unsigned long count;
    char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
    if (ftrace_filter_param) {
        printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
        return 0;
    }
#endif

    /*
     * Simulate the init() callback, but attach a watchdog callback
     * to detect and recover from possible hangs
     */
    tracing_reset_online_cpus(&tr->array_buffer);
    set_graph_array(tr);
    ret = register_ftrace_graph(&fgraph_ops);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        goto out;
    }
    tracing_start_cmdline_record();

    /* Sleep for 1/10 of a second */
    msleep(100);

    /* Have we just recovered from a hang? */
    if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
        disable_tracing_selftest("recovering from a hang");
        ret = -1;
        goto out;
    }

    tracing_stop();

    /* check the trace buffer */
    ret = trace_test_buffer(&tr->array_buffer, &count);

    /* Need to also simulate the tr->reset to remove this fgraph_ops */
    tracing_stop_cmdline_record();
    unregister_ftrace_graph(&fgraph_ops);

    tracing_start();

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
        goto out;
    }

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
    tracing_reset_online_cpus(&tr->array_buffer);
    set_graph_array(tr);

    /*
     * Some archs *cough*PowerPC*cough* add characters to the
     * start of the function names. We simply put a '*' to
     * accommodate them.
     */
    func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
    ftrace_set_global_filter(func_name, strlen(func_name), 1);

    /*
     * Register the direct function together with the graph tracer
     * and make sure we get a graph trace.
     */
    ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
                     (unsigned long) trace_direct_tramp);
    if (ret)
        goto out;

    ret = register_ftrace_graph(&fgraph_ops);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        goto out;
    }

    DYN_FTRACE_TEST_NAME();

    count = 0;

    tracing_stop();
    /* check the trace buffer */
    ret = trace_test_buffer(&tr->array_buffer, &count);

    unregister_ftrace_graph(&fgraph_ops);

    ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
                       (unsigned long) trace_direct_tramp);
    if (ret)
        goto out;

    tracing_start();

    if (!ret && !count) {
        ret = -1;
        goto out;
    }

    /* Enable tracing on all functions again */
    ftrace_set_global_filter(NULL, 0, 1);
#endif

    /* Don't test dynamic tracing, the function tracer already did */
out:
    /* Stop it if we failed */
    if (ret)
        ftrace_graph_stop();

    return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
    unsigned long save_max = tr->max_latency;
    unsigned long count;
    int ret;

    /* start the tracing */
    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        return ret;
    }

    /* reset the max latency */
    tr->max_latency = 0;
    /* disable interrupts for a bit */
    local_irq_disable();
    udelay(100);
    local_irq_enable();

    /*
     * Stop the tracer to avoid a warning subsequent
     * to buffer flipping failure because tracing_stop()
     * disables the tr and max buffers, making flipping impossible
     * in case of parallel max irqs-off latencies.
     */
    trace->stop(tr);
    /* stop the tracing. */
    tracing_stop();
    /* check both trace buffers */
    ret = trace_test_buffer(&tr->array_buffer, NULL);
    if (!ret)
        ret = trace_test_buffer(&tr->max_buffer, &count);
    trace->reset(tr);
    tracing_start();

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
    }

    tr->max_latency = save_max;

    return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
    unsigned long save_max = tr->max_latency;
    unsigned long count;
    int ret;

    /*
     * Now that the big kernel lock is no longer preemptible,
     * and this is called with the BKL held, it will always
     * fail. If preemption is already disabled, simply
     * pass the test. When the BKL is removed, or becomes
     * preemptible again, we will once again test this,
     * so keep it in.
     */
    if (preempt_count()) {
        printk(KERN_CONT "can not test ... force ");
        return 0;
    }

    /* start the tracing */
    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        return ret;
    }

    /* reset the max latency */
    tr->max_latency = 0;
    /* disable preemption for a bit */
    preempt_disable();
    udelay(100);
    preempt_enable();

    /*
     * Stop the tracer to avoid a warning subsequent
     * to buffer flipping failure because tracing_stop()
     * disables the tr and max buffers, making flipping impossible
     * in case of parallel max preempt-off latencies.
     */
    trace->stop(tr);
    /* stop the tracing. */
    tracing_stop();
    /* check both trace buffers */
    ret = trace_test_buffer(&tr->array_buffer, NULL);
    if (!ret)
        ret = trace_test_buffer(&tr->max_buffer, &count);
    trace->reset(tr);
    tracing_start();

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
    }

    tr->max_latency = save_max;

    return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
    unsigned long save_max = tr->max_latency;
    unsigned long count;
    int ret;

    /*
     * Now that the big kernel lock is no longer preemptible,
     * and this is called with the BKL held, it will always
     * fail. If preemption is already disabled, simply
     * pass the test. When the BKL is removed, or becomes
     * preemptible again, we will once again test this,
     * so keep it in.
     */
    if (preempt_count()) {
        printk(KERN_CONT "can not test ... force ");
        return 0;
    }

    /* start the tracing */
    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        goto out_no_start;
    }

    /* reset the max latency */
    tr->max_latency = 0;

    /* disable preemption and interrupts for a bit */
    preempt_disable();
    local_irq_disable();
    udelay(100);
    preempt_enable();
    /* reverse the order of preempt vs irqs */
    local_irq_enable();

    /*
     * Stop the tracer to avoid a warning subsequent
     * to buffer flipping failure because tracing_stop()
     * disables the tr and max buffers, making flipping impossible
     * in case of parallel max irqs/preempt-off latencies.
     */
    trace->stop(tr);
    /* stop the tracing. */
    tracing_stop();
    /* check both trace buffers */
    ret = trace_test_buffer(&tr->array_buffer, NULL);
    if (ret)
        goto out;

    ret = trace_test_buffer(&tr->max_buffer, &count);
    if (ret)
        goto out;

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
        goto out;
    }

    /* do the test by disabling interrupts first this time */
    tr->max_latency = 0;
    tracing_start();
    trace->start(tr);

    preempt_disable();
    local_irq_disable();
    udelay(100);
    preempt_enable();
    /* reverse the order of preempt vs irqs */
    local_irq_enable();

    trace->stop(tr);
    /* stop the tracing. */
    tracing_stop();
    /* check both trace buffers */
    ret = trace_test_buffer(&tr->array_buffer, NULL);
    if (ret)
        goto out;

    ret = trace_test_buffer(&tr->max_buffer, &count);

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
        goto out;
    }

out:
    tracing_start();
out_no_start:
    trace->reset(tr);
    tr->max_latency = save_max;

    return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
    /* What could possibly go wrong? */
    return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
    struct completion   is_ready;
    int         go;
};

static int trace_wakeup_test_thread(void *data)
{
    /* Make this a -deadline thread */
    static const struct sched_attr attr = {
        .sched_policy = SCHED_DEADLINE,
        .sched_runtime = 100000ULL,
        .sched_deadline = 10000000ULL,
        .sched_period = 10000000ULL
    };
    struct wakeup_test_data *x = data;

    sched_setattr(current, &attr);

    /* Let the test know we are running at the new policy */
    complete(&x->is_ready);

    /* now go to sleep and let the test wake us up */
    set_current_state(TASK_INTERRUPTIBLE);
    while (!x->go) {
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }

    complete(&x->is_ready);

    set_current_state(TASK_INTERRUPTIBLE);

    /* we are awake, now wait to disappear */
    while (!kthread_should_stop()) {
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }

    __set_current_state(TASK_RUNNING);

    return 0;
}
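
/*
 * For reference, the sched_attr above requests 100us of runtime every
 * 10ms period (deadline == period), i.e. roughly 1% utilization: enough
 * to exercise the wakeup path without disturbing the rest of the system.
 */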

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
    unsigned long save_max = tr->max_latency;
    struct task_struct *p;
    struct wakeup_test_data data;
    unsigned long count;
    int ret;

    memset(&data, 0, sizeof(data));

    init_completion(&data.is_ready);

    /* create a -deadline thread */
    p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
    if (IS_ERR(p)) {
        printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
        return -1;
    }

    /* make sure the thread is running at -deadline policy */
    wait_for_completion(&data.is_ready);

    /* start the tracing */
    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        return ret;
    }

    /* reset the max latency */
    tr->max_latency = 0;

    while (p->on_rq) {
        /*
         * Sleep to make sure the -deadline thread is asleep too.
         * On virtual machines we can't rely on timings,
         * but we want to make sure this test still works.
         */
        msleep(100);
    }

    init_completion(&data.is_ready);

    data.go = 1;
    /* memory barrier is in the wake_up_process() */

    wake_up_process(p);

    /* Wait for the task to wake up */
    wait_for_completion(&data.is_ready);

    /* stop the tracing. */
    tracing_stop();
    /* check both trace buffers */
    ret = trace_test_buffer(&tr->array_buffer, NULL);
    if (!ret)
        ret = trace_test_buffer(&tr->max_buffer, &count);

    trace->reset(tr);
    tracing_start();

    tr->max_latency = save_max;

    /* kill the thread */
    kthread_stop(p);

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
    }

    return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
    unsigned long count;
    int ret;

    /* start the tracing */
    ret = tracer_init(trace, tr);
    if (ret) {
        warn_failed_init_tracer(trace, ret);
        return ret;
    }

    /* Sleep for 1/10 of a second */
    msleep(100);
    /* stop the tracing. */
    tracing_stop();
    /* check the trace buffer */
    ret = trace_test_buffer(&tr->array_buffer, &count);
    trace->reset(tr);
    tracing_start();

    if (!ret && !count) {
        printk(KERN_CONT ".. no entries found ..");
        ret = -1;
    }

    return ret;
}
#endif /* CONFIG_BRANCH_TRACER */