// SPDX-License-Identifier: GPL-2.0
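/*
 * Record functions that trip the ftrace recursion protection, along
 * with their callers, and report them via the "recursed_functions"
 * trace file (typically /sys/kernel/tracing/recursed_functions).
 * Opening that file for write with O_TRUNC clears the records.
 */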

#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace_output.h"

struct recursed_functions {
    unsigned long       ip;
    unsigned long       parent_ip;
};

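/*
 * Fixed-size table of recorded recursion points. nr_records holds the
 * number of valid entries, and is set to -1 while the table is being
 * cleared so that writers back off (see recursed_function_open()).
 */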
static struct recursed_functions recursed_functions[CONFIG_FTRACE_RECORD_RECURSION_SIZE];
static atomic_t nr_records;

/*
 * Cache the last found function. Yes, updates to this are racy, but
 * so is memory cache ;-)
 */
static unsigned long cached_function;

void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip)
{
    int index = 0;
    int i;
    unsigned long old;

 again:
    /* First check the last one recorded */
    if (ip == cached_function)
        return;

    i = atomic_read(&nr_records);
    /*
     * nr_records is -1 while the records are being cleared. The barrier
     * makes sure nr_records is read before the array is accessed below.
     */
    smp_mb__after_atomic();
    if (i < 0)
        return;

    /*
     * If there are two writers and this writer comes in second,
     * the cmpxchg() below to update the ip will fail. Then this
     * writer will try again. It is possible that index will now
     * be greater than nr_records. This is because the writer
     * that succeeded has not updated nr_records yet.
     * This writer could keep trying again until the other writer
     * updates nr_records. But if the other writer takes an
     * interrupt, and that interrupt locks up that CPU, we do
     * not want this CPU to lock up due to the recursion protection,
     * and have a bug report showing this CPU as the cause of
     * locking up the computer. To not lose this record, this
     * writer will simply use the next position to update the
     * recursed_functions, and it will update nr_records
     * accordingly.
     */
    if (index < i)
        index = i;
    if (index >= CONFIG_FTRACE_RECORD_RECURSION_SIZE)
        return;

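    /* See if this function has already been recorded */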
    for (i = index - 1; i >= 0; i--) {
        if (recursed_functions[i].ip == ip) {
            cached_function = ip;
            return;
        }
    }

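    /* Cache it now, so repeated hits bail out early even if the add below fails */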
    cached_function = ip;

    /*
     * We only want to add a function if it hasn't been added before.
     * Add to the current location before incrementing the count.
     * If it fails to add, then increment the index and try again.
     */
    old = cmpxchg(&recursed_functions[index].ip, 0, ip);
    if (old != 0) {
        /* Did something else already add this for us? */
        if (old == ip)
            return;
        /* Try the next location */
        index++;
        goto again;
    }

    recursed_functions[index].parent_ip = parent_ip;

    /*
     * It's still possible that we could race with the clearing:
     *
     *    CPU0                                    CPU1
     *    ----                                    ----
     *                                       ip = func
     *  nr_records = -1;
     *  recursed_functions[0] = 0;
     *                                       i = -1
     *                                       if (i < 0)
     *  nr_records = 0;
     *  (new recursion detected)
     *      recursed_functions[0] = func
     *                                       cmpxchg(recursed_functions[0],
     *                                               func, 0)
     *
     * But the worst that could happen is that we get a zero in
     * the recursed_functions array, and it's likely that "func" will
     * be recorded again.
     */
    i = atomic_read(&nr_records);
    smp_mb__after_atomic();
    if (i < 0)
        /* A clear is in progress; drop the record we just added */
        cmpxchg(&recursed_functions[index].ip, ip, 0);
    else if (i <= index)
        /* Publish the new record, unless a racing update got there first */
        atomic_cmpxchg(&nr_records, i, index + 1);
}
EXPORT_SYMBOL_GPL(ftrace_record_recursion);

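/*
 * The seq_file interface below prints one "parent_function:\tfunction"
 * line per recorded entry. A single trace_seq buffer is shared across
 * a read, serialized by recursed_function_lock.
 */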
static DEFINE_MUTEX(recursed_function_lock);
static struct trace_seq *tseq;

static void *recursed_function_seq_start(struct seq_file *m, loff_t *pos)
{
    void *ret = NULL;
    int index;

    mutex_lock(&recursed_function_lock);
    index = atomic_read(&nr_records);
    if (*pos < index)
        ret = &recursed_functions[*pos];

    /* Allocated here, used by seq_show(), and freed in seq_stop() */
    tseq = kzalloc(sizeof(*tseq), GFP_KERNEL);
    if (!tseq)
        return ERR_PTR(-ENOMEM);

    trace_seq_init(tseq);

    return ret;
}

static void *recursed_function_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
    int index;
    int p;

    index = atomic_read(&nr_records);
    p = ++(*pos);

    return p < index ? &recursed_functions[p] : NULL;
}

static void recursed_function_seq_stop(struct seq_file *m, void *v)
{
    kfree(tseq);
    mutex_unlock(&recursed_function_lock);
}

static int recursed_function_seq_show(struct seq_file *m, void *v)
{
    struct recursed_functions *record = v;
    int ret = 0;

    if (record) {
        trace_seq_print_sym(tseq, record->parent_ip, true);
        trace_seq_puts(tseq, ":\t");
        trace_seq_print_sym(tseq, record->ip, true);
        trace_seq_putc(tseq, '\n');
        ret = trace_print_seq(m, tseq);
    }

    return ret;
}

static const struct seq_operations recursed_function_seq_ops = {
    .start  = recursed_function_seq_start,
    .next   = recursed_function_seq_next,
    .stop   = recursed_function_seq_stop,
    .show   = recursed_function_seq_show
};

static int recursed_function_open(struct inode *inode, struct file *file)
{
    int ret = 0;

    mutex_lock(&recursed_function_lock);
    /* If this file was opened for write, then erase contents */
    if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
        /* disable updating records */
        atomic_set(&nr_records, -1);
        smp_mb__after_atomic();
        memset(recursed_functions, 0, sizeof(recursed_functions));
        smp_wmb();
        /* enable them again */
        atomic_set(&nr_records, 0);
    }
    if (file->f_mode & FMODE_READ)
        ret = seq_open(file, &recursed_function_seq_ops);
    mutex_unlock(&recursed_function_lock);

    return ret;
}

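/* Writes are accepted but ignored; opening with O_TRUNC is what clears the records */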
static ssize_t recursed_function_write(struct file *file,
                       const char __user *buffer,
                       size_t count, loff_t *ppos)
{
    return count;
}

static int recursed_function_release(struct inode *inode, struct file *file)
{
    if (file->f_mode & FMODE_READ)
        seq_release(inode, file);
    return 0;
}

static const struct file_operations recursed_functions_fops = {
    .open       = recursed_function_open,
    .write      = recursed_function_write,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = recursed_function_release,
};

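/* Create "recursed_functions" at the top level of the tracefs directory */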
static int __init create_recursed_functions(void)
{
    trace_create_file("recursed_functions", TRACE_MODE_WRITE,
              NULL, NULL, &recursed_functions_fops);
    return 0;
}

fs_initcall(create_recursed_functions);