Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * This program is free software; you can redistribute it and/or modify
0004  * it under the terms of the GNU General Public License as published by
0005  * the Free Software Foundation; either version 2 of the License, or
0006  * (at your option) any later version.
0007  *
0008  * This program is distributed in the hope that it will be useful,
0009  * but WITHOUT ANY WARRANTY; without even the implied warranty of
0010  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
0011  * GNU General Public License for more details.
0012  *
0013  * Authors: Waiman Long <waiman.long@hpe.com>
0014  */
0015 
0016 /*
0017  * Collect locking event counts
0018  */
0019 #include <linux/debugfs.h>
0020 #include <linux/sched.h>
0021 #include <linux/sched/clock.h>
0022 #include <linux/fs.h>
0023 
0024 #include "lock_events.h"
0025 
/*
 * Redefine LOCK_EVENT() so that each entry in lock_events_list.h expands
 * to a designated initializer mapping LOCKEVENT_<name> to the string
 * "<name>" when the list is included inside lockevent_names[] below.
 */
#undef  LOCK_EVENT
#define LOCK_EVENT(name)    [LOCKEVENT_ ## name] = #name,

/* Name of the debugfs directory holding one file per locking event. */
#define LOCK_EVENTS_DIR     "lock_event_counts"
/*
 * When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different
 * types of locks will be reported under the <debugfs>/lock_event_counts/
 * directory. See lock_events_list.h for the list of available locking
 * events.
 *
 * Writing to the special ".reset_counts" file will reset all the above
 * locking event counts. This is a very slow operation and so should not
 * be done frequently.
 *
 * These event counts are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead making the counts usable even in a production
 * environment.
 */
static const char * const lockevent_names[lockevent_num + 1] = {

    /*
     * X-macro expansion: with the LOCK_EVENT() definition above, this
     * include fills in one "[LOCKEVENT_<name>] = \"<name>\"," entry per
     * event listed in lock_events_list.h.
     */
#include "lock_events_list.h"

    /* Extra slot (hence "+ 1") for the write-only reset control file. */
    [LOCKEVENT_reset_cnts] = ".reset_counts",
};
0052 
/*
 * Per-cpu counts
 *
 * One counter slot per event ID. lockevent_read() sums a slot across all
 * possible CPUs on each read; lockevent_write() zeroes every slot when
 * the reset file is written.
 */
DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);
0057 
0058 /*
0059  * The lockevent_read() function can be overridden.
0060  */
0061 ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
0062                   size_t count, loff_t *ppos)
0063 {
0064     char buf[64];
0065     int cpu, id, len;
0066     u64 sum = 0;
0067 
0068     /*
0069      * Get the counter ID stored in file->f_inode->i_private
0070      */
0071     id = (long)file_inode(file)->i_private;
0072 
0073     if (id >= lockevent_num)
0074         return -EBADF;
0075 
0076     for_each_possible_cpu(cpu)
0077         sum += per_cpu(lockevents[id], cpu);
0078     len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
0079 
0080     return simple_read_from_buffer(user_buf, count, ppos, buf, len);
0081 }
0082 
0083 /*
0084  * Function to handle write request
0085  *
0086  * When idx = reset_cnts, reset all the counts.
0087  */
0088 static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
0089                size_t count, loff_t *ppos)
0090 {
0091     int cpu;
0092 
0093     /*
0094      * Get the counter ID stored in file->f_inode->i_private
0095      */
0096     if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
0097         return count;
0098 
0099     for_each_possible_cpu(cpu) {
0100         int i;
0101         unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
0102 
0103         for (i = 0 ; i < lockevent_num; i++)
0104             WRITE_ONCE(ptr[i], 0);
0105     }
0106     return count;
0107 }
0108 
/*
 * Debugfs data structures
 *
 * A single file_operations instance serves every event file: the read
 * and write handlers above dispatch on the event ID stored in the
 * inode's i_private.
 */
static const struct file_operations fops_lockevent = {
    .read = lockevent_read,
    .write = lockevent_write,
    .llseek = default_llseek,
};
0117 
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#include <asm/paravirt.h>

/*
 * Decide whether the named event file should be omitted from debugfs.
 *
 * PV qspinlock events (names prefixed "pv_") are meaningless on bare
 * metal, so they are hidden unless PV spinlocks are actually in use.
 */
static bool __init skip_lockevent(const char *name)
{
    /* Cached probe result: -1 = not probed, 0 = bare metal, 1 = PV. */
    static int pv_on __initdata = -1;

    if (pv_on < 0)
        pv_on = !pv_is_native_spin_unlock();

    /* Skip PV qspinlock events on bare metal. */
    return !pv_on && !memcmp(name, "pv_", 3);
}
#else
/* Without PV spinlock support there is never a reason to skip an event. */
static inline bool skip_lockevent(const char *name)
{
    return false;
}
#endif
0140 
0141 /*
0142  * Initialize debugfs for the locking event counts.
0143  */
0144 static int __init init_lockevent_counts(void)
0145 {
0146     struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL);
0147     int i;
0148 
0149     if (!d_counts)
0150         goto out;
0151 
0152     /*
0153      * Create the debugfs files
0154      *
0155      * As reading from and writing to the stat files can be slow, only
0156      * root is allowed to do the read/write to limit impact to system
0157      * performance.
0158      */
0159     for (i = 0; i < lockevent_num; i++) {
0160         if (skip_lockevent(lockevent_names[i]))
0161             continue;
0162         if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
0163                      (void *)(long)i, &fops_lockevent))
0164             goto fail_undo;
0165     }
0166 
0167     if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
0168                  d_counts, (void *)(long)LOCKEVENT_reset_cnts,
0169                  &fops_lockevent))
0170         goto fail_undo;
0171 
0172     return 0;
0173 fail_undo:
0174     debugfs_remove_recursive(d_counts);
0175 out:
0176     pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR);
0177     return -ENOMEM;
0178 }
0179 fs_initcall(init_lockevent_counts);