/*
 * Hypervisor call instrumentation: collect per-CPU call counts and timing
 * (timebase and PURR) for each hcall opcode and expose them through debugfs.
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/machdep.h>

/* One statistics record per hcall opcode, kept per CPU. */
struct hcall_stats {
	unsigned long	num_calls;	/* number of calls (on this CPU) */
	unsigned long	tb_total;	/* total timebase ticks spent in calls */
	unsigned long	purr_total;	/* total PURR ticks spent in calls */
	unsigned long	tb_start;	/* timebase snapshot at hcall entry */
	unsigned long	purr_start;	/* PURR snapshot at hcall entry */
};
#define HCALL_STAT_ARRAY_SIZE	((MAX_HCALL_OPCODE >> 2) + 1)

static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
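
/*
 * hcall opcodes are spaced 4 apart, so an opcode maps onto a slot of the
 * per-CPU array above as opcode >> 2 (equivalently opcode / 4).
 */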

/*
 * seq_file routines for displaying the statistics in debugfs.
 */
static void *hc_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * Return *pos + 1 as the iterator token: a NULL return means "stop",
	 * so index 0 cannot be handed back directly.  Slot 0 (opcode 0) is
	 * not a real hcall, so skipping it loses nothing.
	 */
	if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
		return (void *)(unsigned long)(*pos + 1);

	return NULL;
}

static void *hc_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;

	return hc_start(m, pos);
}

static void hc_stop(struct seq_file *m, void *p)
{
}

/* One line per used opcode: "<opcode> <num_calls> <tb_total> [<purr_total>]" */
static int hc_show(struct seq_file *m, void *p)
{
	unsigned long h_num = (unsigned long)p;
	struct hcall_stats *hs = m->private;

	if (hs[h_num].num_calls) {
		if (cpu_has_feature(CPU_FTR_PURR))
			seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total,
				   hs[h_num].purr_total);
		else
			seq_printf(m, "%lu %lu %lu\n", h_num<<2,
				   hs[h_num].num_calls,
				   hs[h_num].tb_total);
	}

	return 0;
}

static const struct seq_operations hcall_inst_sops = {
	.start = hc_start,
	.next  = hc_next,
	.stop  = hc_stop,
	.show  = hc_show
};

DEFINE_SEQ_ATTRIBUTE(hcall_inst);
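
/*
 * DEFINE_SEQ_ATTRIBUTE() generates hcall_inst_open() and hcall_inst_fops
 * around hcall_inst_sops; the generated open routine copies the debugfs
 * inode's i_private (the per-CPU stats array passed to debugfs_create_file()
 * below) into the seq_file private pointer consumed by hc_show().
 */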

#define HCALL_ROOT_DIR		"hcall_inst"
#define CPU_NAME_BUF_SIZE	32

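/*
 * Tracepoint probes: attached to the hcall_entry/hcall_exit tracepoints so
 * that every hypervisor call records a timestamp on entry and accumulates
 * the elapsed timebase/PURR plus a call count on exit.
 */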
static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
	h->tb_start = mftb();
	h->purr_start = mfspr(SPRN_PURR);
}

static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval,
			     unsigned long *retbuf)
{
	struct hcall_stats *h;

	if (opcode > MAX_HCALL_OPCODE)
		return;

	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
	h->num_calls++;
	h->tb_total += mftb() - h->tb_start;
	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
}
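
/*
 * Note: the counters only ever grow; this file provides no reset interface,
 * so consumers interested in a time window should diff two successive reads.
 */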

static int __init hcall_inst_init(void)
{
	struct dentry *hcall_root;
	char cpu_name_buf[CPU_NAME_BUF_SIZE];
	int cpu;

	/* Only meaningful when running as an LPAR under a hypervisor. */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	if (register_trace_hcall_entry(probe_hcall_entry, NULL))
		return -EINVAL;

	if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
		unregister_trace_hcall_entry(probe_hcall_entry, NULL);
		return -EINVAL;
	}

	/* One read-only debugfs file per possible CPU under "hcall_inst". */
	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);

	for_each_possible_cpu(cpu) {
		snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
		debugfs_create_file(cpu_name_buf, 0444, hcall_root,
				    per_cpu(hcall_stats, cpu),
				    &hcall_inst_fops);
	}

	return 0;
}
machine_device_initcall(pseries, hcall_inst_init);
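
/*
 * Usage sketch (illustrative; assumes debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/hcall_inst/cpu0
 *
 * prints one line per hcall opcode used on CPU 0 in the format
 * "<opcode> <num_calls> <tb_total> [<purr_total>]"; the PURR column is
 * present only when the CPU advertises CPU_FTR_PURR.
 */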