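/*
 * RISC-V legacy PMU driver.
 *
 * Exposes the fixed CYCLE and INSTRET counter CSRs through the common
 * riscv_pmu framework. The counters can only be read, not started or
 * stopped, so this driver serves as a minimal fallback and is registered
 * only if riscv_pmu_legacy_skip_init() has not been called.
 */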
#include <linux/mod_devicetable.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/platform_device.h>

#define RISCV_PMU_LEGACY_CYCLE		0
#define RISCV_PMU_LEGACY_INSTRET	1
#define RISCV_PMU_LEGACY_NUM_CTR	2

/* Set via riscv_pmu_legacy_skip_init() to suppress registration of this driver. */
static bool pmu_init_done;

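/* Map a generic perf hardware event onto one of the two fixed legacy counters. */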
static int pmu_legacy_ctr_get_idx(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	if (event->attr.type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;
	if (attr->config == PERF_COUNT_HW_CPU_CYCLES)
		return RISCV_PMU_LEGACY_CYCLE;
	else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS)
		return RISCV_PMU_LEGACY_INSTRET;
	else
		return -EOPNOTSUPP;
}

/* For legacy hardware, the event config and the counter index are the same. */
static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
{
	return pmu_legacy_ctr_get_idx(event);
}

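/* Read the selected counter CSR; on 32-bit, combine the high and low halves. */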
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 val;

	if (idx == RISCV_PMU_LEGACY_CYCLE) {
		val = riscv_pmu_ctr_read_csr(CSR_CYCLE);
		if (IS_ENABLED(CONFIG_32BIT))
			val = (u64)riscv_pmu_ctr_read_csr(CSR_CYCLEH) << 32 | val;
	} else if (idx == RISCV_PMU_LEGACY_INSTRET) {
		val = riscv_pmu_ctr_read_csr(CSR_INSTRET);
		if (IS_ENABLED(CONFIG_32BIT))
			val = ((u64)riscv_pmu_ctr_read_csr(CSR_INSTRETH)) << 32 | val;
	} else
		return 0;

	return val;
}

static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 initial_val = pmu_legacy_read_ctr(event);

	/*
	 * The legacy counters cannot be started, stopped or loaded with an
	 * initial value; only record the current reading in prev_count so
	 * that later reads report the correct delta.
	 */
	local64_set(&hwc->prev_count, initial_val);
}

/*
 * Minimal implementation that exposes legacy hardware through the common
 * RISC-V PMU framework. Only the CYCLE and INSTRET counters are supported,
 * and they can only be read, never started or stopped, so the driver is of
 * limited use for profiling.
 */
static void pmu_legacy_init(struct riscv_pmu *pmu)
{
	pr_info("Legacy PMU implementation is available\n");

	pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
	pmu->ctr_start = pmu_legacy_ctr_start;
	pmu->ctr_stop = NULL;
	pmu->event_map = pmu_legacy_event_map;
	pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
	pmu->ctr_get_width = NULL;
	pmu->ctr_clear_idx = NULL;
	pmu->ctr_read = pmu_legacy_read_ctr;

	perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}

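/* Allocate a riscv_pmu instance and register the legacy implementation. */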
static int pmu_legacy_device_probe(struct platform_device *pdev)
{
	struct riscv_pmu *pmu = NULL;

	pmu = riscv_pmu_alloc();
	if (!pmu)
		return -ENOMEM;
	pmu_legacy_init(pmu);

	return 0;
}

static struct platform_driver pmu_legacy_driver = {
	.probe = pmu_legacy_device_probe,
	.driver = {
		.name = RISCV_PMU_LEGACY_PDEV_NAME,
	},
};

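/*
 * Register the legacy PMU platform driver and a matching platform device at
 * late_initcall time, unless riscv_pmu_legacy_skip_init() was called earlier.
 */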
static int __init riscv_pmu_legacy_devinit(void)
{
	int ret;
	struct platform_device *pdev;

	/* riscv_pmu_legacy_skip_init() was called, so do not register the legacy PMU. */
	if (likely(pmu_init_done))
		return 0;

	ret = platform_driver_register(&pmu_legacy_driver);
	if (ret)
		return ret;

	pdev = platform_device_register_simple(RISCV_PMU_LEGACY_PDEV_NAME, -1, NULL, 0);
	if (IS_ERR(pdev)) {
		platform_driver_unregister(&pmu_legacy_driver);
		return PTR_ERR(pdev);
	}

	return ret;
}
late_initcall(riscv_pmu_legacy_devinit);

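/* Tell the legacy driver not to register; checked in riscv_pmu_legacy_devinit(). */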
void riscv_pmu_legacy_skip_init(void)
{
	pmu_init_done = true;
}