0001
0002
0003
0004 #ifndef _PERFMON_H_
0005 #define _PERFMON_H_
0006
0007 #include <linux/slab.h>
0008 #include <linux/pci.h>
0009 #include <linux/sbitmap.h>
0010 #include <linux/dmaengine.h>
0011 #include <linux/percpu-rwsem.h>
0012 #include <linux/wait.h>
0013 #include <linux/cdev.h>
0014 #include <linux/uuid.h>
0015 #include <linux/idxd.h>
0016 #include <linux/perf_event.h>
0017 #include "registers.h"
0018
0019 static inline struct idxd_pmu *event_to_pmu(struct perf_event *event)
0020 {
0021 struct idxd_pmu *idxd_pmu;
0022 struct pmu *pmu;
0023
0024 pmu = event->pmu;
0025 idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
0026
0027 return idxd_pmu;
0028 }
0029
0030 static inline struct idxd_device *event_to_idxd(struct perf_event *event)
0031 {
0032 struct idxd_pmu *idxd_pmu;
0033 struct pmu *pmu;
0034
0035 pmu = event->pmu;
0036 idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
0037
0038 return idxd_pmu->idxd;
0039 }
0040
0041 static inline struct idxd_device *pmu_to_idxd(struct pmu *pmu)
0042 {
0043 struct idxd_pmu *idxd_pmu;
0044
0045 idxd_pmu = container_of(pmu, struct idxd_pmu, pmu);
0046
0047 return idxd_pmu->idxd;
0048 }
0049
/*
 * DSA perfmon event groups.  Values are contiguous from 0 so they can
 * index per-group tables; DSA_PERF_EVENT_MAX is the count sentinel,
 * not a real group.
 */
enum dsa_perf_events {
	DSA_PERF_EVENT_WQ = 0,		/* work queue, presumably -- verify vs. spec */
	DSA_PERF_EVENT_ENGINE = 1,
	DSA_PERF_EVENT_ADDR_TRANS = 2,	/* address translation */
	DSA_PERF_EVENT_OP = 3,
	DSA_PERF_EVENT_COMPL = 4,	/* completion */
	DSA_PERF_EVENT_MAX = 5,
};
0058
/*
 * Counter filter encodings; each value selects one filter slot within a
 * counter's filter configuration (see FLTCFG_REG, which strides by 4
 * bytes per filter).  FLT_MAX is the count sentinel.
 */
enum filter_enc {
	FLT_WQ = 0,
	FLT_TC = 1,		/* traffic class, presumably -- verify vs. spec */
	FLT_PG_SZ = 2,		/* page size */
	FLT_XFER_SZ = 3,	/* transfer size */
	FLT_ENG = 4,		/* engine */
	FLT_MAX = 5,
};
0067
/* Reset controls -- NOTE(review): names pair with PERFRST_REG below; confirm */
#define CONFIG_RESET 0x0000000000000001
#define CNTR_RESET 0x0000000000000002
/* Per-counter enable and overflow-interrupt flags */
#define CNTR_ENABLE 0x0000000000000001
#define INTR_OVFL 0x0000000000000002

/* Values written to freeze/unfreeze counters (see PERFFRZ_REG below) */
#define COUNTER_FREEZE 0x00000000FFFFFFFF
#define COUNTER_UNFREEZE 0x0000000000000000
/* 32 -- presumably the overflow-status bitmap width in bits; confirm vs. user */
#define OVERFLOW_SIZE 32

/* Counter configuration (CNTRCFG) register fields */
#define CNTRCFG_ENABLE BIT(0)
#define CNTRCFG_IRQ_OVERFLOW BIT(1)
#define CNTRCFG_CATEGORY_SHIFT 8
#define CNTRCFG_EVENT_SHIFT 32
0081
/*
 * Base address of the device's perfmon register table: MMIO base plus
 * the device's perfmon table offset.  The statement expression binds
 * @_idxd to a local so the argument is evaluated exactly once.
 */
#define PERFMON_TABLE_OFFSET(_idxd) \
({ \
typeof(_idxd) __idxd = (_idxd); \
((__idxd)->reg_base + (__idxd)->perfmon_offset); \
})
/* Address of the perfmon register at @offset within the table */
#define PERFMON_REG_OFFSET(idxd, offset) \
(PERFMON_TABLE_OFFSET(idxd) + (offset))
0089
/* Fixed-offset perfmon registers: capability, reset, overflow status, freeze */
#define PERFCAP_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFCAP_OFFSET))
#define PERFRST_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFRST_OFFSET))
#define OVFSTATUS_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_OVFSTATUS_OFFSET))
#define PERFFRZ_REG(idxd) (PERFMON_REG_OFFSET(idxd, IDXD_PERFFRZ_OFFSET))

/* Filter config: 32 bytes per counter, 4 bytes per filter slot (enum filter_enc) */
#define FLTCFG_REG(idxd, cntr, flt) \
(PERFMON_REG_OFFSET(idxd, IDXD_FLTCFG_OFFSET) + ((cntr) * 32) + ((flt) * 4))

/* Per-counter registers, 8 bytes each: config, data, capability */
#define CNTRCFG_REG(idxd, cntr) \
(PERFMON_REG_OFFSET(idxd, IDXD_CNTRCFG_OFFSET) + ((cntr) * 8))
#define CNTRDATA_REG(idxd, cntr) \
(PERFMON_REG_OFFSET(idxd, IDXD_CNTRDATA_OFFSET) + ((cntr) * 8))
#define CNTRCAP_REG(idxd, cntr) \
(PERFMON_REG_OFFSET(idxd, IDXD_CNTRCAP_OFFSET) + ((cntr) * 8))

/* Per-category event capability register, 8 bytes per category */
#define EVNTCAP_REG(idxd, category) \
(PERFMON_REG_OFFSET(idxd, IDXD_EVNTCAP_OFFSET) + ((category) * 8))
0107
/*
 * DEFINE_PERFMON_FORMAT_ATTR - define a read-only sysfs "format" attribute
 * @_name:   attribute name as it appears in sysfs
 * @_format: literal string emitted when the attribute is read
 *
 * Expands to a show() handler plus a 0444 kobj_attribute named
 * format_attr_idxd_<_name>.  Uses sysfs_emit() instead of sprintf() as
 * required for sysfs show() handlers (enforces the one-page buffer
 * contract); the BUILD_BUG_ON keeps the compile-time size check.
 */
#define DEFINE_PERFMON_FORMAT_ATTR(_name, _format) \
static ssize_t __perfmon_idxd_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, \
				char *page) \
{ \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
	return sysfs_emit(page, _format "\n"); \
} \
static struct kobj_attribute format_attr_idxd_##_name = \
		__ATTR(_name, 0444, __perfmon_idxd_##_name##_show, NULL)
0118
0119 #endif