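// SPDX-License-Identifier: GPL-2.0-only
/*
 * HNS3 PMU driver: exposes the hardware performance events of the HiSilicon
 * HNS3 network device (bandwidth, packet rate, latency, interrupt rate)
 * through the perf framework.
 */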
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/perf_event.h>
#include <linux/smp.h>

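/* register offsets: global registers first, then one 0x1000-byte block per hardware event */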
#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000
#define HNS3_PMU_REG_CLOCK_FREQ 0x0020
#define HNS3_PMU_REG_BDF 0x0fe0
#define HNS3_PMU_REG_VERSION 0x0fe4
#define HNS3_PMU_REG_DEVICE_ID 0x0fe8

#define HNS3_PMU_REG_EVENT_OFFSET 0x1000
#define HNS3_PMU_REG_EVENT_SIZE 0x1000
#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00
#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04
#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08
#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c
#define HNS3_PMU_REG_EVENT_COUNTER 0x10
#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18
#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28
#define HNS3_PMU_REG_EVENT_QID_PARA 0x2c

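/* filter modes that an event may support (hns3_pmu_event_attr.filter_support) */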
#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0)
#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1)
#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2)
#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3)
#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4)
#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5)

#define HNS3_PMU_FILTER_ALL_TC 0xf
#define HNS3_PMU_FILTER_ALL_QUEUE 0xffff

#define HNS3_PMU_CTRL_SUBEVENT_S 4
#define HNS3_PMU_CTRL_FILTER_MODE_S 24

#define HNS3_PMU_GLOBAL_START BIT(0)

#define HNS3_PMU_EVENT_STATUS_RESET BIT(11)
#define HNS3_PMU_EVENT_EN BIT(12)
#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15)

#define HNS3_PMU_QID_PARA_FUNC_S 0
#define HNS3_PMU_QID_PARA_QUEUE_S 16

#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0)
#define HNS3_PMU_QID_CTRL_DONE BIT(1)
#define HNS3_PMU_QID_CTRL_MISS BIT(2)

#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1)

#define HNS3_PMU_MAX_HW_EVENTS 8

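/*
 * Each hardware event owns a main counter and an extension counter. Bit 16 of
 * config (ext_counter_used) selects which of the two a perf event reads, so
 * every hardware event below is exposed as a pair of perf events, e.g.
 * *_byte_num/*_time, *_packet_num/*_time or *_time/*_packet_num.
 */

/* bandwidth events */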
#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001
#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001
#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002
#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002
#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003
#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003
#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004
#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004
#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005
#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005
#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006
#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006
#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008
#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008
#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009
#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009
#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a
#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a
#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b
#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b
#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c
#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c
#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d
#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d
#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e
#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e
#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f
#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f
#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010
#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010

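/* packet rate events */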
#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100
#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100
#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101
#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101
#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102
#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102
#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103
#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103
#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104
#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104
#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105
#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105
#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106
#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106
#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107
#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107
#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108
#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108
#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109
#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109
#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a
#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a
#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b
#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b
#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c
#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c
#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d
#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d
#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e
#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e
#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f
#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f
#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110
#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110
#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111
#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111
#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112
#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112

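/* latency events */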
#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202
#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202
#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204
#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204
#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206
#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206
#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207
#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207
#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208
#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208
#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209
#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209
#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e
#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e
#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f
#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f
#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210
#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210
#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211
#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211
#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212
#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212
#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213
#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213
#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214
#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214
#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215
#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215
#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216
#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216
#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217
#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217
#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218
#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218
#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219
#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219
#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a
#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a
#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c
#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c

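/* interrupt rate events */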
#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300
#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300

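/* filter modes supported by each bandwidth event */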
#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07
#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f
#define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f
#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f
#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f
#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11
#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11
#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b
#define HNS3_PMU_FILTER_BW_WR_EBD 0x11
#define HNS3_PMU_FILTER_BW_RD_FBD 0x01
#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b
#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01
#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01
#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01
#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01

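/* filter modes supported by each packet rate event */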
#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07
#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07
#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f
#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f
#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f
#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f
#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11
#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f
#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11
#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b
#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11
#define HNS3_PMU_FILTER_PPS_RD_FBD 0x01
#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b
#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01
#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01
#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01
#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01
#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01
#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01

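/* filter modes supported by each latency event */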
#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01
#define HNS3_PMU_FILTER_DLY_TX 0x01
#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07
#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07
#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07
#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07
#define HNS3_PMU_FILTER_DLY_RPU 0x11
#define HNS3_PMU_FILTER_DLY_TPU 0x1f
#define HNS3_PMU_FILTER_DLY_RPE 0x01
#define HNS3_PMU_FILTER_DLY_TPE 0x0b
#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b
#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b
#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11
#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01
#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b
#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01
#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01
#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01
#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01
#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01

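/* filter mode supported by the interrupt rate event */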
#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01

enum hns3_pmu_hw_filter_mode {
	HNS3_PMU_HW_FILTER_GLOBAL,
	HNS3_PMU_HW_FILTER_PORT,
	HNS3_PMU_HW_FILTER_PORT_TC,
	HNS3_PMU_HW_FILTER_FUNC,
	HNS3_PMU_HW_FILTER_FUNC_QUEUE,
	HNS3_PMU_HW_FILTER_FUNC_INTR,
};

struct hns3_pmu_event_attr {
	u32 event;
	u16 filter_support;
};

struct hns3_pmu {
	struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS];
	struct hlist_node node;
	struct pci_dev *pdev;
	struct pmu pmu;
	void __iomem *base;
	int irq;
	int on_cpu;
	u32 identifier;
	u32 hw_clk_freq;

	u16 bdf_min;
	u16 bdf_max;
};

#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu))

#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff)

#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff)
#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07))
#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func))

#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \
static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \
{ \
	return FIELD_GET(GENMASK_ULL(_end, _start), \
			 event->attr._config); \
}

HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7);
HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15);
HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16);
HNS3_PMU_FILTER_ATTR(port, config1, 0, 3);
HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7);
HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23);
HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39);
HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51);
HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);

#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \
	HNS3_PMU_FILTER_BW_##_name})
#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_BW_##_name##_TIME, \
	HNS3_PMU_FILTER_BW_##_name})
#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \
	HNS3_PMU_FILTER_PPS_##_name})
#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_PPS_##_name##_TIME, \
	HNS3_PMU_FILTER_PPS_##_name})
#define HNS3_DLY_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_DLY_##_name##_TIME, \
	HNS3_PMU_FILTER_DLY_##_name})
#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \
	HNS3_PMU_FILTER_DLY_##_name})
#define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \
	HNS3_PMU_FILTER_INTR_##_name})
#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\
	HNS3_PMU_EVT_PPS_##_name##_TIME, \
	HNS3_PMU_FILTER_INTR_##_name})

static ssize_t hns3_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);

	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}

static ssize_t hns3_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct hns3_pmu_event_attr *event;
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	event = eattr->var;

	return sysfs_emit(buf, "config=0x%x\n", event->event);
}

static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hns3_pmu_event_attr *event;
	struct dev_ext_attribute *eattr;
	int len;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	event = eattr->var;

	len = sysfs_emit_at(buf, 0, "filter mode supported: ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)
		len += sysfs_emit_at(buf, len, "global ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)
		len += sysfs_emit_at(buf, len, "port ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)
		len += sysfs_emit_at(buf, len, "port-tc ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)
		len += sysfs_emit_at(buf, len, "func ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)
		len += sysfs_emit_at(buf, len, "func-queue ");
	if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)
		len += sysfs_emit_at(buf, len, "func-intr ");

	len += sysfs_emit_at(buf, len, "\n");

	return len;
}

#define HNS3_PMU_ATTR(_name, _func, _config) \
	(&((struct dev_ext_attribute[]) { \
		{ __ATTR(_name, 0444, _func, NULL), (void *)_config } \
	})[0].attr.attr)

#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
	HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
#define HNS3_PMU_EVENT_ATTR(_name, _event) \
	HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
	HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event)

#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \
	HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
	HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \
	HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
	HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \
	HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
	HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \
	HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
	HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))

#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \
	HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \
	HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro))
#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \
	HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \
	HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro))
#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \
	HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \
	HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro))
#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \
	HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \
	HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro))

static u8 hns3_pmu_hw_filter_modes[] = {
	HNS3_PMU_HW_FILTER_GLOBAL,
	HNS3_PMU_HW_FILTER_PORT,
	HNS3_PMU_HW_FILTER_PORT_TC,
	HNS3_PMU_HW_FILTER_FUNC,
	HNS3_PMU_HW_FILTER_FUNC_QUEUE,
	HNS3_PMU_HW_FILTER_FUNC_INTR,
};

#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \
	((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)])

static ssize_t identifier_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));

	return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier);
}
static DEVICE_ATTR_RO(identifier);

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu);
}
static DEVICE_ATTR_RO(cpumask);

static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
	u16 bdf = hns3_pmu->bdf_min;

	return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
			  PCI_SLOT(bdf), PCI_FUNC(bdf));
}
static DEVICE_ATTR_RO(bdf_min);

static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));
	u16 bdf = hns3_pmu->bdf_max;

	return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf),
			  PCI_SLOT(bdf), PCI_FUNC(bdf));
}
static DEVICE_ATTR_RO(bdf_max);

static ssize_t hw_clk_freq_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev));

	return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq);
}
static DEVICE_ATTR_RO(hw_clk_freq);

static struct attribute *hns3_pmu_events_attr[] = {
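	/* bandwidth events */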
	HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU),
	HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU),
	HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE),
	HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU),
	HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU),
	HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
	HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
	HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD),
	HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD),
	HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD),
	HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD),
	HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0),
	HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1),
	HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0),
	HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1),

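	/* packet rate events */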
	HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU),
	HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU),
	HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU),
	HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE),
	HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU),
	HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU),
	HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
	HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
	HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
	HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD),
	HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD),
	HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD),
	HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD),
	HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0),
	HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1),
	HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0),
	HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1),
	HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
	HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),

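	/* latency events */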
	HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH),
	HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX),
	HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
	HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
	HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
	HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
	HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU),
	HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU),
	HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE),
	HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE),
	HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH),
	HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD),
	HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD),
	HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD),
	HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD),
	HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0),
	HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1),
	HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0),
	HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1),
	HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE),

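	/* interrupt rate events */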
	HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC),

	NULL
};

static struct attribute *hns3_pmu_filter_mode_attr[] = {
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0),
	HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1),

	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE),
	HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE),

	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1),
	HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE),

	HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC),

	NULL
};

static struct attribute_group hns3_pmu_events_group = {
	.name = "events",
	.attrs = hns3_pmu_events_attr,
};

static struct attribute_group hns3_pmu_filter_mode_group = {
	.name = "filtermode",
	.attrs = hns3_pmu_filter_mode_attr,
};

static struct attribute *hns3_pmu_format_attr[] = {
	HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"),
	HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"),
	HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"),
	HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"),
	HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"),
	HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"),
	HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"),
	HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"),
	HNS3_PMU_FORMAT_ATTR(global, "config1:52"),
	NULL
};

static struct attribute_group hns3_pmu_format_group = {
	.name = "format",
	.attrs = hns3_pmu_format_attr,
};

static struct attribute *hns3_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static struct attribute_group hns3_pmu_cpumask_attr_group = {
	.attrs = hns3_pmu_cpumask_attrs,
};

static struct attribute *hns3_pmu_identifier_attrs[] = {
	&dev_attr_identifier.attr,
	NULL
};

static struct attribute_group hns3_pmu_identifier_attr_group = {
	.attrs = hns3_pmu_identifier_attrs,
};

static struct attribute *hns3_pmu_bdf_range_attrs[] = {
	&dev_attr_bdf_min.attr,
	&dev_attr_bdf_max.attr,
	NULL
};

static struct attribute_group hns3_pmu_bdf_range_attr_group = {
	.attrs = hns3_pmu_bdf_range_attrs,
};

static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = {
	&dev_attr_hw_clk_freq.attr,
	NULL
};

static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = {
	.attrs = hns3_pmu_hw_clk_freq_attrs,
};

static const struct attribute_group *hns3_pmu_attr_groups[] = {
	&hns3_pmu_events_group,
	&hns3_pmu_filter_mode_group,
	&hns3_pmu_format_group,
	&hns3_pmu_cpumask_attr_group,
	&hns3_pmu_identifier_attr_group,
	&hns3_pmu_bdf_range_attr_group,
	&hns3_pmu_hw_clk_freq_attr_group,
	NULL
};

static u32 hns3_pmu_get_event(struct perf_event *event)
{
	return hns3_pmu_get_ext_counter_used(event) << 16 |
	       hns3_pmu_get_event_type(event) << 8 |
	       hns3_pmu_get_subevent(event);
}

static u32 hns3_pmu_get_real_event(struct perf_event *event)
{
	return hns3_pmu_get_event_type(event) << 8 |
	       hns3_pmu_get_subevent(event);
}

static u32 hns3_pmu_get_offset(u32 offset, u32 idx)
{
	return offset + HNS3_PMU_REG_EVENT_OFFSET +
	       HNS3_PMU_REG_EVENT_SIZE * idx;
}

static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	return readl(hns3_pmu->base + offset);
}

static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
			    u32 val)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	writel(val, hns3_pmu->base + offset);
}

static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	return readq(hns3_pmu->base + offset);
}

static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx,
			    u64 val)
{
	u32 offset = hns3_pmu_get_offset(reg_offset, idx);

	writeq(val, hns3_pmu->base + offset);
}

static bool hns3_pmu_cmp_event(struct perf_event *target,
			       struct perf_event *event)
{
	return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event);
}

static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu,
					   struct perf_event *event)
{
	struct perf_event *sibling;
	int hw_event_used = 0;
	int idx;

	for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
		sibling = hns3_pmu->hw_events[idx];
		if (!sibling)
			continue;

		hw_event_used++;

		if (!hns3_pmu_cmp_event(sibling, event))
			continue;

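		/* an identical event in the same group can share this hardware counter */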
		if (sibling->group_leader == event->group_leader)
			return idx;
	}

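	/* no related event and all hardware counters are in use */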
	if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS)
		return -EBUSY;

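	/* no related event, but a free hardware counter is still available */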
	return -ENOENT;
}

static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu)
{
	int idx;

	for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
		if (!hns3_pmu->hw_events[idx])
			return idx;
	}

	return -EBUSY;
}

static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf)
{
	struct pci_dev *pdev;

	if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) {
		pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf);
		return false;
	}

	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus),
					   PCI_BUS_NUM(bdf),
					   GET_PCI_DEVFN(bdf));
	if (!pdev) {
		pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf);
		return false;
	}

	pci_dev_put(pdev);
	return true;
}

static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
				  u16 queue)
{
	u32 val;

	val = GET_PCI_DEVFN(bdf);
	val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val);
}

static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx)
{
	bool queue_id_valid = false;
	u32 reg_qid_ctrl, val;
	int err;

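	/* enable the queue id request */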
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx,
			HNS3_PMU_QID_CTRL_REQ_ENABLE);

	reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx);
	err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val,
				 val & HNS3_PMU_QID_CTRL_DONE, 1, 1000);
	if (err == -ETIMEDOUT) {
		pci_err(hns3_pmu->pdev, "QID request timeout!\n");
		goto out;
	}

	queue_id_valid = !(val & HNS3_PMU_QID_CTRL_MISS);

out:
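	/* disable the queue id request */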
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0);

	return queue_id_valid;
}

static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf,
				 u16 queue)
{
	hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue);

	return hns3_pmu_qid_req_start(hns3_pmu, idx);
}

static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event)
{
	struct hns3_pmu_event_attr *pmu_event;
	struct dev_ext_attribute *eattr;
	struct device_attribute *dattr;
	struct attribute *attr;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) {
		attr = hns3_pmu_events_attr[i];
		dattr = container_of(attr, struct device_attribute, attr);
		eattr = container_of(dattr, struct dev_ext_attribute, attr);
		pmu_event = eattr->var;

		if (event == pmu_event->event)
			return pmu_event;
	}

	return NULL;
}

static int hns3_pmu_set_func_mode(struct perf_event *event,
				  struct hns3_pmu *hns3_pmu)
{
	struct hw_perf_event *hwc = &event->hw;
	u16 bdf = hns3_pmu_get_bdf(event);

	if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
		return -ENOENT;

	HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC);

	return 0;
}

static int hns3_pmu_set_func_queue_mode(struct perf_event *event,
					struct hns3_pmu *hns3_pmu)
{
	u16 queue_id = hns3_pmu_get_queue(event);
	struct hw_perf_event *hwc = &event->hw;
	u16 bdf = hns3_pmu_get_bdf(event);

	if (!hns3_pmu_valid_bdf(hns3_pmu, bdf))
		return -ENOENT;

	if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) {
		pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id);
		return -ENOENT;
	}

	HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE);

	return 0;
}

static bool
hns3_pmu_is_enabled_global_mode(struct perf_event *event,
				struct hns3_pmu_event_attr *pmu_event)
{
	u8 global = hns3_pmu_get_global(event);

	if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL))
		return false;

	return global;
}

static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event,
					  struct hns3_pmu_event_attr *pmu_event)
{
	u16 queue_id = hns3_pmu_get_queue(event);
	u16 bdf = hns3_pmu_get_bdf(event);

	if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC))
		return false;
	else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE)
		return false;

	return bdf;
}

static bool
hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event,
				    struct hns3_pmu_event_attr *pmu_event)
{
	u16 queue_id = hns3_pmu_get_queue(event);
	u16 bdf = hns3_pmu_get_bdf(event);

	if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE))
		return false;
	else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE)
		return false;

	return bdf;
}

static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event,
					  struct hns3_pmu_event_attr *pmu_event)
{
	u8 tc_id = hns3_pmu_get_tc(event);

	if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT))
		return false;

	return tc_id == HNS3_PMU_FILTER_ALL_TC;
}

static bool
hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event,
				 struct hns3_pmu_event_attr *pmu_event)
{
	u8 tc_id = hns3_pmu_get_tc(event);

	if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC))
		return false;

	return tc_id != HNS3_PMU_FILTER_ALL_TC;
}

static bool
hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event,
				   struct hns3_pmu *hns3_pmu,
				   struct hns3_pmu_event_attr *pmu_event)
{
	u16 bdf = hns3_pmu_get_bdf(event);

	if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR))
		return false;

	return hns3_pmu_valid_bdf(hns3_pmu, bdf);
}

static int hns3_pmu_select_filter_mode(struct perf_event *event,
				       struct hns3_pmu *hns3_pmu)
{
	u32 event_id = hns3_pmu_get_event(event);
	struct hw_perf_event *hwc = &event->hw;
	struct hns3_pmu_event_attr *pmu_event;

	pmu_event = hns3_pmu_get_pmu_event(event_id);
	if (!pmu_event) {
		pci_err(hns3_pmu->pdev, "Invalid pmu event\n");
		return -ENOENT;
	}

	if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) {
		HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL);
		return 0;
	}

	if (hns3_pmu_is_enabled_func_mode(event, pmu_event))
		return hns3_pmu_set_func_mode(event, hns3_pmu);

	if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event))
		return hns3_pmu_set_func_queue_mode(event, hns3_pmu);

	if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) {
		HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT);
		return 0;
	}

	if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) {
		HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC);
		return 0;
	}

	if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) {
		HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR);
		return 0;
	}

	return -ENOENT;
}

static bool hns3_pmu_validate_event_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS];
	int counters = 1;
	int num;

	event_group[0] = leader;
	if (!is_software_event(leader)) {
		if (leader->pmu != event->pmu)
			return false;

		if (leader != event && !hns3_pmu_cmp_event(leader, event))
			event_group[counters++] = event;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (sibling->pmu != event->pmu)
			return false;

		for (num = 0; num < counters; num++) {
			if (hns3_pmu_cmp_event(event_group[num], sibling))
				break;
		}

		if (num == counters)
			event_group[counters++] = sibling;
	}

	return counters <= HNS3_PMU_MAX_HW_EVENTS;
}

static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u16 intr_id = hns3_pmu_get_intr(event);
	u8 port_id = hns3_pmu_get_port(event);
	u16 bdf = hns3_pmu_get_bdf(event);
	u8 tc_id = hns3_pmu_get_tc(event);
	u8 filter_mode;

	filter_mode = *(u8 *)hwc->addr_filters;
	switch (filter_mode) {
	case HNS3_PMU_HW_FILTER_PORT:
		return FILTER_CONDITION_PORT(port_id);
	case HNS3_PMU_HW_FILTER_PORT_TC:
		return FILTER_CONDITION_PORT_TC(port_id, tc_id);
	case HNS3_PMU_HW_FILTER_FUNC:
	case HNS3_PMU_HW_FILTER_FUNC_QUEUE:
		return GET_PCI_DEVFN(bdf);
	case HNS3_PMU_HW_FILTER_FUNC_INTR:
		return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id);
	default:
		break;
	}

	return 0;
}

static void hns3_pmu_config_filter(struct perf_event *event)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	u8 event_type = hns3_pmu_get_event_type(event);
	u8 subevent_id = hns3_pmu_get_subevent(event);
	u16 queue_id = hns3_pmu_get_queue(event);
	struct hw_perf_event *hwc = &event->hw;
	u8 filter_mode = *(u8 *)hwc->addr_filters;
	u16 bdf = hns3_pmu_get_bdf(event);
	u32 idx = hwc->idx;
	u32 val;

	val = event_type;
	val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S;
	val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S;
	val |= HNS3_PMU_EVENT_OVERFLOW_RESTART;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);

	val = hns3_pmu_get_filter_condition(event);
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val);

	if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE)
		hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id);
}

static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu,
				    struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u32 val;

	val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
	val |= HNS3_PMU_EVENT_EN;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
}

static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu,
				     struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u32 val;

	val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
	val &= ~HNS3_PMU_EVENT_EN;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
}

static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu,
				 struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u32 val;

	val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
	val &= ~HNS3_PMU_INTR_MASK_OVERFLOW;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
}

static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu,
				  struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u32 val;

	val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx);
	val |= HNS3_PMU_INTR_MASK_OVERFLOW;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val);
}

static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx)
{
	u32 val;

	val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
	val |= HNS3_PMU_EVENT_STATUS_RESET;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);

	val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx);
	val &= ~HNS3_PMU_EVENT_STATUS_RESET;
	hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val);
}

static u64 hns3_pmu_read_counter(struct perf_event *event)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);

	return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx);
}

static void hns3_pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	u32 idx = event->hw.idx;

	hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value);
	hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value);
}

static void hns3_pmu_init_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_set(&hwc->prev_count, 0);
	hns3_pmu_write_counter(event, 0);
}

static int hns3_pmu_event_init(struct perf_event *event)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

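	/* sampling and attaching to a task are not supported */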
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	event->cpu = hns3_pmu->on_cpu;

	idx = hns3_pmu_get_event_idx(hns3_pmu);
	if (idx < 0) {
		pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n",
			HNS3_PMU_MAX_HW_EVENTS);
		return -EBUSY;
	}

	hwc->idx = idx;

	ret = hns3_pmu_select_filter_mode(event, hns3_pmu);
	if (ret) {
		pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret);
		return ret;
	}

	if (!hns3_pmu_validate_event_group(event)) {
		pci_err(hns3_pmu->pdev, "Invalid event group.\n");
		return -EINVAL;
	}

	if (hns3_pmu_get_ext_counter_used(event))
		hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER;
	else
		hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER;

	return 0;
}

static void hns3_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 new_cnt, prev_cnt, delta;

	do {
		prev_cnt = local64_read(&hwc->prev_count);
		new_cnt = hns3_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) !=
		 prev_cnt);

	delta = new_cnt - prev_cnt;
	local64_add(delta, &event->count);
}

static void hns3_pmu_start(struct perf_event *event, int flags)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	hns3_pmu_config_filter(event);
	hns3_pmu_init_counter(event);
	hns3_pmu_enable_intr(hns3_pmu, hwc);
	hns3_pmu_enable_counter(hns3_pmu, hwc);

	perf_event_update_userpage(event);
}

static void hns3_pmu_stop(struct perf_event *event, int flags)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hns3_pmu_disable_counter(hns3_pmu, hwc);
	hns3_pmu_disable_intr(hns3_pmu, hwc);

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	hns3_pmu_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

static int hns3_pmu_add(struct perf_event *event, int flags)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

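	/* check the working events for an identical event whose counter can be shared */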
	idx = hns3_pmu_find_related_event_idx(hns3_pmu, event);
	if (idx < 0 && idx != -ENOENT)
		return idx;

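	/* a related event was found: share its hardware counter */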
	if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) {
		hwc->idx = idx;
		goto start_count;
	}

	idx = hns3_pmu_get_event_idx(hns3_pmu);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hns3_pmu->hw_events[idx] = event;

start_count:
	if (flags & PERF_EF_START)
		hns3_pmu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void hns3_pmu_del(struct perf_event *event, int flags)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hns3_pmu_stop(event, PERF_EF_UPDATE);
	hns3_pmu->hw_events[hwc->idx] = NULL;
	perf_event_update_userpage(event);
}

static void hns3_pmu_enable(struct pmu *pmu)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
	u32 val;

	val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
	val |= HNS3_PMU_GLOBAL_START;
	writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
}

static void hns3_pmu_disable(struct pmu *pmu)
{
	struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu);
	u32 val;

	val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
	val &= ~HNS3_PMU_GLOBAL_START;
	writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL);
}

static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
{
	u16 device_id;
	char *name;
	u32 val;

	hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2];
	if (!hns3_pmu->base) {
		pci_err(pdev, "ioremap failed\n");
		return -ENOMEM;
	}

	hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ);

	val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF);
	hns3_pmu->bdf_min = val & 0xffff;
	hns3_pmu->bdf_max = val >> 16;

	val = readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID);
	device_id = val & 0xffff;
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id);
	if (!name)
		return -ENOMEM;

	hns3_pmu->pdev = pdev;
	hns3_pmu->on_cpu = -1;
	hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION);
	hns3_pmu->pmu = (struct pmu) {
		.name = name,
		.module = THIS_MODULE,
		.event_init = hns3_pmu_event_init,
		.pmu_enable = hns3_pmu_enable,
		.pmu_disable = hns3_pmu_disable,
		.add = hns3_pmu_add,
		.del = hns3_pmu_del,
		.start = hns3_pmu_start,
		.stop = hns3_pmu_stop,
		.read = hns3_pmu_read,
		.task_ctx_nr = perf_invalid_context,
		.attr_groups = hns3_pmu_attr_groups,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	return 0;
}

static irqreturn_t hns3_pmu_irq(int irq, void *data)
{
	struct hns3_pmu *hns3_pmu = data;
	u32 intr_status, idx;

	for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) {
		intr_status = hns3_pmu_readl(hns3_pmu,
					     HNS3_PMU_REG_EVENT_INTR_STATUS,
					     idx);

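		/*
		 * The counters are configured with HNS3_PMU_EVENT_OVERFLOW_RESTART,
		 * so just clear any pending interrupt status here.
		 */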
		if (intr_status)
			hns3_pmu_clear_intr_status(hns3_pmu, idx);
	}

	return IRQ_HANDLED;
}

static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hns3_pmu *hns3_pmu;

	hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
	if (!hns3_pmu)
		return -ENODEV;

	if (hns3_pmu->on_cpu == -1) {
		hns3_pmu->on_cpu = cpu;
		irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu));
	}

	return 0;
}

static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hns3_pmu *hns3_pmu;
	unsigned int target;

	hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node);
	if (!hns3_pmu)
		return -ENODEV;

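	/* nothing to do if the outgoing CPU is not the one this PMU uses */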
	if (hns3_pmu->on_cpu != cpu)
		return 0;

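	/* pick any other online CPU to take over */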
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target);
	hns3_pmu->on_cpu = target;
	irq_set_affinity(hns3_pmu->irq, cpumask_of(target));

	return 0;
}

static void hns3_pmu_free_irq(void *data)
{
	struct pci_dev *pdev = data;

	pci_free_irq_vectors(pdev);
}

static int hns3_pmu_irq_register(struct pci_dev *pdev,
				 struct hns3_pmu *hns3_pmu)
{
	int irq, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret);
		return ret;
	}

	ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
	if (ret) {
		pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
		return ret;
	}

	irq = pci_irq_vector(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0,
			       hns3_pmu->pmu.name, hns3_pmu);
	if (ret) {
		pci_err(pdev, "failed to register irq, ret = %d.\n", ret);
		return ret;
	}

	hns3_pmu->irq = irq;

	return 0;
}

static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
{
	int ret;

	ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu);
	if (ret)
		return ret;

	ret = hns3_pmu_irq_register(pdev, hns3_pmu);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
				       &hns3_pmu->node);
	if (ret) {
		pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret);
		return ret;
	}

	ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
	if (ret) {
		pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
		cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
					    &hns3_pmu->node);
	}

	return ret;
}

static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
{
	struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);

	perf_pmu_unregister(&hns3_pmu->pmu);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
				    &hns3_pmu->node);
}

static int hns3_pmu_init_dev(struct pci_dev *pdev)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret);
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu");
	if (ret < 0) {
		pci_err(pdev, "failed to request pci region, ret = %d.\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hns3_pmu *hns3_pmu;
	int ret;

	hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL);
	if (!hns3_pmu)
		return -ENOMEM;

	ret = hns3_pmu_init_dev(pdev);
	if (ret)
		return ret;

	ret = hns3_pmu_init_pmu(pdev, hns3_pmu);
	if (ret) {
		pci_clear_master(pdev);
		return ret;
	}

	pci_set_drvdata(pdev, hns3_pmu);

	return ret;
}

static void hns3_pmu_remove(struct pci_dev *pdev)
{
	hns3_pmu_uninit_pmu(pdev);
	pci_clear_master(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id hns3_pmu_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pmu_ids);

static struct pci_driver hns3_pmu_driver = {
	.name = "hns3_pmu",
	.id_table = hns3_pmu_ids,
	.probe = hns3_pmu_probe,
	.remove = hns3_pmu_remove,
};

static int __init hns3_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
				      "AP_PERF_ARM_HNS3_PMU_ONLINE",
				      hns3_pmu_online_cpu,
				      hns3_pmu_offline_cpu);
	if (ret) {
		pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret);
		return ret;
	}

	ret = pci_register_driver(&hns3_pmu_driver);
	if (ret) {
		pr_err("failed to register pci driver, ret = %d.\n", ret);
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
	}

	return ret;
}
module_init(hns3_pmu_module_init);

static void __exit hns3_pmu_module_exit(void)
{
	pci_unregister_driver(&hns3_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE);
}
module_exit(hns3_pmu_module_exit);

MODULE_DESCRIPTION("HNS3 PMU driver");
MODULE_LICENSE("GPL v2");