// SPDX-License-Identifier: GPL-2.0
/*
 * APM X-Gene SoC PMU (Performance Monitor Unit)
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Hoan Tran <hotran@apm.com>
 *         Tai Nguyen <ttnguyen@apm.com>
 */
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define CSW_CSWCR			0x0000
#define CSW_CSWCR_DUALMCB_MASK		BIT(0)
#define CSW_CSWCR_MCB0_ROUTING(x)	(((x) & 0x0C) >> 2)
#define CSW_CSWCR_MCB1_ROUTING(x)	(((x) & 0x30) >> 4)
#define MCBADDRMR			0x0000
#define MCBADDRMR_DUALMCU_MODE_MASK	BIT(2)

#define PCPPMU_INTSTATUS_REG	0x000
#define PCPPMU_INTMASK_REG	0x004
#define PCPPMU_INTMASK		0x0000000F
#define PCPPMU_INTENMASK	0xFFFFFFFF
#define PCPPMU_INTCLRMASK	0xFFFFFFF0
#define PCPPMU_INT_MCU		BIT(0)
#define PCPPMU_INT_MCB		BIT(1)
#define PCPPMU_INT_L3C		BIT(2)
#define PCPPMU_INT_IOB		BIT(3)

#define PCPPMU_V3_INTMASK	0x00FF33FF
#define PCPPMU_V3_INTENMASK	0xFFFFFFFF
#define PCPPMU_V3_INTCLRMASK	0xFF00CC00
#define PCPPMU_V3_INT_MCU	0x000000FF
#define PCPPMU_V3_INT_MCB	0x00000300
#define PCPPMU_V3_INT_L3C	0x00FF0000
#define PCPPMU_V3_INT_IOB	0x00003000

#define PMU_MAX_COUNTERS	4
#define PMU_CNT_MAX_PERIOD	0xFFFFFFFFULL
#define PMU_V3_CNT_MAX_PERIOD	0xFFFFFFFFFFFFFFFFULL
#define PMU_OVERFLOW_MASK	0xF
#define PMU_PMCR_E		BIT(0)
#define PMU_PMCR_P		BIT(1)

#define PMU_PMEVCNTR0		0x000
#define PMU_PMEVCNTR1		0x004
#define PMU_PMEVCNTR2		0x008
#define PMU_PMEVCNTR3		0x00C
#define PMU_PMEVTYPER0		0x400
#define PMU_PMEVTYPER1		0x404
#define PMU_PMEVTYPER2		0x408
#define PMU_PMEVTYPER3		0x40C
#define PMU_PMAMR0		0xA00
#define PMU_PMAMR1		0xA04
#define PMU_PMCNTENSET		0xC00
#define PMU_PMCNTENCLR		0xC20
#define PMU_PMINTENSET		0xC40
#define PMU_PMINTENCLR		0xC60
#define PMU_PMOVSR		0xC80
#define PMU_PMCR		0xE04

/* PMU registers for V3 */
#define PMU_PMOVSCLR		0xC80
#define PMU_PMOVSSET		0xCC0

#define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev)      (ev->hw.idx)
#define GET_EVENTID(ev)   (ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev)   (ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev)  ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
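
/*
 * Illustrative note (not from the original sources): an event is described
 * by perf_event_attr::config (the event id, see GET_EVENTID) and, for
 * v1/v2 PMUs, perf_event_attr::config1 (a 64-bit agent mask). For example,
 * with the hypothetical value config1 = 0x0000000300000001ULL:
 *
 *   GET_AGENTID(ev)  == 0x00000001  (low 32 bits, for PMU_PMAMR0)
 *   GET_AGENT1ID(ev) == 0x00000003  (high 32 bits, for PMU_PMAMR1)
 *
 * xgene_perf_enable_event() below writes the bitwise complement of these
 * masks to the agent-mask registers, so agents whose config1 bit is clear
 * are counted; config1 == 0 (the default) counts requests from all agents.
 */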

struct hw_pmu_info {
	u32 type;
	u32 enable_mask;
	void __iomem *csr;
};

struct xgene_pmu_dev {
	struct hw_pmu_info *inf;
	struct xgene_pmu *parent;
	struct pmu pmu;
	u8 max_counters;
	DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
	u64 max_period;
	const struct attribute_group **attr_groups;
	struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};

struct xgene_pmu_ops {
	void (*mask_int)(struct xgene_pmu *pmu);
	void (*unmask_int)(struct xgene_pmu *pmu);
	u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
	void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
	void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val);
	void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val);
	void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx);
	void (*reset_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*start_counters)(struct xgene_pmu_dev *pmu_dev);
	void (*stop_counters)(struct xgene_pmu_dev *pmu_dev);
};

struct xgene_pmu {
	struct device *dev;
	struct hlist_node node;
	int version;
	void __iomem *pcppmu_csr;
	u32 mcb_active_mask;
	u32 mc_active_mask;
	u32 l3c_active_mask;
	cpumask_t cpu;
	int irq;
	raw_spinlock_t lock;
	const struct xgene_pmu_ops *ops;
	struct list_head l3cpmus;
	struct list_head iobpmus;
	struct list_head mcbpmus;
	struct list_head mcpmus;
};

struct xgene_pmu_dev_ctx {
	char *name;
	struct list_head next;
	struct xgene_pmu_dev *pmu_dev;
	struct hw_pmu_info inf;
};

struct xgene_pmu_data {
	int id;
	u32 data;
};

enum xgene_pmu_version {
	PCP_PMU_V1 = 1,
	PCP_PMU_V2,
	PCP_PMU_V3,
};

enum xgene_pmu_dev_type {
	PMU_TYPE_L3C = 0,
	PMU_TYPE_IOB,
	PMU_TYPE_IOB_SLOW,
	PMU_TYPE_MCB,
	PMU_TYPE_MC,
};

/*
 * sysfs format attributes
 */
static ssize_t xgene_pmu_format_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr;

	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *) eattr->var);
}

#define XGENE_PMU_FORMAT_ATTR(_name, _config)		\
	(&((struct dev_ext_attribute[]) {		\
		{ .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
		  .var = (void *) _config, }		\
	})[0].attr.attr)

static struct attribute *l3c_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *iob_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
	XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
	NULL,
};

static struct attribute *mcb_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
	XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
	NULL,
};

static struct attribute *mc_pmu_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
	NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group iob_pmu_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_format_attrs,
};

static const struct attribute_group mcb_pmu_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_format_attrs,
};

static const struct attribute_group mc_pmu_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_format_attrs,
};

static struct attribute *l3c_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"),
	NULL,
};

static struct attribute *iob_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"),
	NULL,
};

static struct attribute *mcb_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"),
	NULL,
};

static struct attribute *mc_pmu_v3_format_attrs[] = {
	XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = l3c_pmu_v3_format_attrs,
};

static const struct attribute_group iob_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_pmu_v3_format_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = iob_slow_pmu_v3_format_attrs,
};

static const struct attribute_group mcb_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mcb_pmu_v3_format_attrs,
};

static const struct attribute_group mc_pmu_v3_format_attr_group = {
	.name = "format",
	.attrs = mc_pmu_v3_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t xgene_pmu_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id);
}

#define XGENE_PMU_EVENT_ATTR(_name, _config)		\
	PMU_EVENT_ATTR_ID(_name, xgene_pmu_event_show, _config)

static struct attribute *l3c_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
	XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
	XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
	XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
	XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
	NULL,
};

static struct attribute *iob_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
	XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
	XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
	XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
	XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
	XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
	XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
	XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
	XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
	XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
	XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
	NULL,
};

static struct attribute *mcb_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
	XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
	XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
	XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
	NULL,
};

static struct attribute *mc_pmu_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
	XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
	XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
	XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
	XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
	XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
	XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
	XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
	XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
	XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
	XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
	XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
	XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
	NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group iob_pmu_events_attr_group = {
	.name = "events",
	.attrs = iob_pmu_events_attrs,
};

static const struct attribute_group mcb_pmu_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_events_attrs,
};

static const struct attribute_group mc_pmu_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_events_attrs,
};
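
/*
 * Example usage (illustrative; this sketch follows the style of the kernel's
 * X-Gene PMU documentation rather than anything defined in this file). The
 * event names above appear under sysfs once the PMUs are registered, and
 * since xgene_perf_event_init() rejects per-task counting, they are used
 * system-wide, e.g.:
 *
 *   perf stat -a -e l3c0/read-miss/,mcb1/csw-write-request/ sleep 1
 *
 * The instance names (l3c0, mcb1, ...) are generated by
 * xgene_pmu_dev_name() further down.
 */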

static struct attribute *l3c_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(read-hit, 0x01),
	XGENE_PMU_EVENT_ATTR(read-miss, 0x02),
	XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03),
	XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04),
	XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05),
	XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06),
	XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07),
	XGENE_PMU_EVENT_ATTR(read, 0x08),
	XGENE_PMU_EVENT_ATTR(write, 0x09),
	XGENE_PMU_EVENT_ATTR(request, 0x0a),
	XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b),
	XGENE_PMU_EVENT_ATTR(tq-full, 0x0c),
	XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d),
	XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e),
	XGENE_PMU_EVENT_ATTR(odb-full, 0x10),
	XGENE_PMU_EVENT_ATTR(wbq-full, 0x11),
	XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12),
	XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13),
	XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14),
	XGENE_PMU_EVENT_ATTR(total-insertion, 0x15),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16),
	XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18),
	XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19),
	XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a),
	XGENE_PMU_EVENT_ATTR(egression, 0x1b),
	XGENE_PMU_EVENT_ATTR(replacement, 0x1c),
	XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d),
	XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e),
	XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f),
	XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20),
	XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21),
	XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22),
	XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23),
	XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24),
	XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25),
	XGENE_PMU_EVENT_ATTR(generation-flip, 0x26),
	XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27),
	NULL,
};

static struct attribute *iob_fast_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04),
	XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05),
	XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06),
	XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07),
	XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a),
	XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12),
	XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15),
	XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16),
	XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18),
	XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b),
	XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c),
	XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d),
	XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21),
	XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23),
	XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25),
	XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26),
	XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
	XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
	XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f),
	NULL,
};

static struct attribute *iob_slow_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01),
	XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02),
	XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03),
	XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04),
	XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07),
	XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08),
	XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09),
	XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10),
	NULL,
};

static struct attribute *mcb_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(req-receive, 0x01),
	XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03),
	XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08),
	XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09),
	XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b),
	XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d),
	XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e),
	XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f),
	XGENE_PMU_EVENT_ATTR(gack-recv, 0x10),
	XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11),
	XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12),
	XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13),
	XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14),
	XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15),
	XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16),
	XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17),
	XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18),
	XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19),
	XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a),
	XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b),
	XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c),
	XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d),
	XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e),
	XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f),
	XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21),
	XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22),
	XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23),
	NULL,
};

static struct attribute *mc_pmu_v3_events_attrs[] = {
	XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
	XGENE_PMU_EVENT_ATTR(act-sent, 0x01),
	XGENE_PMU_EVENT_ATTR(pre-sent, 0x02),
	XGENE_PMU_EVENT_ATTR(rd-sent, 0x03),
	XGENE_PMU_EVENT_ATTR(rda-sent, 0x04),
	XGENE_PMU_EVENT_ATTR(wr-sent, 0x05),
	XGENE_PMU_EVENT_ATTR(wra-sent, 0x06),
	XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07),
	XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08),
	XGENE_PMU_EVENT_ATTR(prea-sent, 0x09),
	XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a),
	XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b),
	XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c),
	XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d),
	XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e),
	XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f),
	XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10),
	XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12),
	XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13),
	XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14),
	XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15),
	XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16),
	XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17),
	XGENE_PMU_EVENT_ATTR(rd-retry, 0x18),
	XGENE_PMU_EVENT_ATTR(wr-retry, 0x19),
	XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a),
	XGENE_PMU_EVENT_ATTR(rank-change, 0x1b),
	XGENE_PMU_EVENT_ATTR(dir-change, 0x1c),
	XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d),
	XGENE_PMU_EVENT_ATTR(rank-active, 0x1e),
	XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f),
	XGENE_PMU_EVENT_ATTR(rank-pd, 0x20),
	XGENE_PMU_EVENT_ATTR(rank-sref, 0x21),
	XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22),
	XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23),
	XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24),
	XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25),
	XGENE_PMU_EVENT_ATTR(tz-fail, 0x26),
	XGENE_PMU_EVENT_ATTR(dram-errc, 0x27),
	XGENE_PMU_EVENT_ATTR(dram-errd, 0x28),
	XGENE_PMU_EVENT_ATTR(rd-enq, 0x29),
	XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a),
	XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b),
	XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c),
	NULL,
};

static const struct attribute_group l3c_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = l3c_pmu_v3_events_attrs,
};

static const struct attribute_group iob_fast_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_fast_pmu_v3_events_attrs,
};

static const struct attribute_group iob_slow_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = iob_slow_pmu_v3_events_attrs,
};

static const struct attribute_group mcb_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mcb_pmu_v3_events_attrs,
};

static const struct attribute_group mc_pmu_v3_events_attr_group = {
	.name = "events",
	.attrs = mc_pmu_v3_events_attrs,
};
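
/*
 * Illustrative summary (not from the original sources): relative to v1/v2,
 * the v3 PMU exposes wider event-id format fields, drops the per-agent mask
 * (xgene_pmu_v3_write_agentmsk() below is a no-op), and backs each event
 * with a 64-bit counter built from two 32-bit registers. The perf command
 * line is the same on both generations, e.g.:
 *
 *   perf stat -a -e l3c0/read-miss/ sleep 1
 */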

/*
 * sysfs cpumask attributes
 */
static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *xgene_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
	.attrs = xgene_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
	&l3c_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *iob_pmu_attr_groups[] = {
	&iob_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_attr_groups[] = {
	&mcb_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_attr_groups[] = {
	&mc_pmu_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_events_attr_group,
	NULL
};

/*
 * Per PMU device attribute groups of PMU v3
 */
static const struct attribute_group *l3c_pmu_v3_attr_groups[] = {
	&l3c_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&l3c_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = {
	&iob_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_fast_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = {
	&iob_slow_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&iob_slow_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mcb_pmu_v3_attr_groups[] = {
	&mcb_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mcb_pmu_v3_events_attr_group,
	NULL
};

static const struct attribute_group *mc_pmu_v3_attr_groups[] = {
	&mc_pmu_v3_format_attr_group,
	&pmu_cpumask_attr_group,
	&mc_pmu_v3_events_attr_group,
	NULL
};

static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
	int cntr;

	cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
				   pmu_dev->max_counters);
	if (cntr == pmu_dev->max_counters)
		return -ENOSPC;
	set_bit(cntr, pmu_dev->cntr_assign_mask);

	return cntr;
}

static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
	clear_bit(cntr, pmu_dev->cntr_assign_mask);
}

static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu)
{
	writel(PCPPMU_V3_INTCLRMASK,
	       xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev,
					   int idx)
{
	u32 lo, hi;

	/*
	 * v3 has 64-bit counter registers composed of two 32-bit registers.
	 * This can be a problem if the counter increases and carries out of
	 * bit [31] between the two reads. The extra read of the high word
	 * detects such a carry and retries the read.
	 */
	do {
		hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1);
		lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx);
	} while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1));

	return (((u64)hi << 32) | lo);
}
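
/*
 * Worked example of the torn-read hazard above (hypothetical values): if the
 * counter advances from 0x00000000_FFFFFFFF to 0x00000001_00000000 between
 * the hi and lo reads, a single-pass read would pair the old hi word (0x0)
 * with the new lo word (0x0) and return 0, losing roughly 2^32 counts. The
 * re-read observes that hi has changed to 0x1 and retries until hi is stable.
 */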

static inline void
xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val)
{
	u32 cnt_lo, cnt_hi;

	cnt_hi = upper_32_bits(val);
	cnt_lo = lower_32_bits(val);

	/* v3 has 64-bit counter registers composed of two 32-bit registers */
	xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo);
	xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi);
}

static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}

static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}

static inline void
xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
	writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}

static inline void
xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { }

static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}

static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}

static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}

static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
	val |= 1 << idx;
	writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}

static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_P;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val |= PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
	u32 val;

	val = readl(pmu_dev->inf->csr + PMU_PMCR);
	val &= ~PMU_PMCR_E;
	writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static void xgene_perf_pmu_enable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	bool enabled = !bitmap_empty(pmu_dev->cntr_assign_mask,
				     pmu_dev->max_counters);

	if (!enabled)
		return;

	xgene_pmu->ops->start_counters(pmu_dev);
}

static void xgene_perf_pmu_disable(struct pmu *pmu)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->stop_counters(pmu_dev);
}

static int xgene_perf_event_init(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct perf_event *sibling;

	/* Test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * SOC PMU counters are shared across all cores.
	 * Therefore, it does not support per-process mode.
	 * Also, it does not support event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;
	/*
	 * Many perf core operations (eg. events rotation) operate on a
	 * single CPU context. This is obvious for CPU PMUs, where one
	 * expects the same sets of events being observed on all CPUs,
	 * but can lead to issues for off-core PMUs, where each
	 * event could be theoretically assigned to a different CPU. To
	 * mitigate this, we enforce CPU assignment to one, selected
	 * processor (the one described in the "cpumask" attribute).
	 */
	event->cpu = cpumask_first(&pmu_dev->parent->cpu);

	hw->config = event->attr.config;
	/*
	 * Each bit of the config1 field represents an agent from which the
	 * request of the event comes. The event is counted only if it's
	 * caused by a request of an agent that has the bit cleared.
	 * By default, the event is counted for all agents.
	 */
	hw->config_base = event->attr.config1;

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	return 0;
}

static void xgene_perf_enable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event),
				      GET_EVENTID(event));
	xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
	if (pmu_dev->inf->type == PMU_TYPE_IOB)
		xgene_pmu->ops->write_agent1msk(pmu_dev,
						~((u32)GET_AGENT1ID(event)));

	xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_disable_event(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;

	xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event));
	xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_event_set_period(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	/*
	 * A 32-bit counter has a period of 2^32. To account for the
	 * possibility of extreme interrupt latency, we program for a period
	 * of half that. Hopefully, we can handle the interrupt before
	 * another 2^31 events occur and the counter overtakes its previous
	 * value. A 64-bit counter is not expected to overflow.
	 */
	u64 val = 1ULL << 31;

	local64_set(&hw->prev_count, val);
	xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val);
}
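
/*
 * Illustrative arithmetic (not from the original sources): with a 32-bit
 * counter primed to 2^31, the overflow interrupt fires after 2^31 events,
 * which leaves the handler a further 2^31 events' worth of time to reload
 * the counter before it wraps past its starting value and counts are lost.
 */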

static void xgene_perf_event_update(struct perf_event *event)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hw->prev_count);
	new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event));

	if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;

	local64_add(delta, &event->count);
}
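
/*
 * Worked example for the delta computation above (hypothetical values,
 * 32-bit counter): prev_raw_count = 0xFFFFFFF0, new_raw_count = 0x00000010.
 * The u64 subtraction wraps to 0xFFFFFFFF00000020, and masking with
 * pmu_dev->max_period (0xFFFFFFFF here) recovers the true delta of 0x20.
 */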

static void xgene_perf_read(struct perf_event *event)
{
	xgene_perf_event_update(event);
}

static void xgene_perf_start(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	struct hw_perf_event *hw = &event->hw;

	if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
	hw->state = 0;

	xgene_perf_event_set_period(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hw->prev_count);

		xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event),
					      prev_raw_count);
	}

	xgene_perf_enable_event(event);
	perf_event_update_userpage(event);
}

static void xgene_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_disable_event(event);
	WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
	hw->state |= PERF_HES_STOPPED;

	if (hw->state & PERF_HES_UPTODATE)
		return;

	xgene_perf_read(event);
	hw->state |= PERF_HES_UPTODATE;
}

static int xgene_perf_add(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Allocate an event counter */
	hw->idx = get_next_avail_cntr(pmu_dev);
	if (hw->idx < 0)
		return -EAGAIN;

	/* Update counter event pointer for the interrupt handler */
	pmu_dev->pmu_counter_event[hw->idx] = event;

	if (flags & PERF_EF_START)
		xgene_perf_start(event, PERF_EF_RELOAD);

	return 0;
}

static void xgene_perf_del(struct perf_event *event, int flags)
{
	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	xgene_perf_stop(event, PERF_EF_UPDATE);

	/* Clear the assigned counter */
	clear_avail_cntr(pmu_dev, GET_CNTR(event));

	perf_event_update_userpage(event);
	pmu_dev->pmu_counter_event[hw->idx] = NULL;
}

static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
	struct xgene_pmu *xgene_pmu;

	if (pmu_dev->parent->version == PCP_PMU_V3)
		pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD;
	else
		pmu_dev->max_period = PMU_CNT_MAX_PERIOD;
	/* First version PMU supports only single event counter */
	xgene_pmu = pmu_dev->parent;
	if (xgene_pmu->version == PCP_PMU_V1)
		pmu_dev->max_counters = 1;
	else
		pmu_dev->max_counters = PMU_MAX_COUNTERS;

	/* Perf driver registration */
	pmu_dev->pmu = (struct pmu) {
		.attr_groups	= pmu_dev->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= xgene_perf_pmu_enable,
		.pmu_disable	= xgene_perf_pmu_disable,
		.event_init	= xgene_perf_event_init,
		.add		= xgene_perf_add,
		.del		= xgene_perf_del,
		.start		= xgene_perf_start,
		.stop		= xgene_perf_stop,
		.read		= xgene_perf_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	/* Hardware counter init */
	xgene_pmu->ops->stop_counters(pmu_dev);
	xgene_pmu->ops->reset_counters(pmu_dev);

	return perf_pmu_register(&pmu_dev->pmu, name, -1);
}

static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev *pmu;

	pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;
	pmu->parent = xgene_pmu;
	pmu->inf = &ctx->inf;
	ctx->pmu_dev = pmu;

	switch (pmu->inf->type) {
	case PMU_TYPE_L3C:
		if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = l3c_pmu_v3_attr_groups;
		else
			pmu->attr_groups = l3c_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
		else
			pmu->attr_groups = iob_pmu_attr_groups;
		break;
	case PMU_TYPE_IOB_SLOW:
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
		break;
	case PMU_TYPE_MCB:
		if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mcb_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mcb_pmu_attr_groups;
		break;
	case PMU_TYPE_MC:
		if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
			return -ENODEV;
		if (xgene_pmu->version == PCP_PMU_V3)
			pmu->attr_groups = mc_pmu_v3_attr_groups;
		else
			pmu->attr_groups = mc_pmu_attr_groups;
		break;
	default:
		return -EINVAL;
	}

	if (xgene_init_perf(pmu, ctx->name)) {
		dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
		return -ENODEV;
	}

	dev_info(dev, "%s PMU registered\n", ctx->name);

	return 0;
}

static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
	struct xgene_pmu *xgene_pmu = pmu_dev->parent;
	void __iomem *csr = pmu_dev->inf->csr;
	u32 pmovsr;
	int idx;

	xgene_pmu->ops->stop_counters(pmu_dev);

	if (xgene_pmu->version == PCP_PMU_V3)
		pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK;
	else
		pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;

	if (!pmovsr)
		goto out;

	/* Clear interrupt flag */
	if (xgene_pmu->version == PCP_PMU_V1)
		writel(0x0, csr + PMU_PMOVSR);
	else if (xgene_pmu->version == PCP_PMU_V2)
		writel(pmovsr, csr + PMU_PMOVSR);
	else
		writel(pmovsr, csr + PMU_PMOVSCLR);

	for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
		struct perf_event *event = pmu_dev->pmu_counter_event[idx];
		int overflowed = pmovsr & BIT(idx);

		/* Ignore if we don't have an event. */
		if (!event || !overflowed)
			continue;
		xgene_perf_event_update(event);
		xgene_perf_event_set_period(event);
	}

out:
	xgene_pmu->ops->start_counters(pmu_dev);
}

static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
	u32 intr_mcu, intr_mcb, intr_l3c, intr_iob;
	struct xgene_pmu_dev_ctx *ctx;
	struct xgene_pmu *xgene_pmu = dev_id;
	u32 val;

	raw_spin_lock(&xgene_pmu->lock);

	/* Get Interrupt PMU source */
	val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
	if (xgene_pmu->version == PCP_PMU_V3) {
		intr_mcu = PCPPMU_V3_INT_MCU;
		intr_mcb = PCPPMU_V3_INT_MCB;
		intr_l3c = PCPPMU_V3_INT_L3C;
		intr_iob = PCPPMU_V3_INT_IOB;
	} else {
		intr_mcu = PCPPMU_INT_MCU;
		intr_mcb = PCPPMU_INT_MCB;
		intr_l3c = PCPPMU_INT_L3C;
		intr_iob = PCPPMU_INT_IOB;
	}
	if (val & intr_mcu) {
		list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_mcb) {
		list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_l3c) {
		list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}
	if (val & intr_iob) {
		list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
			_xgene_pmu_isr(irq, ctx->pmu_dev);
		}
	}

	raw_spin_unlock(&xgene_pmu->lock);

	return IRQ_HANDLED;
}

static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					     struct platform_device *pdev)
{
	void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
	unsigned int reg;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	mcba_csr = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(mcba_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
		return PTR_ERR(mcba_csr);
	}

	mcbb_csr = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(mcbb_csr)) {
		dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
		return PTR_ERR(mcbb_csr);
	}

	xgene_pmu->l3c_active_mask = 0x1;

	reg = readl(csw_csr + CSW_CSWCR);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		reg = readl(mcbb_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		reg = readl(mcba_csr + CSW_CSWCR);
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
						struct platform_device *pdev)
{
	void __iomem *csw_csr;
	unsigned int reg;
	u32 mcb0routing;
	u32 mcb1routing;

	csw_csr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(csw_csr)) {
		dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
		return PTR_ERR(csw_csr);
	}

	reg = readl(csw_csr + CSW_CSWCR);
	mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg);
	mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg);
	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active L3C(s), maximum is 8 */
		xgene_pmu->l3c_active_mask = 0xFF;
		/* Probe all active MC(s), maximum is 8 */
		if ((mcb0routing == 0x2) && (mcb1routing == 0x2))
			xgene_pmu->mc_active_mask = 0xFF;
		else if ((mcb0routing == 0x1) && (mcb1routing == 0x1))
			xgene_pmu->mc_active_mask = 0x33;
		else
			xgene_pmu->mc_active_mask = 0x11;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active L3C(s), maximum is 4 */
		xgene_pmu->l3c_active_mask = 0x0F;
		/* Probe all active MC(s), maximum is 4 */
		if (mcb0routing == 0x2)
			xgene_pmu->mc_active_mask = 0x0F;
		else if (mcb0routing == 0x1)
			xgene_pmu->mc_active_mask = 0x03;
		else
			xgene_pmu->mc_active_mask = 0x01;
	}

	return 0;
}
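
/*
 * Decoding example (hypothetical register value): with CSW_CSWCR reading
 * 0x08, CSW_CSWCR_DUALMCB_MASK (bit 0) is clear, and
 * CSW_CSWCR_MCB0_ROUTING() extracts bits [3:2] == 0x2, so the single-MCB
 * branch above selects mc_active_mask = 0x0F (all four MCUs behind MCB0
 * treated as active).
 */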

static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					    struct platform_device *pdev)
{
	struct regmap *csw_map, *mcba_map, *mcbb_map;
	struct device_node *np = pdev->dev.of_node;
	unsigned int reg;

	csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
	if (IS_ERR(csw_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
		return PTR_ERR(csw_map);
	}

	mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
	if (IS_ERR(mcba_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
		return PTR_ERR(mcba_map);
	}

	mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
	if (IS_ERR(mcbb_map)) {
		dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
		return PTR_ERR(mcbb_map);
	}

	xgene_pmu->l3c_active_mask = 0x1;
	if (regmap_read(csw_map, CSW_CSWCR, &reg))
		return -EINVAL;

	if (reg & CSW_CSWCR_DUALMCB_MASK) {
		/* Dual MCB active */
		xgene_pmu->mcb_active_mask = 0x3;
		/* Probe all active MC(s) */
		if (regmap_read(mcbb_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
	} else {
		/* Single MCB active */
		xgene_pmu->mcb_active_mask = 0x1;
		/* Probe all active MC(s) */
		if (regmap_read(mcba_map, MCBADDRMR, &reg))
			return 0;
		xgene_pmu->mc_active_mask =
			(reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
	}

	return 0;
}

static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu,
					      struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev)) {
		if (xgene_pmu->version == PCP_PMU_V3)
			return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu,
								    pdev);
		else
			return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu,
								 pdev);
	}
	return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
}

static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
	switch (type) {
	case PMU_TYPE_L3C:
		return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
	case PMU_TYPE_IOB:
		return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
	case PMU_TYPE_IOB_SLOW:
		return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
	case PMU_TYPE_MCB:
		return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
	case PMU_TYPE_MC:
		return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
	default:
		return devm_kasprintf(dev, GFP_KERNEL, "unknown");
	}
}
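
/*
 * For example (illustrative): type PMU_TYPE_L3C with id 2 yields "l3c2".
 * That string is the name passed to perf_pmu_register() and hence the
 * directory that appears under /sys/bus/event_source/devices/.
 */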

#if defined(CONFIG_ACPI)
static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
					struct acpi_device *adev, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct list_head resource_list;
	struct xgene_pmu_dev_ctx *ctx;
	const union acpi_object *obj;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	struct resource_entry *rentry;
	int enable_bit;
	int rc;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&resource_list);
	rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (rc <= 0) {
		dev_err(dev, "PMU type %d: No resources found\n", type);
		return NULL;
	}

	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			res = *rentry->res;
			rentry = NULL;
			break;
		}
	}
	acpi_dev_free_resource_list(&resource_list);

	if (rentry) {
		dev_err(dev, "PMU type %d: No memory resource found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	rc = acpi_dev_get_property(adev, "enable-bit-index",
				   ACPI_TYPE_INTEGER, &obj);
	if (rc < 0)
		enable_bit = 0;
	else
		enable_bit = (int) obj->integer.value;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}
	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static const struct acpi_device_id xgene_pmu_acpi_type_match[] = {
	{"APMC0D5D", PMU_TYPE_L3C},
	{"APMC0D5E", PMU_TYPE_IOB},
	{"APMC0D5F", PMU_TYPE_MCB},
	{"APMC0D60", PMU_TYPE_MC},
	{"APMC0D84", PMU_TYPE_L3C},
	{"APMC0D85", PMU_TYPE_IOB},
	{"APMC0D86", PMU_TYPE_IOB_SLOW},
	{"APMC0D87", PMU_TYPE_MCB},
	{"APMC0D88", PMU_TYPE_MC},
	{},
};

static const struct acpi_device_id *xgene_pmu_acpi_match_type(
					const struct acpi_device_id *ids,
					struct acpi_device *adev)
{
	const struct acpi_device_id *match_id = NULL;
	const struct acpi_device_id *id;

	for (id = ids; id->id[0] || id->cls; id++) {
		if (!acpi_match_device_ids(adev, id))
			match_id = id;
		else if (match_id)
			break;
	}

	return match_id;
}

static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
				    void *data, void **return_value)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	const struct acpi_device_id *acpi_id;
	struct xgene_pmu *xgene_pmu = data;
	struct xgene_pmu_dev_ctx *ctx;

	if (!adev || acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev);
	if (!acpi_id)
		return AE_OK;

	ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data);
	if (!ctx)
		return AE_OK;

	if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
		/* Can't add the PMU device, skip it */
		devm_kfree(xgene_pmu->dev, ctx);
		return AE_OK;
	}

	switch (ctx->inf.type) {
	case PMU_TYPE_L3C:
		list_add(&ctx->next, &xgene_pmu->l3cpmus);
		break;
	case PMU_TYPE_IOB:
		list_add(&ctx->next, &xgene_pmu->iobpmus);
		break;
	case PMU_TYPE_IOB_SLOW:
		list_add(&ctx->next, &xgene_pmu->iobpmus);
		break;
	case PMU_TYPE_MCB:
		list_add(&ctx->next, &xgene_pmu->mcbpmus);
		break;
	case PMU_TYPE_MC:
		list_add(&ctx->next, &xgene_pmu->mcpmus);
		break;
	}
	return AE_OK;
}

static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				  struct platform_device *pdev)
{
	struct device *dev = xgene_pmu->dev;
	acpi_handle handle;
	acpi_status status;

	handle = ACPI_HANDLE(dev);
	if (!handle)
		return -EINVAL;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to probe PMU devices\n");
		return -ENODEV;
	}

	return 0;
}
#else
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				  struct platform_device *pdev)
{
	return 0;
}
#endif

static struct
xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
				      struct device_node *np, u32 type)
{
	struct device *dev = xgene_pmu->dev;
	struct xgene_pmu_dev_ctx *ctx;
	struct hw_pmu_info *inf;
	void __iomem *dev_csr;
	struct resource res;
	int enable_bit;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	if (of_address_to_resource(np, 0, &res) < 0) {
		dev_err(dev, "PMU type %d: No resource address found\n", type);
		return NULL;
	}

	dev_csr = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dev_csr)) {
		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
		return NULL;
	}

	/* A PMU device node without enable-bit-index is always enabled */
	if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
		enable_bit = 0;

	ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
	if (!ctx->name) {
		dev_err(dev, "PMU type %d: Fail to get device name\n", type);
		return NULL;
	}

	inf = &ctx->inf;
	inf->type = type;
	inf->csr = dev_csr;
	inf->enable_mask = 1 << enable_bit;

	return ctx;
}

static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				 struct platform_device *pdev)
{
	struct xgene_pmu_dev_ctx *ctx;
	struct device_node *np;

	for_each_child_of_node(pdev->dev.of_node, np) {
		if (!of_device_is_available(np))
			continue;

		if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
		else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
			ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
		else
			ctx = NULL;

		if (!ctx)
			continue;

		if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
			/* Can't add the PMU device, skip it */
			devm_kfree(xgene_pmu->dev, ctx);
			continue;
		}

		switch (ctx->inf.type) {
		case PMU_TYPE_L3C:
			list_add(&ctx->next, &xgene_pmu->l3cpmus);
			break;
		case PMU_TYPE_IOB:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_IOB_SLOW:
			list_add(&ctx->next, &xgene_pmu->iobpmus);
			break;
		case PMU_TYPE_MCB:
			list_add(&ctx->next, &xgene_pmu->mcbpmus);
			break;
		case PMU_TYPE_MC:
			list_add(&ctx->next, &xgene_pmu->mcpmus);
			break;
		}
	}

	return 0;
}

static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
				   struct platform_device *pdev)
{
	if (has_acpi_companion(&pdev->dev))
		return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
	return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}

static const struct xgene_pmu_data xgene_pmu_data = {
	.id = PCP_PMU_V1,
};

static const struct xgene_pmu_data xgene_pmu_v2_data = {
	.id = PCP_PMU_V2,
};

static const struct xgene_pmu_ops xgene_pmu_ops = {
	.mask_int = xgene_pmu_mask_int,
	.unmask_int = xgene_pmu_unmask_int,
	.read_counter = xgene_pmu_read_counter32,
	.write_counter = xgene_pmu_write_counter32,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_write_agentmsk,
	.write_agent1msk = xgene_pmu_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

static const struct xgene_pmu_ops xgene_pmu_v3_ops = {
	.mask_int = xgene_pmu_v3_mask_int,
	.unmask_int = xgene_pmu_v3_unmask_int,
	.read_counter = xgene_pmu_read_counter64,
	.write_counter = xgene_pmu_write_counter64,
	.write_evttype = xgene_pmu_write_evttype,
	.write_agentmsk = xgene_pmu_v3_write_agentmsk,
	.write_agent1msk = xgene_pmu_v3_write_agent1msk,
	.enable_counter = xgene_pmu_enable_counter,
	.disable_counter = xgene_pmu_disable_counter,
	.enable_counter_int = xgene_pmu_enable_counter_int,
	.disable_counter_int = xgene_pmu_disable_counter_int,
	.reset_counters = xgene_pmu_reset_counters,
	.start_counters = xgene_pmu_start_counters,
	.stop_counters = xgene_pmu_stop_counters,
};

static const struct of_device_id xgene_pmu_of_match[] = {
	{ .compatible = "apm,xgene-pmu",    .data = &xgene_pmu_data },
	{ .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
	{},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
	{"APMC0D5B", PCP_PMU_V1},
	{"APMC0D5C", PCP_PMU_V2},
	{"APMC0D83", PCP_PMU_V3},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif

static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);

	if (cpumask_empty(&xgene_pmu->cpu))
		cpumask_set_cpu(cpu, &xgene_pmu->cpu);

	/* Overflow interrupt also should use the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
						       node);
	struct xgene_pmu_dev_ctx *ctx;
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
		return 0;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}
	list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
	}

	cpumask_set_cpu(target, &xgene_pmu->cpu);
	/* Overflow interrupt also should use the same CPU */
	WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));

	return 0;
}

static int xgene_pmu_probe(struct platform_device *pdev)
{
	const struct xgene_pmu_data *dev_data;
	const struct of_device_id *of_id;
	struct xgene_pmu *xgene_pmu;
	struct resource *res;
	int irq, rc;
	int version;

	/* Install a hook to update the reader CPU in case it goes offline */
	rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				     "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
				     xgene_pmu_online_cpu,
				     xgene_pmu_offline_cpu);
	if (rc)
		return rc;

	xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
	if (!xgene_pmu)
		return -ENOMEM;
	xgene_pmu->dev = &pdev->dev;
	platform_set_drvdata(pdev, xgene_pmu);

	version = -EINVAL;
	of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
	if (of_id) {
		dev_data = (const struct xgene_pmu_data *) of_id->data;
		version = dev_data->id;
	}

#ifdef CONFIG_ACPI
	if (ACPI_COMPANION(&pdev->dev)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
		if (acpi_id)
			version = (int) acpi_id->driver_data;
	}
#endif
	if (version < 0)
		return -ENODEV;

	if (version == PCP_PMU_V3)
		xgene_pmu->ops = &xgene_pmu_v3_ops;
	else
		xgene_pmu->ops = &xgene_pmu_ops;

	INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
	INIT_LIST_HEAD(&xgene_pmu->iobpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
	INIT_LIST_HEAD(&xgene_pmu->mcpmus);

	xgene_pmu->version = version;
	dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xgene_pmu->pcppmu_csr)) {
		dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
		return PTR_ERR(xgene_pmu->pcppmu_csr);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -EINVAL;

	rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
			      IRQF_NOBALANCING | IRQF_NO_THREAD,
			      dev_name(&pdev->dev), xgene_pmu);
	if (rc) {
		dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
		return rc;
	}

	xgene_pmu->irq = irq;

	raw_spin_lock_init(&xgene_pmu->lock);

	/* Check for active MCB, and MCU */
	rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
		xgene_pmu->mcb_active_mask = 0x1;
		xgene_pmu->mc_active_mask = 0x1;
	}

	/* Add this instance to the CPU hotplug state machine */
	rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				      &xgene_pmu->node);
	if (rc) {
		dev_err(&pdev->dev, "Error %d registering hotplug", rc);
		return rc;
	}

	/* Walk through the tree for all PMU perf devices */
	rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
	if (rc) {
		dev_err(&pdev->dev, "No PMU perf devices found!\n");
		goto out_unregister;
	}

	/* Enable interrupt */
	xgene_pmu->ops->unmask_int(xgene_pmu);

	return 0;

out_unregister:
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				    &xgene_pmu->node);
	return rc;
}

static void
xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
{
	struct xgene_pmu_dev_ctx *ctx;

	list_for_each_entry(ctx, pmus, next) {
		perf_pmu_unregister(&ctx->pmu_dev->pmu);
	}
}

static int xgene_pmu_remove(struct platform_device *pdev)
{
	struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);

	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
	xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
				    &xgene_pmu->node);

	return 0;
}

static struct platform_driver xgene_pmu_driver = {
	.probe = xgene_pmu_probe,
	.remove = xgene_pmu_remove,
	.driver = {
		.name = "xgene-pmu",
		.of_match_table = xgene_pmu_of_match,
		.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
};

builtin_platform_driver(xgene_pmu_driver);