0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016 #ifdef CONFIG_CPU_XSCALE
0017
0018 #include <asm/cputype.h>
0019 #include <asm/irq_regs.h>
0020
0021 #include <linux/of.h>
0022 #include <linux/perf/arm_pmu.h>
0023 #include <linux/platform_device.h>
0024
/*
 * Hardware event numbers accepted by the XScale PMU event-select fields.
 * The two top values are software encodings used only by this driver:
 * CCNT steers an event onto the dedicated cycle counter and UNUSED parks
 * an idle counter (see the disable paths below).
 */
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,

	/* Driver-internal pseudo events, never written to an event field. */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};
0050
/*
 * Driver-internal counter indices: index 0 is the cycle counter, the
 * remainder are general-purpose event counters.  XScale v1 implements
 * counters 0-1, v2 adds counters 2-3 (see num_events in the init
 * routines below).
 */
enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
0058
/*
 * Map the generic perf hardware event IDs onto XScale event encodings.
 * Anything not listed here is marked unsupported by
 * PERF_MAP_ALL_UNSUPPORTED.
 */
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};
0067
/*
 * Map the generic perf cache events (cache, op, result) onto XScale
 * event encodings.  The hardware does not distinguish read from write
 * for the D-cache and TLB events, so both ops share one encoding;
 * unlisted combinations are unsupported.
 */
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
};
0086
/*
 * PMNC control bits common to both XScale PMU generations.
 */
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
/*
 * Fixed: this previously expanded to (CCNT_RESET | PMN_RESET), which
 * references identifiers that are defined nowhere — any use of
 * XSCALE_PMU_RESET would fail to compile.  Use the XSCALE_-prefixed
 * macros defined just above.
 */
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

/*
 * XScale v1 packs everything into the single PMNC register: overflow
 * flags (bits 8-10), interrupt enables (bits 4-6) and the two 8-bit
 * event-select fields (bits 12-19 and 20-27).
 */
#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
0104
/* Read the XScale v1 PMNC (CP14 c0,c0), which also holds the overflow flags. */
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}
0112
/* Write the XScale v1 PMNC (CP14 c0,c0). */
static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/*
	 * Restrict the write to the bits this driver defines (control,
	 * interrupt-enable, overflow and event-select fields); the mask
	 * skips bits 7 and 11 and everything above the event fields.
	 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
0120
0121 static inline int
0122 xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
0123 enum xscale_counters counter)
0124 {
0125 int ret = 0;
0126
0127 switch (counter) {
0128 case XSCALE_CYCLE_COUNTER:
0129 ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
0130 break;
0131 case XSCALE_COUNTER0:
0132 ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
0133 break;
0134 case XSCALE_COUNTER1:
0135 ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
0136 break;
0137 default:
0138 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
0139 }
0140
0141 return ret;
0142 }
0143
/*
 * PMU overflow interrupt handler for XScale v1.  Returns IRQ_NONE when
 * no counter has overflowed (the line may be shared), IRQ_HANDLED
 * otherwise.
 */
static irqreturn_t
xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * On v1 the overflow flags live in the PMNC itself, so one read
	 * captures both the control bits and the pending overflows.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to acknowledge the overflow flags, and
	 * clear the enable bit so the PMU is stopped while we service
	 * the interrupt.  The flags remain in @pmnc for the loop below.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	/* Fold every overflowed counter into its perf event. */
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Reprogram the sampling period; skip if none remains. */
		if (!armpmu_event_set_period(event))
			continue;

		/* A nonzero return asks us to throttle the event. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU now that the overflowed counters have been
	 * serviced.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
0203
/*
 * Enable @event's counter: program the event number into the matching
 * PMNC event-select field (the cycle counter needs no event select)
 * and unmask its overflow interrupt.
 */
static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		/* No event field to program for the cycle counter. */
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/* Read-modify-write of the shared PMNC under the PMU lock. */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0239
/*
 * Disable @event's counter: mask its overflow interrupt and, for the
 * general-purpose counters, park the event-select field on
 * XSCALE_PERFCTR_UNUSED.
 */
static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/* Read-modify-write of the shared PMNC under the PMU lock. */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0273
0274 static int
0275 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
0276 struct perf_event *event)
0277 {
0278 struct hw_perf_event *hwc = &event->hw;
0279 if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
0280 if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
0281 return -EAGAIN;
0282
0283 return XSCALE_CYCLE_COUNTER;
0284 } else {
0285 if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
0286 return XSCALE_COUNTER1;
0287
0288 if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
0289 return XSCALE_COUNTER0;
0290
0291 return -EAGAIN;
0292 }
0293 }
0294
/* Release the counter previously claimed for @event in @cpuc->used_mask. */
static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}
0300
/* Start the v1 PMU: set the global enable bit in the PMNC. */
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0312
/* Stop the v1 PMU: clear the global enable bit in the PMNC. */
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0324
/*
 * Read the raw 32-bit value of @event's hardware counter via CP14
 * (CCNT in c1, event counters in c2/c3).  Returns 0 for an index that
 * matches no v1 counter.
 */
static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}
0345
/*
 * Write @val into @event's hardware counter via CP14 (the registers
 * are 32 bits wide; the upper half of @val is dropped by the mcr).
 * Unknown indices are silently ignored, mirroring the read path.
 */
static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
0363
/*
 * Translate a generic perf event config into an XScale event number
 * using the maps above; raw event numbers are masked to 8 bits (0xFF).
 */
static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}
0369
/*
 * Wire up the arm_pmu callbacks for an XScale v1 PMU: one cycle
 * counter plus two event counters (num_events = 3).  Always succeeds.
 */
static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 3;

	return 0;
}
0387
/*
 * XScale v2 splits the PMU state across separate CP14 registers (see
 * the accessors below).  The overflow-flag and interrupt-enable
 * registers use one bit per counter (cycle counter in bit 0), and the
 * event-select register packs four 8-bit event numbers.
 */
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
0407
/* Read the XScale v2 PMNC (CP14 c0,c1). */
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));

	/* Expose only the bits this driver defines. */
	return val & 0xff000009;
}
0416
/* Write the XScale v2 PMNC (CP14 c0,c1). */
static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* Only the low control nibble (enable/reset/CNT64) is written. */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
0424
/* Read the v2 overflow-flag register (CP14 c5,c1); XSCALE2_*_OVERFLOW bits. */
static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}
0432
/* Write the v2 overflow-flag register; used by the IRQ path to ack flags. */
static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}
0438
/* Read the v2 event-select register (CP14 c8,c1): four packed 8-bit events. */
static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}
0446
/* Write the v2 event-select register (CP14 c8,c1). */
static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}
0452
/* Read the v2 interrupt-enable register (CP14 c4,c1); XSCALE2_*_INT_EN bits. */
static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}
0460
0461 static void
0462 xscale2pmu_write_int_enable(u32 val)
0463 {
0464 asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
0465 }
0466
0467 static inline int
0468 xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
0469 enum xscale_counters counter)
0470 {
0471 int ret = 0;
0472
0473 switch (counter) {
0474 case XSCALE_CYCLE_COUNTER:
0475 ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
0476 break;
0477 case XSCALE_COUNTER0:
0478 ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
0479 break;
0480 case XSCALE_COUNTER1:
0481 ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
0482 break;
0483 case XSCALE_COUNTER2:
0484 ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
0485 break;
0486 case XSCALE_COUNTER3:
0487 ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
0488 break;
0489 default:
0490 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
0491 }
0492
0493 return ret;
0494 }
0495
/*
 * PMU overflow interrupt handler for XScale v2, where the overflow
 * flags live in a dedicated CP14 register rather than in the PMNC.
 */
static irqreturn_t
xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Stop the PMU while the interrupt is serviced. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* IRQ_NONE lets a shared interrupt line be passed along. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Writing the flags back acknowledges them. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	/* Fold every overflowed counter into its perf event. */
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Reprogram the sampling period; skip if none remains. */
		if (!armpmu_event_set_period(event))
			continue;

		/* A nonzero return asks us to throttle the event. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU now that the overflowed counters have been
	 * serviced.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
0549
/*
 * Enable @event's counter on v2: program the event number into the
 * event-select register field (cycle counter excepted) and unmask the
 * counter's overflow interrupt.
 */
static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		/* The cycle counter has no event-select field. */
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/* Commit both registers atomically w.r.t. other PMU updates. */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0595
/*
 * Disable @event's counter on v2: mask its interrupt, park its
 * event-select field on XSCALE_PERFCTR_UNUSED and clear any pending
 * overflow flag for the counter.
 */
static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel, of_flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/* Commit all three registers atomically w.r.t. other PMU updates. */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0647
0648 static int
0649 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
0650 struct perf_event *event)
0651 {
0652 int idx = xscale1pmu_get_event_idx(cpuc, event);
0653 if (idx >= 0)
0654 goto out;
0655
0656 if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
0657 idx = XSCALE_COUNTER3;
0658 else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
0659 idx = XSCALE_COUNTER2;
0660 out:
0661 return idx;
0662 }
0663
/*
 * Start the v2 PMU: set the enable bit with the CNT64 bit cleared
 * (NOTE(review): CNT64 presumably selects divided cycle counting —
 * confirm against the XScale core manual).
 */
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0675
/* Stop the v2 PMU: clear the global enable bit in the PMNC. */
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
0687
/*
 * Read the raw 32-bit value of @event's hardware counter via CP14
 * (CCNT in c1,c1; event counters 0-3 in c0/c1/c2/c3 of the c2 group).
 * Returns 0 for an index that matches no v2 counter.
 */
static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}
0714
/*
 * Write @val into @event's hardware counter via CP14 (registers are
 * 32 bits; the upper half of @val is dropped by the mcr).  Unknown
 * indices are silently ignored, mirroring the read path.
 */
static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
0738
/*
 * Wire up the arm_pmu callbacks for an XScale v2 PMU: one cycle
 * counter plus four event counters (num_events = 5).  Always succeeds.
 */
static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 5;

	return 0;
}
0756
/* Match the CPU's XScale architecture revision to the right init routine. */
static const struct pmu_probe_info xscale_pmu_probe_table[] = {
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel */ }
};
0762
/*
 * Platform-device probe: defer to the generic ARM PMU probe, matching
 * by CPUID via the table above (no devicetree match table is passed).
 */
static int xscale_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}
0767
/* Platform driver glue; bound to the "xscale-pmu" platform device. */
static struct platform_driver xscale_pmu_driver = {
	.driver		= {
		.name	= "xscale-pmu",
	},
	.probe		= xscale_pmu_device_probe,
};

/* Built-in only: registered at device_initcall time, no module unload. */
builtin_platform_driver(xscale_pmu_driver);
0776 #endif