// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *  - change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *    effectively stops the counter from counting.
 *  - disable the counter's interrupt generation (each counter has its
 *    own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *  - enable the counter's interrupt generation.
 *  - set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum armv6_perf_types {
    ARMV6_PERFCTR_ICACHE_MISS       = 0x0,
    ARMV6_PERFCTR_IBUF_STALL        = 0x1,
    ARMV6_PERFCTR_DDEP_STALL        = 0x2,
    ARMV6_PERFCTR_ITLB_MISS         = 0x3,
    ARMV6_PERFCTR_DTLB_MISS         = 0x4,
    ARMV6_PERFCTR_BR_EXEC           = 0x5,
    ARMV6_PERFCTR_BR_MISPREDICT     = 0x6,
    ARMV6_PERFCTR_INSTR_EXEC        = 0x7,
    ARMV6_PERFCTR_DCACHE_HIT        = 0x9,
    ARMV6_PERFCTR_DCACHE_ACCESS     = 0xA,
    ARMV6_PERFCTR_DCACHE_MISS       = 0xB,
    ARMV6_PERFCTR_DCACHE_WBACK      = 0xC,
    ARMV6_PERFCTR_SW_PC_CHANGE      = 0xD,
    ARMV6_PERFCTR_MAIN_TLB_MISS     = 0xF,
    ARMV6_PERFCTR_EXPL_D_ACCESS     = 0x10,
    ARMV6_PERFCTR_LSU_FULL_STALL    = 0x11,
    ARMV6_PERFCTR_WBUF_DRAINED      = 0x12,
    ARMV6_PERFCTR_CPU_CYCLES        = 0xFF,
    ARMV6_PERFCTR_NOP               = 0x20,
};
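
/*
 * ARMV6_PERFCTR_NOP (0x20) is the ETMEXTOUT[0] event described in the header
 * comment: with event-bus export disabled it should never fire, so
 * armv6pmu_disable_event() writes it into a counter's event field to park
 * that counter.
 */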

enum armv6_counters {
    ARMV6_CYCLE_COUNTER = 0,
    ARMV6_COUNTER0,
    ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
    PERF_MAP_ALL_UNSUPPORTED,
    [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6_PERFCTR_CPU_CYCLES,
    [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6_PERFCTR_INSTR_EXEC,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
    [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6_PERFCTR_BR_MISPREDICT,
    [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
    [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
};
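
/*
 * Note: PERF_MAP_ALL_UNSUPPORTED (from <linux/perf/arm_pmu.h>) pre-fills
 * every slot with HW_OP_UNSUPPORTED; the designated initializers above then
 * override only the generic events this PMU can actually count. The cache
 * map below uses the analogous PERF_CACHE_MAP_ALL_UNSUPPORTED default.
 */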

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                      [PERF_COUNT_HW_CACHE_OP_MAX]
                      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
    PERF_CACHE_MAP_ALL_UNSUPPORTED,

    /*
     * The performance counters don't differentiate between read and write
     * accesses/misses so this isn't strictly correct, but it's the best we
     * can do. Writes and reads get combined.
     */
    [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6_PERFCTR_DCACHE_ACCESS,
    [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_DCACHE_MISS,
    [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
    [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DCACHE_MISS,

    [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6_PERFCTR_ICACHE_MISS,

    /*
     * The ARM performance counters can count micro DTLB misses, micro ITLB
     * misses and main TLB misses. There isn't an event for TLB misses, so
     * use the micro misses here and if users want the main TLB misses they
     * can use a raw counter.
     */
    [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_DTLB_MISS,
    [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_DTLB_MISS,

    [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6_PERFCTR_ITLB_MISS,
    [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6_PERFCTR_ITLB_MISS,
};

enum armv6mpcore_perf_types {
    ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
    ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
    ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
    ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
    ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
    ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
    ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
    ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
    ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
    ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
    ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
    ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
    ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
    ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
    ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
    ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
    ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
    ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
    ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
    ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
};

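/*
 * Note the differences from the ARM1136/1176 enumeration above: 11MPCore
 * counts D-cache read and write accesses/misses separately and adds a
 * "branch not predicted" event, so most event numbers beyond 0x5 differ
 * from the single-core list.
 */
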
/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
    PERF_MAP_ALL_UNSUPPORTED,
    [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
    [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
    [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
    [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
    [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
    [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                    [PERF_COUNT_HW_CACHE_OP_MAX]
                    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
    PERF_CACHE_MAP_ALL_UNSUPPORTED,

    [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
    [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
    [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
    [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

    [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,

    /*
     * The ARM performance counters can count micro DTLB misses, micro ITLB
     * misses and main TLB misses. There isn't an event for TLB misses, so
     * use the micro misses here and if users want the main TLB misses they
     * can use a raw counter.
     */
    [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DTLB_MISS,
    [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_DTLB_MISS,

    [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ITLB_MISS,
    [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

static inline unsigned long
armv6_pmcr_read(void)
{
    u32 val;
    asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
    return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
    asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}
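
/*
 * The ARMv6 PMU is accessed entirely through CP15 c15, c12: opcode2 0 is the
 * control register (PMCR) read/written above, opcode2 1 is the cycle counter
 * and opcode2 2/3 are the two event counters, as used by
 * armv6pmu_read_counter()/armv6pmu_write_counter() below.
 */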

#define ARMV6_PMCR_ENABLE           (1 << 0)
#define ARMV6_PMCR_CTR01_RESET      (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET     (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV       (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN       (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN       (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN       (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW  (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW  (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW  (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
#define ARMV6_PMCR_EVT_COUNT0_MASK  (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
#define ARMV6_PMCR_EVT_COUNT1_MASK  (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
    (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
     ARMV6_PMCR_CCOUNT_OVERFLOW)

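/*
 * For illustration only (a sketch, not a code path of its own): programming
 * counter 0 to count data cache accesses with its overflow interrupt enabled
 * amounts to
 *
 *    val  = armv6_pmcr_read();
 *    val &= ~ARMV6_PMCR_EVT_COUNT0_MASK;
 *    val |= (ARMV6_PERFCTR_DCACHE_ACCESS << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
 *           ARMV6_PMCR_COUNT0_IEN;
 *    armv6_pmcr_write(val);
 *
 * which is what armv6pmu_enable_event() does below; the global
 * ARMV6_PMCR_ENABLE bit is set separately by armv6pmu_start().
 */
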
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
    return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
                  enum armv6_counters counter)
{
    int ret = 0;

    if (ARMV6_CYCLE_COUNTER == counter)
        ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
    else if (ARMV6_COUNTER0 == counter)
        ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
    else if (ARMV6_COUNTER1 == counter)
        ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
    else
        WARN_ONCE(1, "invalid counter number (%d)\n", counter);

    return ret;
}

static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    int counter = hwc->idx;
    unsigned long value = 0;

    if (ARMV6_CYCLE_COUNTER == counter)
        asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
    else if (ARMV6_COUNTER0 == counter)
        asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
    else if (ARMV6_COUNTER1 == counter)
        asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
    else
        WARN_ONCE(1, "invalid counter number (%d)\n", counter);

    return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
    struct hw_perf_event *hwc = &event->hw;
    int counter = hwc->idx;

    if (ARMV6_CYCLE_COUNTER == counter)
        asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
    else if (ARMV6_COUNTER0 == counter)
        asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
    else if (ARMV6_COUNTER1 == counter)
        asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
    else
        WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
    unsigned long val, mask, evt, flags;
    struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
    int idx = hwc->idx;

    if (ARMV6_CYCLE_COUNTER == idx) {
        mask = 0;
        evt  = ARMV6_PMCR_CCOUNT_IEN;
    } else if (ARMV6_COUNTER0 == idx) {
        mask = ARMV6_PMCR_EVT_COUNT0_MASK;
        evt  = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
               ARMV6_PMCR_COUNT0_IEN;
    } else if (ARMV6_COUNTER1 == idx) {
        mask = ARMV6_PMCR_EVT_COUNT1_MASK;
        evt  = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
               ARMV6_PMCR_COUNT1_IEN;
    } else {
        WARN_ONCE(1, "invalid counter number (%d)\n", idx);
        return;
    }

    /*
     * Mask out the current event and set the counter to count the event
     * that we're interested in.
     */
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = armv6_pmcr_read();
    val &= ~mask;
    val |= evt;
    armv6_pmcr_write(val);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
    unsigned long pmcr = armv6_pmcr_read();
    struct perf_sample_data data;
    struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
    struct pt_regs *regs;
    int idx;

    if (!armv6_pmcr_has_overflowed(pmcr))
        return IRQ_NONE;

    regs = get_irq_regs();

    /*
     * The interrupts are cleared by writing the overflow flags back to
     * the control register. All of the other bits don't have any effect
     * if they are rewritten, so write the whole value back.
     */
    armv6_pmcr_write(pmcr);

    for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc;

        /* Ignore if we don't have an event. */
        if (!event)
            continue;

        /*
         * We have a single interrupt for all counters. Check that
         * each counter has overflowed before we process it.
         */
        if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
            continue;

        hwc = &event->hw;
        armpmu_event_update(event);
        perf_sample_data_init(&data, 0, hwc->last_period);
        if (!armpmu_event_set_period(event))
            continue;

        if (perf_event_overflow(event, &data, regs))
            cpu_pmu->disable(event);
    }

    /*
     * Handle the pending perf events.
     *
     * Note: this call *must* be run with interrupts disabled. For
     * platforms that can have the PMU interrupts raised as an NMI, this
     * will not work.
     */
    irq_work_run();

    return IRQ_HANDLED;
}

static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
    unsigned long flags, val;
    struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = armv6_pmcr_read();
    val |= ARMV6_PMCR_ENABLE;
    armv6_pmcr_write(val);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
    unsigned long flags, val;
    struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = armv6_pmcr_read();
    val &= ~ARMV6_PMCR_ENABLE;
    armv6_pmcr_write(val);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
                struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    /* Always place a cycle counter event into the cycle counter. */
    if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
        if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
            return -EAGAIN;

        return ARMV6_CYCLE_COUNTER;
    } else {
        /*
         * For anything other than a cycle counter, try and use
         * counter0 and counter1.
         */
        if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
            return ARMV6_COUNTER1;

        if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
            return ARMV6_COUNTER0;

        /* The counters are all in use. */
        return -EAGAIN;
    }
}

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                     struct perf_event *event)
{
    clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
    unsigned long val, mask, evt, flags;
    struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
    int idx = hwc->idx;

    if (ARMV6_CYCLE_COUNTER == idx) {
        mask = ARMV6_PMCR_CCOUNT_IEN;
        evt  = 0;
    } else if (ARMV6_COUNTER0 == idx) {
        mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
        evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
    } else if (ARMV6_COUNTER1 == idx) {
        mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
        evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
    } else {
        WARN_ONCE(1, "invalid counter number (%d)\n", idx);
        return;
    }

    /*
     * Mask out the current event and set the counter to count the number
     * of ETM bus signal assertion cycles. The external reporting should
     * be disabled and so this should never increment.
     */
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = armv6_pmcr_read();
    val &= ~mask;
    val |= evt;
    armv6_pmcr_write(val);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
    unsigned long val, mask, flags, evt = 0;
    struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
    struct hw_perf_event *hwc = &event->hw;
    struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
    int idx = hwc->idx;

    if (ARMV6_CYCLE_COUNTER == idx) {
        mask = ARMV6_PMCR_CCOUNT_IEN;
    } else if (ARMV6_COUNTER0 == idx) {
        mask = ARMV6_PMCR_COUNT0_IEN;
    } else if (ARMV6_COUNTER1 == idx) {
        mask = ARMV6_PMCR_COUNT1_IEN;
    } else {
        WARN_ONCE(1, "invalid counter number (%d)\n", idx);
        return;
    }

    /*
     * Unlike UP ARMv6, we don't have a way of stopping the counters. We
     * simply disable the interrupt reporting.
     */
    raw_spin_lock_irqsave(&events->pmu_lock, flags);
    val = armv6_pmcr_read();
    val &= ~mask;
    val |= evt;
    armv6_pmcr_write(val);
    raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
    return armpmu_map_event(event, &armv6_perf_map,
                &armv6_perf_cache_map, 0xFF);
}
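
/*
 * The 0xFF passed to armpmu_map_event() is the raw event mask: ARMv6 event
 * numbers occupy an 8-bit field in the PMCR (see the
 * ARMV6_PMCR_EVT_COUNT*_MASK definitions), so raw perf event codes are
 * masked to 8 bits before being programmed.
 */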

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
    cpu_pmu->handle_irq = armv6pmu_handle_irq;
    cpu_pmu->enable     = armv6pmu_enable_event;
    cpu_pmu->disable    = armv6pmu_disable_event;
    cpu_pmu->read_counter   = armv6pmu_read_counter;
    cpu_pmu->write_counter  = armv6pmu_write_counter;
    cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
    cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
    cpu_pmu->start      = armv6pmu_start;
    cpu_pmu->stop       = armv6pmu_stop;
    cpu_pmu->map_event  = armv6_map_event;
    cpu_pmu->num_events = 3;
}
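
/*
 * num_events is 3: the dedicated cycle counter plus the two programmable
 * counters, matching enum armv6_counters above.
 */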

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
    armv6pmu_init(cpu_pmu);
    cpu_pmu->name       = "armv6_1136";
    return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
    armv6pmu_init(cpu_pmu);
    cpu_pmu->name       = "armv6_1156";
    return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
    armv6pmu_init(cpu_pmu);
    cpu_pmu->name       = "armv6_1176";
    return 0;
}

/*
 * ARMv6mpcore is almost identical to single-core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
    return armpmu_map_event(event, &armv6mpcore_perf_map,
                &armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
    cpu_pmu->name       = "armv6_11mpcore";
    cpu_pmu->handle_irq = armv6pmu_handle_irq;
    cpu_pmu->enable     = armv6pmu_enable_event;
    cpu_pmu->disable    = armv6mpcore_pmu_disable_event;
    cpu_pmu->read_counter   = armv6pmu_read_counter;
    cpu_pmu->write_counter  = armv6pmu_write_counter;
    cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
    cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
    cpu_pmu->start      = armv6pmu_start;
    cpu_pmu->stop       = armv6pmu_stop;
    cpu_pmu->map_event  = armv6mpcore_map_event;
    cpu_pmu->num_events = 3;

    return 0;
}

static const struct of_device_id armv6_pmu_of_device_ids[] = {
    {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
    {.compatible = "arm,arm1176-pmu",       .data = armv6_1176_pmu_init},
    {.compatible = "arm,arm1136-pmu",       .data = armv6_1136_pmu_init},
    { /* sentinel value */ }
};

static const struct pmu_probe_info armv6_pmu_probe_table[] = {
    ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
    ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
    ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
    ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
    { /* sentinel value */ }
};
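
/*
 * Probing: when a matching device-tree node is present the compatible string
 * selects the init function from armv6_pmu_of_device_ids; otherwise
 * arm_pmu_device_probe() falls back to matching the running CPU's part
 * number against armv6_pmu_probe_table.
 */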

static int armv6_pmu_device_probe(struct platform_device *pdev)
{
    return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
                    armv6_pmu_probe_table);
}

static struct platform_driver armv6_pmu_driver = {
    .driver     = {
        .name   = "armv6-pmu",
        .of_match_table = armv6_pmu_of_device_ids,
    },
    .probe      = armv6_pmu_device_probe,
};

builtin_platform_driver(armv6_pmu_driver);
#endif  /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */