0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
0004  *
0005  * ARMv7 support: Jean Pihet <jpihet@mvista.com>
0006  * 2010 (c) MontaVista Software, LLC.
0007  *
0008  * Copied from ARMv6 code, with the low level code inspired
0009  *  by the ARMv7 Oprofile code.
0010  *
0011  * Cortex-A8 has up to 4 configurable performance counters and
0012  *  a single cycle counter.
0013  * Cortex-A9 has up to 31 configurable performance counters and
0014  *  a single cycle counter.
0015  *
0016  * All counters can be enabled/disabled and IRQ masked separately. The cycle
0017  *  counter and the event counters (as a group) can be reset independently
0017  *  of each other.
0018  */
0019 
0020 #ifdef CONFIG_CPU_V7
0021 
0022 #include <asm/cp15.h>
0023 #include <asm/cputype.h>
0024 #include <asm/irq_regs.h>
0025 #include <asm/vfp.h>
0026 #include "../vfp/vfpinstr.h"
0027 
0028 #include <linux/of.h>
0029 #include <linux/perf/arm_pmu.h>
0030 #include <linux/platform_device.h>
0031 
0032 /*
0033  * Common ARMv7 event types
0034  *
0035  * Note: An implementation may not be able to count all of these events
0036  * but the encodings are considered to be `reserved' in the case that
0037  * they are not available.
0038  */
0039 #define ARMV7_PERFCTR_PMNC_SW_INCR          0x00
0040 #define ARMV7_PERFCTR_L1_ICACHE_REFILL          0x01
0041 #define ARMV7_PERFCTR_ITLB_REFILL           0x02
0042 #define ARMV7_PERFCTR_L1_DCACHE_REFILL          0x03
0043 #define ARMV7_PERFCTR_L1_DCACHE_ACCESS          0x04
0044 #define ARMV7_PERFCTR_DTLB_REFILL           0x05
0045 #define ARMV7_PERFCTR_MEM_READ              0x06
0046 #define ARMV7_PERFCTR_MEM_WRITE             0x07
0047 #define ARMV7_PERFCTR_INSTR_EXECUTED            0x08
0048 #define ARMV7_PERFCTR_EXC_TAKEN             0x09
0049 #define ARMV7_PERFCTR_EXC_EXECUTED          0x0A
0050 #define ARMV7_PERFCTR_CID_WRITE             0x0B
0051 
0052 /*
0053  * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
0054  * It counts:
0055  *  - all (taken) branch instructions,
0056  *  - instructions that explicitly write the PC,
0057  *  - exception generating instructions.
0058  */
0059 #define ARMV7_PERFCTR_PC_WRITE              0x0C
0060 #define ARMV7_PERFCTR_PC_IMM_BRANCH         0x0D
0061 #define ARMV7_PERFCTR_PC_PROC_RETURN            0x0E
0062 #define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS      0x0F
0063 #define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        0x10
0064 #define ARMV7_PERFCTR_CLOCK_CYCLES          0x11
0065 #define ARMV7_PERFCTR_PC_BRANCH_PRED            0x12
0066 
0067 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
0068 #define ARMV7_PERFCTR_MEM_ACCESS            0x13
0069 #define ARMV7_PERFCTR_L1_ICACHE_ACCESS          0x14
0070 #define ARMV7_PERFCTR_L1_DCACHE_WB          0x15
0071 #define ARMV7_PERFCTR_L2_CACHE_ACCESS           0x16
0072 #define ARMV7_PERFCTR_L2_CACHE_REFILL           0x17
0073 #define ARMV7_PERFCTR_L2_CACHE_WB           0x18
0074 #define ARMV7_PERFCTR_BUS_ACCESS            0x19
0075 #define ARMV7_PERFCTR_MEM_ERROR             0x1A
0076 #define ARMV7_PERFCTR_INSTR_SPEC            0x1B
0077 #define ARMV7_PERFCTR_TTBR_WRITE            0x1C
0078 #define ARMV7_PERFCTR_BUS_CYCLES            0x1D
0079 
0080 #define ARMV7_PERFCTR_CPU_CYCLES            0xFF
0081 
0082 /* ARMv7 Cortex-A8 specific event types */
0083 #define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS        0x43
0084 #define ARMV7_A8_PERFCTR_L2_CACHE_REFILL        0x44
0085 #define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS       0x50
0086 #define ARMV7_A8_PERFCTR_STALL_ISIDE            0x56
0087 
0088 /* ARMv7 Cortex-A9 specific event types */
0089 #define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME      0x68
0090 #define ARMV7_A9_PERFCTR_STALL_ICACHE           0x60
0091 #define ARMV7_A9_PERFCTR_STALL_DISPATCH         0x66
0092 
0093 /* ARMv7 Cortex-A5 specific event types */
0094 #define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL      0xc2
0095 #define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP     0xc3
0096 
0097 /* ARMv7 Cortex-A15 specific event types */
0098 #define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ     0x40
0099 #define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE    0x41
0100 #define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ     0x42
0101 #define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE    0x43
0102 
0103 #define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ       0x4C
0104 #define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE      0x4D
0105 
0106 #define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ      0x50
0107 #define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE     0x51
0108 #define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ      0x52
0109 #define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE     0x53
0110 
0111 #define ARMV7_A15_PERFCTR_PC_WRITE_SPEC         0x76
0112 
0113 /* ARMv7 Cortex-A12 specific event types */
0114 #define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ     0x40
0115 #define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE    0x41
0116 
0117 #define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ      0x50
0118 #define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE     0x51
0119 
0120 #define ARMV7_A12_PERFCTR_PC_WRITE_SPEC         0x76
0121 
0122 #define ARMV7_A12_PERFCTR_PF_TLB_REFILL         0xe7
0123 
0124 /* ARMv7 Krait specific event types */
0125 #define KRAIT_PMRESR0_GROUP0                0xcc
0126 #define KRAIT_PMRESR1_GROUP0                0xd0
0127 #define KRAIT_PMRESR2_GROUP0                0xd4
0128 #define KRAIT_VPMRESR0_GROUP0               0xd8
0129 
0130 #define KRAIT_PERFCTR_L1_ICACHE_ACCESS          0x10011
0131 #define KRAIT_PERFCTR_L1_ICACHE_MISS            0x10010
0132 
0133 #define KRAIT_PERFCTR_L1_ITLB_ACCESS            0x12222
0134 #define KRAIT_PERFCTR_L1_DTLB_ACCESS            0x12210
0135 
0136 /* ARMv7 Scorpion specific event types */
0137 #define SCORPION_LPM0_GROUP0                0x4c
0138 #define SCORPION_LPM1_GROUP0                0x50
0139 #define SCORPION_LPM2_GROUP0                0x54
0140 #define SCORPION_L2LPM_GROUP0               0x58
0141 #define SCORPION_VLPM_GROUP0                0x5c
0142 
0143 #define SCORPION_ICACHE_ACCESS              0x10053
0144 #define SCORPION_ICACHE_MISS                0x10052
0145 
0146 #define SCORPION_DTLB_ACCESS                0x12013
0147 #define SCORPION_DTLB_MISS              0x12012
0148 
0149 #define SCORPION_ITLB_MISS              0x12021
0150 
0151 /*
0152  * Cortex-A8 HW events mapping
0153  *
0154  * The hardware events that we support. We do support cache operations but
0155  * we have harvard caches and no way to combine instruction and data
0156  * accesses/misses in hardware.
0157  */
0158 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
0159     PERF_MAP_ALL_UNSUPPORTED,
0160     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0161     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0162     [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0163     [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0164     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
0165     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0166     [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
0167 };
0168 
0169 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0170                       [PERF_COUNT_HW_CACHE_OP_MAX]
0171                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0172     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0173 
0174     /*
0175      * The performance counters don't differentiate between read and write
0176      * accesses/misses so this isn't strictly correct, but it's the best we
0177      * can do. Writes and reads get combined.
0178      */
0179     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0180     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0181     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0182     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0183 
0184     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
0185     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
0186 
0187     [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
0188     [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
0189     [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
0190     [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
0191 
0192     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
0193     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
0194 
0195     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
0196     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
0197 
0198     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0199     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0200     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0201     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0202 };
0203 
0204 /*
0205  * Cortex-A9 HW events mapping
0206  */
0207 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
0208     PERF_MAP_ALL_UNSUPPORTED,
0209     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0210     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
0211     [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0212     [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0213     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
0214     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0215     [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
0216     [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV7_A9_PERFCTR_STALL_DISPATCH,
0217 };
0218 
0219 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0220                       [PERF_COUNT_HW_CACHE_OP_MAX]
0221                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0222     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0223 
0224     /*
0225      * The performance counters don't differentiate between read and write
0226      * accesses/misses so this isn't strictly correct, but it's the best we
0227      * can do. Writes and reads get combined.
0228      */
0229     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0230     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0231     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0232     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0233 
0234     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
0235 
0236     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
0237     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
0238 
0239     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
0240     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
0241 
0242     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0243     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0244     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0245     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0246 };
0247 
0248 /*
0249  * Cortex-A5 HW events mapping
0250  */
0251 static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
0252     PERF_MAP_ALL_UNSUPPORTED,
0253     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0254     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0255     [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0256     [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0257     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
0258     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0259 };
0260 
0261 static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0262                     [PERF_COUNT_HW_CACHE_OP_MAX]
0263                     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0264     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0265 
0266     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0267     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0268     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0269     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0270     [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]  = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
0271     [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]    = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
0272 
0273     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
0274     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
0275     /*
0276      * The prefetch counters don't differentiate between the I side and the
0277      * D side.
0278      */
0279     [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]  = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
0280     [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]    = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
0281 
0282     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
0283     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
0284 
0285     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
0286     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
0287 
0288     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0289     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0290     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0291     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0292 };
0293 
0294 /*
0295  * Cortex-A15 HW events mapping
0296  */
0297 static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
0298     PERF_MAP_ALL_UNSUPPORTED,
0299     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0300     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0301     [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0302     [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0303     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
0304     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0305     [PERF_COUNT_HW_BUS_CYCLES]      = ARMV7_PERFCTR_BUS_CYCLES,
0306 };
0307 
0308 static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0309                     [PERF_COUNT_HW_CACHE_OP_MAX]
0310                     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0311     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0312 
0313     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
0314     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
0315     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
0316     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
0317 
0318     /*
0319      * Not all performance counters differentiate between read and write
0320      * accesses/misses so we're not always strictly correct, but it's the
0321      * best we can do. Writes and reads get combined in these cases.
0322      */
0323     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
0324     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
0325 
0326     [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
0327     [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
0328     [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
0329     [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
0330 
0331     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
0332     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
0333 
0334     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
0335     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
0336 
0337     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0338     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0339     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0340     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0341 };
0342 
0343 /*
0344  * Cortex-A7 HW events mapping
0345  */
0346 static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
0347     PERF_MAP_ALL_UNSUPPORTED,
0348     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0349     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0350     [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0351     [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0352     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
0353     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0354     [PERF_COUNT_HW_BUS_CYCLES]      = ARMV7_PERFCTR_BUS_CYCLES,
0355 };
0356 
0357 static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0358                     [PERF_COUNT_HW_CACHE_OP_MAX]
0359                     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0360     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0361 
0362     /*
0363      * The performance counters don't differentiate between read and write
0364      * accesses/misses so this isn't strictly correct, but it's the best we
0365      * can do. Writes and reads get combined.
0366      */
0367     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0368     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0369     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0370     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0371 
0372     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
0373     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
0374 
0375     [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_PERFCTR_L2_CACHE_ACCESS,
0376     [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
0377     [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L2_CACHE_ACCESS,
0378     [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L2_CACHE_REFILL,
0379 
0380     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
0381     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
0382 
0383     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
0384     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
0385 
0386     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0387     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0388     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0389     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0390 };
0391 
0392 /*
0393  * Cortex-A12 HW events mapping
0394  */
0395 static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
0396     PERF_MAP_ALL_UNSUPPORTED,
0397     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0398     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0399     [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0400     [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0401     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
0402     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0403     [PERF_COUNT_HW_BUS_CYCLES]      = ARMV7_PERFCTR_BUS_CYCLES,
0404 };
0405 
0406 static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0407                     [PERF_COUNT_HW_CACHE_OP_MAX]
0408                     [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0409     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0410 
0411     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
0412     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0413     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
0414     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0415 
0416     /*
0417      * Not all performance counters differentiate between read and write
0418      * accesses/misses so we're not always strictly correct, but it's the
0419      * best we can do. Writes and reads get combined in these cases.
0420      */
0421     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
0422     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
0423 
0424     [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
0425     [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
0426     [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
0427     [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L2_CACHE_REFILL,
0428 
0429     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
0430     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
0431     [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]   = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
0432 
0433     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
0434     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,
0435 
0436     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0437     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0438     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0439     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0440 };
0441 
0442 /*
0443  * Krait HW events mapping
0444  */
0445 static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
0446     PERF_MAP_ALL_UNSUPPORTED,
0447     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0448     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0449     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
0450     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0451     [PERF_COUNT_HW_BUS_CYCLES]      = ARMV7_PERFCTR_CLOCK_CYCLES,
0452 };
0453 
0454 static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
0455     PERF_MAP_ALL_UNSUPPORTED,
0456     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0457     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0458     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0459     [PERF_COUNT_HW_BUS_CYCLES]      = ARMV7_PERFCTR_CLOCK_CYCLES,
0460 };
0461 
0462 static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0463                       [PERF_COUNT_HW_CACHE_OP_MAX]
0464                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0465     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0466 
0467     /*
0468      * The performance counters don't differentiate between read and write
0469      * accesses/misses so this isn't strictly correct, but it's the best we
0470      * can do. Writes and reads get combined.
0471      */
0472     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0473     [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0474     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0475     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0476 
0477     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
0478     [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = KRAIT_PERFCTR_L1_ICACHE_MISS,
0479 
0480     [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
0481     [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]    = KRAIT_PERFCTR_L1_DTLB_ACCESS,
0482 
0483     [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
0484     [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]    = KRAIT_PERFCTR_L1_ITLB_ACCESS,
0485 
0486     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
0487     [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0488     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0489     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0490 };
0491 
0492 /*
0493  * Scorpion HW events mapping
0494  */
0495 static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
0496     PERF_MAP_ALL_UNSUPPORTED,
0497     [PERF_COUNT_HW_CPU_CYCLES]      = ARMV7_PERFCTR_CPU_CYCLES,
0498     [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
0499     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
0500     [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0501     [PERF_COUNT_HW_BUS_CYCLES]      = ARMV7_PERFCTR_CLOCK_CYCLES,
0502 };
0503 
0504 static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
0505                         [PERF_COUNT_HW_CACHE_OP_MAX]
0506                         [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
0507     PERF_CACHE_MAP_ALL_UNSUPPORTED,
0508     /*
0509      * The performance counters don't differentiate between read and write
0510      * accesses/misses so this isn't strictly correct, but it's the best we
0511      * can do. Writes and reads get combined.
0512      */
0513     [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0514     [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0515     [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
0516     [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
0517     [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
0518     [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
0519     /*
0520      * Only ITLB misses and DTLB refills are supported.  If users want the
0521      * DTLB refill misses, a raw counter must be used.
0522      */
0523     [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
0524     [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
0525     [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
0526     [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
0527     [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
0528     [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
0529     [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0530     [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0531     [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
0532     [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0533 };
0534 
0535 PMU_FORMAT_ATTR(event, "config:0-7");
0536 
0537 static struct attribute *armv7_pmu_format_attrs[] = {
0538     &format_attr_event.attr,
0539     NULL,
0540 };
0541 
0542 static struct attribute_group armv7_pmu_format_attr_group = {
0543     .name = "format",
0544     .attrs = armv7_pmu_format_attrs,
0545 };
0546 
0547 #define ARMV7_EVENT_ATTR_RESOLVE(m) #m
0548 #define ARMV7_EVENT_ATTR(name, config) \
0549     PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
0550                   "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
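/*
 * Note: the extra ARMV7_EVENT_ATTR_RESOLVE() level of indirection makes the
 * preprocessor expand the ARMV7_PERFCTR_* argument to its numeric value
 * before the '#' operator stringizes it, so sysfs reports the encoding
 * itself (e.g. "event=0x08" for inst_retired) rather than the macro name.
 */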
0551 
0552 ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
0553 ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
0554 ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
0555 ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
0556 ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
0557 ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
0558 ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
0559 ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
0560 ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
0561 ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
0562 ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
0563 ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
0564 ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
0565 ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
0566 ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
0567 ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
0568 ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
0569 ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
0570 ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
0571 
0572 static struct attribute *armv7_pmuv1_event_attrs[] = {
0573     &armv7_event_attr_sw_incr.attr.attr,
0574     &armv7_event_attr_l1i_cache_refill.attr.attr,
0575     &armv7_event_attr_l1i_tlb_refill.attr.attr,
0576     &armv7_event_attr_l1d_cache_refill.attr.attr,
0577     &armv7_event_attr_l1d_cache.attr.attr,
0578     &armv7_event_attr_l1d_tlb_refill.attr.attr,
0579     &armv7_event_attr_ld_retired.attr.attr,
0580     &armv7_event_attr_st_retired.attr.attr,
0581     &armv7_event_attr_inst_retired.attr.attr,
0582     &armv7_event_attr_exc_taken.attr.attr,
0583     &armv7_event_attr_exc_return.attr.attr,
0584     &armv7_event_attr_cid_write_retired.attr.attr,
0585     &armv7_event_attr_pc_write_retired.attr.attr,
0586     &armv7_event_attr_br_immed_retired.attr.attr,
0587     &armv7_event_attr_br_return_retired.attr.attr,
0588     &armv7_event_attr_unaligned_ldst_retired.attr.attr,
0589     &armv7_event_attr_br_mis_pred.attr.attr,
0590     &armv7_event_attr_cpu_cycles.attr.attr,
0591     &armv7_event_attr_br_pred.attr.attr,
0592     NULL,
0593 };
0594 
0595 static struct attribute_group armv7_pmuv1_events_attr_group = {
0596     .name = "events",
0597     .attrs = armv7_pmuv1_event_attrs,
0598 };
0599 
0600 ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
0601 ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
0602 ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
0603 ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
0604 ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
0605 ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
0606 ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
0607 ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
0608 ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
0609 ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
0610 ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
0611 
0612 static struct attribute *armv7_pmuv2_event_attrs[] = {
0613     &armv7_event_attr_sw_incr.attr.attr,
0614     &armv7_event_attr_l1i_cache_refill.attr.attr,
0615     &armv7_event_attr_l1i_tlb_refill.attr.attr,
0616     &armv7_event_attr_l1d_cache_refill.attr.attr,
0617     &armv7_event_attr_l1d_cache.attr.attr,
0618     &armv7_event_attr_l1d_tlb_refill.attr.attr,
0619     &armv7_event_attr_ld_retired.attr.attr,
0620     &armv7_event_attr_st_retired.attr.attr,
0621     &armv7_event_attr_inst_retired.attr.attr,
0622     &armv7_event_attr_exc_taken.attr.attr,
0623     &armv7_event_attr_exc_return.attr.attr,
0624     &armv7_event_attr_cid_write_retired.attr.attr,
0625     &armv7_event_attr_pc_write_retired.attr.attr,
0626     &armv7_event_attr_br_immed_retired.attr.attr,
0627     &armv7_event_attr_br_return_retired.attr.attr,
0628     &armv7_event_attr_unaligned_ldst_retired.attr.attr,
0629     &armv7_event_attr_br_mis_pred.attr.attr,
0630     &armv7_event_attr_cpu_cycles.attr.attr,
0631     &armv7_event_attr_br_pred.attr.attr,
0632     &armv7_event_attr_mem_access.attr.attr,
0633     &armv7_event_attr_l1i_cache.attr.attr,
0634     &armv7_event_attr_l1d_cache_wb.attr.attr,
0635     &armv7_event_attr_l2d_cache.attr.attr,
0636     &armv7_event_attr_l2d_cache_refill.attr.attr,
0637     &armv7_event_attr_l2d_cache_wb.attr.attr,
0638     &armv7_event_attr_bus_access.attr.attr,
0639     &armv7_event_attr_memory_error.attr.attr,
0640     &armv7_event_attr_inst_spec.attr.attr,
0641     &armv7_event_attr_ttbr_write_retired.attr.attr,
0642     &armv7_event_attr_bus_cycles.attr.attr,
0643     NULL,
0644 };
0645 
0646 static struct attribute_group armv7_pmuv2_events_attr_group = {
0647     .name = "events",
0648     .attrs = armv7_pmuv2_event_attrs,
0649 };
0650 
0651 /*
0652  * Perf Events' indices
0653  */
0654 #define ARMV7_IDX_CYCLE_COUNTER 0
0655 #define ARMV7_IDX_COUNTER0  1
0656 #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
0657     (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
0658 
0659 #define ARMV7_MAX_COUNTERS  32
0660 #define ARMV7_COUNTER_MASK  (ARMV7_MAX_COUNTERS - 1)
0661 
0662 /*
0663  * ARMv7 low level PMNC access
0664  */
0665 
0666 /*
0667  * Perf Event to low level counters mapping
0668  */
0669 #define ARMV7_IDX_TO_COUNTER(x) \
0670     (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
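/*
 * ARMV7_IDX_CYCLE_COUNTER (0) maps to counter number 31 here, which is the
 * bit used for the cycle counter in the enable, interrupt-enable and
 * overflow-flag registers accessed below, so the same helpers serve both
 * the cycle counter and the event counters.
 */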
0671 
0672 /*
0673  * Per-CPU PMNC: config reg
0674  */
0675 #define ARMV7_PMNC_E        (1 << 0) /* Enable all counters */
0676 #define ARMV7_PMNC_P        (1 << 1) /* Reset all counters */
0677 #define ARMV7_PMNC_C        (1 << 2) /* Cycle counter reset */
0678 #define ARMV7_PMNC_D        (1 << 3) /* CCNT counts every 64th cpu cycle */
0679 #define ARMV7_PMNC_X        (1 << 4) /* Export to ETM */
0680 #define ARMV7_PMNC_DP       (1 << 5) /* Disable CCNT if non-invasive debug */
0681 #define ARMV7_PMNC_N_SHIFT  11   /* Number of counters supported */
0682 #define ARMV7_PMNC_N_MASK   0x1f
0683 #define ARMV7_PMNC_MASK     0x3f     /* Mask for writable bits */
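/*
 * ARMV7_PMNC_MASK (0x3f) covers exactly the six control bits defined above
 * (E, P, C, D, X, DP); armv7_pmnc_write() applies it so reserved PMNC bits
 * are never written.
 */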
0684 
0685 /*
0686  * FLAG: counters overflow flag status reg
0687  */
0688 #define ARMV7_FLAG_MASK     0xffffffff  /* Mask for writable bits */
0689 #define ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK
0690 
0691 /*
0692  * PMXEVTYPER: Event selection reg
0693  */
0694 #define ARMV7_EVTYPE_MASK   0xc80000ff  /* Mask for writable bits */
0695 #define ARMV7_EVTYPE_EVENT  0xff        /* Mask for EVENT bits */
0696 
0697 /*
0698  * Event filters for PMUv2
0699  */
0700 #define ARMV7_EXCLUDE_PL1   BIT(31)
0701 #define ARMV7_EXCLUDE_USER  BIT(30)
0702 #define ARMV7_INCLUDE_HYP   BIT(27)
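/*
 * These three filter bits (31, 30 and 27), together with the 8-bit event
 * number, are exactly what ARMV7_EVTYPE_MASK (0xc80000ff) preserves when
 * the event type register is programmed in armv7_pmnc_write_evtsel().
 */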
0703 
0704 /*
0705  * Secure debug enable reg
0706  */
0707 #define ARMV7_SDER_SUNIDEN  BIT(1) /* Permit non-invasive debug */
0708 
0709 static inline u32 armv7_pmnc_read(void)
0710 {
0711     u32 val;
0712     asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
0713     return val;
0714 }
0715 
0716 static inline void armv7_pmnc_write(u32 val)
0717 {
0718     val &= ARMV7_PMNC_MASK;
0719     isb();
0720     asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
0721 }
0722 
0723 static inline int armv7_pmnc_has_overflowed(u32 pmnc)
0724 {
0725     return pmnc & ARMV7_OVERFLOWED_MASK;
0726 }
0727 
0728 static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
0729 {
0730     return idx >= ARMV7_IDX_CYCLE_COUNTER &&
0731         idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
0732 }
0733 
0734 static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
0735 {
0736     return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
0737 }
0738 
0739 static inline void armv7_pmnc_select_counter(int idx)
0740 {
0741     u32 counter = ARMV7_IDX_TO_COUNTER(idx);
0742     asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
0743     isb();
0744 }
0745 
0746 static inline u64 armv7pmu_read_counter(struct perf_event *event)
0747 {
0748     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
0749     struct hw_perf_event *hwc = &event->hw;
0750     int idx = hwc->idx;
0751     u32 value = 0;
0752 
0753     if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
0754         pr_err("CPU%u reading wrong counter %d\n",
0755             smp_processor_id(), idx);
0756     } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
0757         asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
0758     } else {
0759         armv7_pmnc_select_counter(idx);
0760         asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
0761     }
0762 
0763     return value;
0764 }
0765 
0766 static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
0767 {
0768     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
0769     struct hw_perf_event *hwc = &event->hw;
0770     int idx = hwc->idx;
0771 
0772     if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
0773         pr_err("CPU%u writing wrong counter %d\n",
0774             smp_processor_id(), idx);
0775     } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
0776         asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
0777     } else {
0778         armv7_pmnc_select_counter(idx);
0779         asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
0780     }
0781 }
0782 
0783 static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
0784 {
0785     armv7_pmnc_select_counter(idx);
0786     val &= ARMV7_EVTYPE_MASK;
0787     asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
0788 }
0789 
0790 static inline void armv7_pmnc_enable_counter(int idx)
0791 {
0792     u32 counter = ARMV7_IDX_TO_COUNTER(idx);
0793     asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
0794 }
0795 
0796 static inline void armv7_pmnc_disable_counter(int idx)
0797 {
0798     u32 counter = ARMV7_IDX_TO_COUNTER(idx);
0799     asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
0800 }
0801 
0802 static inline void armv7_pmnc_enable_intens(int idx)
0803 {
0804     u32 counter = ARMV7_IDX_TO_COUNTER(idx);
0805     asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
0806 }
0807 
0808 static inline void armv7_pmnc_disable_intens(int idx)
0809 {
0810     u32 counter = ARMV7_IDX_TO_COUNTER(idx);
0811     asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
0812     isb();
0813     /* Clear the overflow flag in case an interrupt is pending. */
0814     asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
0815     isb();
0816 }
0817 
0818 static inline u32 armv7_pmnc_getreset_flags(void)
0819 {
0820     u32 val;
0821 
0822     /* Read */
0823     asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
0824 
0825     /* Write to clear flags */
0826     val &= ARMV7_FLAG_MASK;
0827     asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
0828 
0829     return val;
0830 }
0831 
0832 #ifdef DEBUG
0833 static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
0834 {
0835     u32 val;
0836     unsigned int cnt;
0837 
0838     pr_info("PMNC registers dump:\n");
0839 
0840     asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
0841     pr_info("PMNC  =0x%08x\n", val);
0842 
0843     asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
0844     pr_info("CNTENS=0x%08x\n", val);
0845 
0846     asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
0847     pr_info("INTENS=0x%08x\n", val);
0848 
0849     asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
0850     pr_info("FLAGS =0x%08x\n", val);
0851 
0852     asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
0853     pr_info("SELECT=0x%08x\n", val);
0854 
0855     asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
0856     pr_info("CCNT  =0x%08x\n", val);
0857 
0858     for (cnt = ARMV7_IDX_COUNTER0;
0859             cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
0860         armv7_pmnc_select_counter(cnt);
0861         asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
0862         pr_info("CNT[%d] count =0x%08x\n",
0863             ARMV7_IDX_TO_COUNTER(cnt), val);
0864         asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
0865         pr_info("CNT[%d] evtsel=0x%08x\n",
0866             ARMV7_IDX_TO_COUNTER(cnt), val);
0867     }
0868 }
0869 #endif
0870 
0871 static void armv7pmu_enable_event(struct perf_event *event)
0872 {
0873     unsigned long flags;
0874     struct hw_perf_event *hwc = &event->hw;
0875     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
0876     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
0877     int idx = hwc->idx;
0878 
0879     if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
0880         pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
0881             smp_processor_id(), idx);
0882         return;
0883     }
0884 
0885     /*
0886      * Enable counter and interrupt, and set the counter to count
0887      * the event that we're interested in.
0888      */
0889     raw_spin_lock_irqsave(&events->pmu_lock, flags);
0890 
0891     /*
0892      * Disable counter
0893      */
0894     armv7_pmnc_disable_counter(idx);
0895 
0896     /*
0897      * Set event (if destined for PMNx counters)
0898      * We only need to set the event for the cycle counter if we
0899      * have the ability to perform event filtering.
0900      */
0901     if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
0902         armv7_pmnc_write_evtsel(idx, hwc->config_base);
0903 
0904     /*
0905      * Enable interrupt for this counter
0906      */
0907     armv7_pmnc_enable_intens(idx);
0908 
0909     /*
0910      * Enable counter
0911      */
0912     armv7_pmnc_enable_counter(idx);
0913 
0914     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
0915 }
0916 
0917 static void armv7pmu_disable_event(struct perf_event *event)
0918 {
0919     unsigned long flags;
0920     struct hw_perf_event *hwc = &event->hw;
0921     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
0922     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
0923     int idx = hwc->idx;
0924 
0925     if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
0926         pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
0927             smp_processor_id(), idx);
0928         return;
0929     }
0930 
0931     /*
0932      * Disable counter and interrupt
0933      */
0934     raw_spin_lock_irqsave(&events->pmu_lock, flags);
0935 
0936     /*
0937      * Disable counter
0938      */
0939     armv7_pmnc_disable_counter(idx);
0940 
0941     /*
0942      * Disable interrupt for this counter
0943      */
0944     armv7_pmnc_disable_intens(idx);
0945 
0946     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
0947 }
0948 
0949 static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
0950 {
0951     u32 pmnc;
0952     struct perf_sample_data data;
0953     struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
0954     struct pt_regs *regs;
0955     int idx;
0956 
0957     /*
0958      * Get and reset the IRQ flags
0959      */
0960     pmnc = armv7_pmnc_getreset_flags();
0961 
0962     /*
0963      * Did an overflow occur?
0964      */
0965     if (!armv7_pmnc_has_overflowed(pmnc))
0966         return IRQ_NONE;
0967 
0968     /*
0969      * Handle the counter(s) overflow(s)
0970      */
0971     regs = get_irq_regs();
0972 
0973     for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
0974         struct perf_event *event = cpuc->events[idx];
0975         struct hw_perf_event *hwc;
0976 
0977         /* Ignore if we don't have an event. */
0978         if (!event)
0979             continue;
0980 
0981         /*
0982          * We have a single interrupt for all counters. Check that
0983          * each counter has overflowed before we process it.
0984          */
0985         if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
0986             continue;
0987 
0988         hwc = &event->hw;
0989         armpmu_event_update(event);
0990         perf_sample_data_init(&data, 0, hwc->last_period);
0991         if (!armpmu_event_set_period(event))
0992             continue;
0993 
0994         if (perf_event_overflow(event, &data, regs))
0995             cpu_pmu->disable(event);
0996     }
0997 
0998     /*
0999      * Handle the pending perf events.
1000      *
1001      * Note: this call *must* be run with interrupts disabled. For
1002      * platforms that can have the PMU interrupts raised as an NMI, this
1003      * will not work.
1004      */
1005     irq_work_run();
1006 
1007     return IRQ_HANDLED;
1008 }
1009 
1010 static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1011 {
1012     unsigned long flags;
1013     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1014 
1015     raw_spin_lock_irqsave(&events->pmu_lock, flags);
1016     /* Enable all counters */
1017     armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1018     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1019 }
1020 
1021 static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1022 {
1023     unsigned long flags;
1024     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1025 
1026     raw_spin_lock_irqsave(&events->pmu_lock, flags);
1027     /* Disable all counters */
1028     armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1029     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1030 }
1031 
1032 static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1033                   struct perf_event *event)
1034 {
1035     int idx;
1036     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1037     struct hw_perf_event *hwc = &event->hw;
1038     unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1039 
1040     /* Always place a cycle-count event on the dedicated cycle counter. */
1041     if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1042         if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1043             return -EAGAIN;
1044 
1045         return ARMV7_IDX_CYCLE_COUNTER;
1046     }
1047 
1048     /*
1049      * For anything other than a cycle counter, try and use
1050      * the events counters
1051      */
1052     for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1053         if (!test_and_set_bit(idx, cpuc->used_mask))
1054             return idx;
1055     }
1056 
1057     /* The counters are all in use. */
1058     return -EAGAIN;
1059 }
1060 
1061 static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1062                      struct perf_event *event)
1063 {
1064     clear_bit(event->hw.idx, cpuc->used_mask);
1065 }
1066 
1067 /*
1068  * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1069  */
1070 static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1071                      struct perf_event_attr *attr)
1072 {
1073     unsigned long config_base = 0;
1074 
1075     if (attr->exclude_idle)
1076         return -EPERM;
1077     if (attr->exclude_user)
1078         config_base |= ARMV7_EXCLUDE_USER;
1079     if (attr->exclude_kernel)
1080         config_base |= ARMV7_EXCLUDE_PL1;
1081     if (!attr->exclude_hv)
1082         config_base |= ARMV7_INCLUDE_HYP;
1083 
1084     /*
1085      * Install the filter into config_base as this is used to
1086      * construct the event type.
1087      */
1088     event->config_base = config_base;
1089 
1090     return 0;
1091 }
1092 
1093 static void armv7pmu_reset(void *info)
1094 {
1095     struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1096     u32 idx, nb_cnt = cpu_pmu->num_events, val;
1097 
1098     if (cpu_pmu->secure_access) {
1099         asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
1100         val |= ARMV7_SDER_SUNIDEN;
1101         asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
1102     }
1103 
1104     /* The counter and interrupt enable registers are unknown at reset. */
1105     for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1106         armv7_pmnc_disable_counter(idx);
1107         armv7_pmnc_disable_intens(idx);
1108     }
1109 
1110     /* Initialize & Reset PMNC: C and P bits */
1111     armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1112 }
1113 
1114 static int armv7_a8_map_event(struct perf_event *event)
1115 {
1116     return armpmu_map_event(event, &armv7_a8_perf_map,
1117                 &armv7_a8_perf_cache_map, 0xFF);
1118 }
1119 
1120 static int armv7_a9_map_event(struct perf_event *event)
1121 {
1122     return armpmu_map_event(event, &armv7_a9_perf_map,
1123                 &armv7_a9_perf_cache_map, 0xFF);
1124 }
1125 
1126 static int armv7_a5_map_event(struct perf_event *event)
1127 {
1128     return armpmu_map_event(event, &armv7_a5_perf_map,
1129                 &armv7_a5_perf_cache_map, 0xFF);
1130 }
1131 
1132 static int armv7_a15_map_event(struct perf_event *event)
1133 {
1134     return armpmu_map_event(event, &armv7_a15_perf_map,
1135                 &armv7_a15_perf_cache_map, 0xFF);
1136 }
1137 
1138 static int armv7_a7_map_event(struct perf_event *event)
1139 {
1140     return armpmu_map_event(event, &armv7_a7_perf_map,
1141                 &armv7_a7_perf_cache_map, 0xFF);
1142 }
1143 
1144 static int armv7_a12_map_event(struct perf_event *event)
1145 {
1146     return armpmu_map_event(event, &armv7_a12_perf_map,
1147                 &armv7_a12_perf_cache_map, 0xFF);
1148 }
1149 
1150 static int krait_map_event(struct perf_event *event)
1151 {
1152     return armpmu_map_event(event, &krait_perf_map,
1153                 &krait_perf_cache_map, 0xFFFFF);
1154 }
1155 
1156 static int krait_map_event_no_branch(struct perf_event *event)
1157 {
1158     return armpmu_map_event(event, &krait_perf_map_no_branch,
1159                 &krait_perf_cache_map, 0xFFFFF);
1160 }
1161 
1162 static int scorpion_map_event(struct perf_event *event)
1163 {
1164     return armpmu_map_event(event, &scorpion_perf_map,
1165                 &scorpion_perf_cache_map, 0xFFFFF);
1166 }
1167 
1168 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1169 {
1170     cpu_pmu->handle_irq = armv7pmu_handle_irq;
1171     cpu_pmu->enable     = armv7pmu_enable_event;
1172     cpu_pmu->disable    = armv7pmu_disable_event;
1173     cpu_pmu->read_counter   = armv7pmu_read_counter;
1174     cpu_pmu->write_counter  = armv7pmu_write_counter;
1175     cpu_pmu->get_event_idx  = armv7pmu_get_event_idx;
1176     cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
1177     cpu_pmu->start      = armv7pmu_start;
1178     cpu_pmu->stop       = armv7pmu_stop;
1179     cpu_pmu->reset      = armv7pmu_reset;
1180 }
1181 
1182 static void armv7_read_num_pmnc_events(void *info)
1183 {
1184     int *nb_cnt = info;
1185 
1186     /* Read the nb of CNTx counters supported from PMNC */
1187     *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1188 
1189     /* Add the CPU cycles counter */
1190     *nb_cnt += 1;
1191 }
1192 
1193 static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1194 {
1195     return smp_call_function_any(&arm_pmu->supported_cpus,
1196                      armv7_read_num_pmnc_events,
1197                      &arm_pmu->num_events, 1);
1198 }
1199 
1200 static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1201 {
1202     armv7pmu_init(cpu_pmu);
1203     cpu_pmu->name       = "armv7_cortex_a8";
1204     cpu_pmu->map_event  = armv7_a8_map_event;
1205     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1206         &armv7_pmuv1_events_attr_group;
1207     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1208         &armv7_pmu_format_attr_group;
1209     return armv7_probe_num_events(cpu_pmu);
1210 }
1211 
1212 static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1213 {
1214     armv7pmu_init(cpu_pmu);
1215     cpu_pmu->name       = "armv7_cortex_a9";
1216     cpu_pmu->map_event  = armv7_a9_map_event;
1217     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1218         &armv7_pmuv1_events_attr_group;
1219     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1220         &armv7_pmu_format_attr_group;
1221     return armv7_probe_num_events(cpu_pmu);
1222 }
1223 
1224 static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1225 {
1226     armv7pmu_init(cpu_pmu);
1227     cpu_pmu->name       = "armv7_cortex_a5";
1228     cpu_pmu->map_event  = armv7_a5_map_event;
1229     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1230         &armv7_pmuv1_events_attr_group;
1231     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1232         &armv7_pmu_format_attr_group;
1233     return armv7_probe_num_events(cpu_pmu);
1234 }
1235 
1236 static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1237 {
1238     armv7pmu_init(cpu_pmu);
1239     cpu_pmu->name       = "armv7_cortex_a15";
1240     cpu_pmu->map_event  = armv7_a15_map_event;
1241     cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1242     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1243         &armv7_pmuv2_events_attr_group;
1244     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1245         &armv7_pmu_format_attr_group;
1246     return armv7_probe_num_events(cpu_pmu);
1247 }
1248 
1249 static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1250 {
1251     armv7pmu_init(cpu_pmu);
1252     cpu_pmu->name       = "armv7_cortex_a7";
1253     cpu_pmu->map_event  = armv7_a7_map_event;
1254     cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1255     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1256         &armv7_pmuv2_events_attr_group;
1257     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1258         &armv7_pmu_format_attr_group;
1259     return armv7_probe_num_events(cpu_pmu);
1260 }
1261 
1262 static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1263 {
1264     armv7pmu_init(cpu_pmu);
1265     cpu_pmu->name       = "armv7_cortex_a12";
1266     cpu_pmu->map_event  = armv7_a12_map_event;
1267     cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1268     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1269         &armv7_pmuv2_events_attr_group;
1270     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1271         &armv7_pmu_format_attr_group;
1272     return armv7_probe_num_events(cpu_pmu);
1273 }
1274 
1275 static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1276 {
1277     int ret = armv7_a12_pmu_init(cpu_pmu);
1278     cpu_pmu->name = "armv7_cortex_a17";
1279     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1280         &armv7_pmuv2_events_attr_group;
1281     cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1282         &armv7_pmu_format_attr_group;
1283     return ret;
1284 }
1285 
1286 /*
1287  * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1288  *
1289  *            31   30     24     16     8      0
1290  *            +--------------------------------+
1291  *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1292  *            +--------------------------------+
1293  *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1294  *            +--------------------------------+
1295  *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1296  *            +--------------------------------+
1297  *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1298  *            +--------------------------------+
1299  *              EN | G=3  | G=2  | G=1  | G=0
1300  *
1301  *  Event Encoding:
1302  *
1303  *      hwc->config_base = 0xNRCCG
1304  *
1305  *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1306  *      R  = region register
1307  *      CC = class of events the group G is choosing from
1308  *      G  = group or particular event
1309  *
1310  *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1311  *
1312  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1313  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1314  *  events (interrupts for example). An event code is broken down into
1315  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1316  *  example).
1317  */
1318 
1319 #define KRAIT_EVENT     (1 << 16)
1320 #define VENUM_EVENT     (2 << 16)
1321 #define KRAIT_EVENT_MASK    (KRAIT_EVENT | VENUM_EVENT)
1322 #define PMRESRn_EN      BIT(31)
1323 
1324 #define EVENT_REGION(event) (((event) >> 12) & 0xf)     /* R */
1325 #define EVENT_GROUP(event)  ((event) & 0xf)         /* G */
1326 #define EVENT_CODE(event)   (((event) >> 4) & 0xff)     /* CC */
1327 #define EVENT_VENUM(event)  (!!(event & VENUM_EVENT))   /* N=2 */
1328 #define EVENT_CPU(event)    (!!(event & KRAIT_EVENT))   /* N=1 */
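/*
 * Applying these macros to the 0x12021 example above: EVENT_CPU() is true
 * (N = 1), EVENT_REGION() is 2 (PMRESR2), EVENT_CODE() is 0x02 and
 * EVENT_GROUP() is 1.
 */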
1329 
1330 static u32 krait_read_pmresrn(int n)
1331 {
1332     u32 val;
1333 
1334     switch (n) {
1335     case 0:
1336         asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1337         break;
1338     case 1:
1339         asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1340         break;
1341     case 2:
1342         asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1343         break;
1344     default:
1345         BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1346     }
1347 
1348     return val;
1349 }
1350 
1351 static void krait_write_pmresrn(int n, u32 val)
1352 {
1353     switch (n) {
1354     case 0:
1355         asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1356         break;
1357     case 1:
1358         asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1359         break;
1360     case 2:
1361         asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1362         break;
1363     default:
1364         BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1365     }
1366 }
1367 
1368 static u32 venum_read_pmresr(void)
1369 {
1370     u32 val;
1371     asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1372     return val;
1373 }
1374 
1375 static void venum_write_pmresr(u32 val)
1376 {
1377     asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1378 }
1379 
1380 static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1381 {
1382     u32 venum_new_val;
1383     u32 fp_new_val;
1384 
1385     BUG_ON(preemptible());
1386     /* Enable CP10 and CP11 (VFP/Venum) access in the CPACR */
1387     *venum_orig_val = get_copro_access();
1388     venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1389     set_copro_access(venum_new_val);
1390 
1391     /* Enable FPEXC */
1392     *fp_orig_val = fmrx(FPEXC);
1393     fp_new_val = *fp_orig_val | FPEXC_EN;
1394     fmxr(FPEXC, fp_new_val);
1395 }
1396 
1397 static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1398 {
1399     BUG_ON(preemptible());
1400     /* Restore FPEXC */
1401     fmxr(FPEXC, fp_orig_val);
1402     isb();
1403     /* Restore CPACR */
1404     set_copro_access(venum_orig_val);
1405 }
1406 
1407 static u32 krait_get_pmresrn_event(unsigned int region)
1408 {
1409     static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1410                          KRAIT_PMRESR1_GROUP0,
1411                          KRAIT_PMRESR2_GROUP0 };
1412     return pmresrn_table[region];
1413 }
1414 
1415 static void krait_evt_setup(int idx, u32 config_base)
1416 {
1417     u32 val;
1418     u32 mask;
1419     u32 vval, fval;
1420     unsigned int region = EVENT_REGION(config_base);
1421     unsigned int group = EVENT_GROUP(config_base);
1422     unsigned int code = EVENT_CODE(config_base);
1423     unsigned int group_shift;
1424     bool venum_event = EVENT_VENUM(config_base);
1425 
1426     group_shift = group * 8;
1427     mask = 0xff << group_shift;
1428 
1429     /* Configure evtsel for the region and group */
1430     if (venum_event)
1431         val = KRAIT_VPMRESR0_GROUP0;
1432     else
1433         val = krait_get_pmresrn_event(region);
1434     val += group;
1435     /* Mix in mode-exclusion bits */
1436     val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1437     armv7_pmnc_write_evtsel(idx, val);
1438 
1439     if (venum_event) {
1440         venum_pre_pmresr(&vval, &fval);
1441         val = venum_read_pmresr();
1442         val &= ~mask;
1443         val |= code << group_shift;
1444         val |= PMRESRn_EN;
1445         venum_write_pmresr(val);
1446         venum_post_pmresr(vval, fval);
1447     } else {
1448         val = krait_read_pmresrn(region);
1449         val &= ~mask;
1450         val |= code << group_shift;
1451         val |= PMRESRn_EN;
1452         krait_write_pmresrn(region, val);
1453     }
1454 }
1455 
1456 static u32 clear_pmresrn_group(u32 val, int group)
1457 {
1458     u32 mask;
1459     int group_shift;
1460 
1461     group_shift = group * 8;
1462     mask = 0xff << group_shift;
1463     val &= ~mask;
1464 
1465     /* Keep the enable bit set unless the entire region is now unused */
1466     if (val & ~PMRESRn_EN)
1467         return val |= PMRESRn_EN;
1468 
1469     return 0;
1470 }
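
/*
 * For instance (a sketch of the logic above): with groups 0 and 1 both
 * programmed, val = 0x80001234, clearing group 1 leaves group 0's code
 * in place, so the enable bit is kept and 0x80000034 is returned.
 * Clearing the last remaining group returns 0, disabling the region.
 */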
1471 
1472 static void krait_clearpmu(u32 config_base)
1473 {
1474     u32 val;
1475     u32 vval, fval;
1476     unsigned int region = EVENT_REGION(config_base);
1477     unsigned int group = EVENT_GROUP(config_base);
1478     bool venum_event = EVENT_VENUM(config_base);
1479 
1480     if (venum_event) {
1481         venum_pre_pmresr(&vval, &fval);
1482         val = venum_read_pmresr();
1483         val = clear_pmresrn_group(val, group);
1484         venum_write_pmresr(val);
1485         venum_post_pmresr(vval, fval);
1486     } else {
1487         val = krait_read_pmresrn(region);
1488         val = clear_pmresrn_group(val, group);
1489         krait_write_pmresrn(region, val);
1490     }
1491 }
1492 
1493 static void krait_pmu_disable_event(struct perf_event *event)
1494 {
1495     unsigned long flags;
1496     struct hw_perf_event *hwc = &event->hw;
1497     int idx = hwc->idx;
1498     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1499     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1500 
1501     /* Disable counter and interrupt */
1502     raw_spin_lock_irqsave(&events->pmu_lock, flags);
1503 
1504     /* Disable counter */
1505     armv7_pmnc_disable_counter(idx);
1506 
1507     /*
1508      * Clear pmresr code (if destined for PMNx counters)
1509      */
1510     if (hwc->config_base & KRAIT_EVENT_MASK)
1511         krait_clearpmu(hwc->config_base);
1512 
1513     /* Disable interrupt for this counter */
1514     armv7_pmnc_disable_intens(idx);
1515 
1516     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1517 }
1518 
1519 static void krait_pmu_enable_event(struct perf_event *event)
1520 {
1521     unsigned long flags;
1522     struct hw_perf_event *hwc = &event->hw;
1523     int idx = hwc->idx;
1524     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1525     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1526 
1527     /*
1528      * Enable counter and interrupt, and set the counter to count
1529      * the event that we're interested in.
1530      */
1531     raw_spin_lock_irqsave(&events->pmu_lock, flags);
1532 
1533     /* Disable counter */
1534     armv7_pmnc_disable_counter(idx);
1535 
1536     /*
1537      * Set event (if destined for PMNx counters)
1538      * We set the event for the cycle counter because we
1539      * have the ability to perform event filtering.
1540      */
1541     if (hwc->config_base & KRAIT_EVENT_MASK)
1542         krait_evt_setup(idx, hwc->config_base);
1543     else
1544         armv7_pmnc_write_evtsel(idx, hwc->config_base);
1545 
1546     /* Enable interrupt for this counter */
1547     armv7_pmnc_enable_intens(idx);
1548 
1549     /* Enable counter */
1550     armv7_pmnc_enable_counter(idx);
1551 
1552     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1553 }
1554 
1555 static void krait_pmu_reset(void *info)
1556 {
1557     u32 vval, fval;
1558     struct arm_pmu *cpu_pmu = info;
1559     u32 idx, nb_cnt = cpu_pmu->num_events;
1560 
1561     armv7pmu_reset(info);
1562 
1563     /* Clear all pmresrs */
1564     krait_write_pmresrn(0, 0);
1565     krait_write_pmresrn(1, 0);
1566     krait_write_pmresrn(2, 0);
1567 
1568     venum_pre_pmresr(&vval, &fval);
1569     venum_write_pmresr(0);
1570     venum_post_pmresr(vval, fval);
1571 
1572     /* Reset PMxEVCNTCR to a sane default */
1573     for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1574         armv7_pmnc_select_counter(idx);
1575         asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1576     }
1577 
1578 }
1579 
1580 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1581                   unsigned int group)
1582 {
1583     int bit;
1584     struct hw_perf_event *hwc = &event->hw;
1585     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1586 
1587     if (hwc->config_base & VENUM_EVENT)
1588         bit = KRAIT_VPMRESR0_GROUP0;
1589     else
1590         bit = krait_get_pmresrn_event(region);
1591     bit -= krait_get_pmresrn_event(0);
1592     bit += group;
1593     /*
1594      * Lower bits are reserved for use by the counters (see
1595      * armv7pmu_get_event_idx() for more info)
1596      */
1597     bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1598 
1599     return bit;
1600 }
1601 
1602 /*
1603  * We check for column exclusion constraints here.
1604  * Two events can't use the same group within a pmresr register.
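 * For example, two events that both select PMRESR2 group 1 (such as the
 * 0x12021 event above, even with different event codes) map to the same
 * used_mask bit, so the second one to be scheduled gets -EAGAIN.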
1605  */
1606 static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1607                    struct perf_event *event)
1608 {
1609     int idx;
1610     int bit = -1;
1611     struct hw_perf_event *hwc = &event->hw;
1612     unsigned int region = EVENT_REGION(hwc->config_base);
1613     unsigned int code = EVENT_CODE(hwc->config_base);
1614     unsigned int group = EVENT_GROUP(hwc->config_base);
1615     bool venum_event = EVENT_VENUM(hwc->config_base);
1616     bool krait_event = EVENT_CPU(hwc->config_base);
1617 
1618     if (venum_event || krait_event) {
1619         /* Ignore invalid events */
1620         if (group > 3 || region > 2)
1621             return -EINVAL;
1622         if (venum_event && (code & 0xe0))
1623             return -EINVAL;
1624 
1625         bit = krait_event_to_bit(event, region, group);
1626         if (test_and_set_bit(bit, cpuc->used_mask))
1627             return -EAGAIN;
1628     }
1629 
1630     idx = armv7pmu_get_event_idx(cpuc, event);
1631     if (idx < 0 && bit >= 0)
1632         clear_bit(bit, cpuc->used_mask);
1633 
1634     return idx;
1635 }
1636 
1637 static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1638                       struct perf_event *event)
1639 {
1640     int bit;
1641     struct hw_perf_event *hwc = &event->hw;
1642     unsigned int region = EVENT_REGION(hwc->config_base);
1643     unsigned int group = EVENT_GROUP(hwc->config_base);
1644     bool venum_event = EVENT_VENUM(hwc->config_base);
1645     bool krait_event = EVENT_CPU(hwc->config_base);
1646 
1647     armv7pmu_clear_event_idx(cpuc, event);
1648     if (venum_event || krait_event) {
1649         bit = krait_event_to_bit(event, region, group);
1650         clear_bit(bit, cpuc->used_mask);
1651     }
1652 }
1653 
1654 static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1655 {
1656     armv7pmu_init(cpu_pmu);
1657     cpu_pmu->name       = "armv7_krait";
1658     /* Some early versions of Krait don't support PC write events */
1659     if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1660                   "qcom,no-pc-write"))
1661         cpu_pmu->map_event = krait_map_event_no_branch;
1662     else
1663         cpu_pmu->map_event = krait_map_event;
1664     cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1665     cpu_pmu->reset      = krait_pmu_reset;
1666     cpu_pmu->enable     = krait_pmu_enable_event;
1667     cpu_pmu->disable    = krait_pmu_disable_event;
1668     cpu_pmu->get_event_idx  = krait_pmu_get_event_idx;
1669     cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1670     return armv7_probe_num_events(cpu_pmu);
1671 }
1672 
1673 /*
1674  * Scorpion Local Performance Monitor Register (LPMn)
1675  *
1676  *            31   30     24     16     8      0
1677  *            +--------------------------------+
1678  *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1679  *            +--------------------------------+
1680  *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1681  *            +--------------------------------+
1682  *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1683  *            +--------------------------------+
1684  *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1685  *            +--------------------------------+
1686  *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1687  *            +--------------------------------+
1688  *              EN | G=3  | G=2  | G=1  | G=0
1689  *
1690  *
1691  *  Event Encoding:
1692  *
1693  *      hwc->config_base = 0xNRCCG
1694  *
1695  *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1696  *      R  = region register
1697  *      CC = class of events the group G is choosing from
1698  *      G  = group or particular event
1699  *
1700  *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1701  *
1702  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1703  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1704  *  events (interrupts for example). An event code is broken down into
1705  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1706  *  example).
1707  */
1708 
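/*
 * A Venum-side example under the same encoding (illustrative): 0x20051
 * carries the 0x20000 (VENUM_EVENT) prefix, so scorpion_evt_setup()
 * below programs the VLPM (Venum) register instead of an LPMn register,
 * placing code 0x05 into group 1 (bits [15:8]) and setting the enable
 * bit.
 */
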
1709 static u32 scorpion_read_pmresrn(int n)
1710 {
1711     u32 val;
1712 
1713     switch (n) {
1714     case 0:
1715         asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1716         break;
1717     case 1:
1718         asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1719         break;
1720     case 2:
1721         asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1722         break;
1723     case 3:
1724         asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1725         break;
1726     default:
1727         BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1728     }
1729 
1730     return val;
1731 }
1732 
1733 static void scorpion_write_pmresrn(int n, u32 val)
1734 {
1735     switch (n) {
1736     case 0:
1737         asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1738         break;
1739     case 1:
1740         asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1741         break;
1742     case 2:
1743         asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1744         break;
1745     case 3:
1746         asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1747         break;
1748     default:
1749         BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1750     }
1751 }
1752 
1753 static u32 scorpion_get_pmresrn_event(unsigned int region)
1754 {
1755     static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1756                          SCORPION_LPM1_GROUP0,
1757                          SCORPION_LPM2_GROUP0,
1758                          SCORPION_L2LPM_GROUP0 };
1759     return pmresrn_table[region];
1760 }
1761 
1762 static void scorpion_evt_setup(int idx, u32 config_base)
1763 {
1764     u32 val;
1765     u32 mask;
1766     u32 vval, fval;
1767     unsigned int region = EVENT_REGION(config_base);
1768     unsigned int group = EVENT_GROUP(config_base);
1769     unsigned int code = EVENT_CODE(config_base);
1770     unsigned int group_shift;
1771     bool venum_event = EVENT_VENUM(config_base);
1772 
1773     group_shift = group * 8;
1774     mask = 0xff << group_shift;
1775 
1776     /* Configure evtsel for the region and group */
1777     if (venum_event)
1778         val = SCORPION_VLPM_GROUP0;
1779     else
1780         val = scorpion_get_pmresrn_event(region);
1781     val += group;
1782     /* Mix in mode-exclusion bits */
1783     val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1784     armv7_pmnc_write_evtsel(idx, val);
1785 
1786     asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1787 
1788     if (venum_event) {
1789         venum_pre_pmresr(&vval, &fval);
1790         val = venum_read_pmresr();
1791         val &= ~mask;
1792         val |= code << group_shift;
1793         val |= PMRESRn_EN;
1794         venum_write_pmresr(val);
1795         venum_post_pmresr(vval, fval);
1796     } else {
1797         val = scorpion_read_pmresrn(region);
1798         val &= ~mask;
1799         val |= code << group_shift;
1800         val |= PMRESRn_EN;
1801         scorpion_write_pmresrn(region, val);
1802     }
1803 }
1804 
1805 static void scorpion_clearpmu(u32 config_base)
1806 {
1807     u32 val;
1808     u32 vval, fval;
1809     unsigned int region = EVENT_REGION(config_base);
1810     unsigned int group = EVENT_GROUP(config_base);
1811     bool venum_event = EVENT_VENUM(config_base);
1812 
1813     if (venum_event) {
1814         venum_pre_pmresr(&vval, &fval);
1815         val = venum_read_pmresr();
1816         val = clear_pmresrn_group(val, group);
1817         venum_write_pmresr(val);
1818         venum_post_pmresr(vval, fval);
1819     } else {
1820         val = scorpion_read_pmresrn(region);
1821         val = clear_pmresrn_group(val, group);
1822         scorpion_write_pmresrn(region, val);
1823     }
1824 }
1825 
1826 static void scorpion_pmu_disable_event(struct perf_event *event)
1827 {
1828     unsigned long flags;
1829     struct hw_perf_event *hwc = &event->hw;
1830     int idx = hwc->idx;
1831     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1832     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1833 
1834     /* Disable counter and interrupt */
1835     raw_spin_lock_irqsave(&events->pmu_lock, flags);
1836 
1837     /* Disable counter */
1838     armv7_pmnc_disable_counter(idx);
1839 
1840     /*
1841      * Clear pmresr code (if destined for PMNx counters)
1842      */
1843     if (hwc->config_base & KRAIT_EVENT_MASK)
1844         scorpion_clearpmu(hwc->config_base);
1845 
1846     /* Disable interrupt for this counter */
1847     armv7_pmnc_disable_intens(idx);
1848 
1849     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1850 }
1851 
1852 static void scorpion_pmu_enable_event(struct perf_event *event)
1853 {
1854     unsigned long flags;
1855     struct hw_perf_event *hwc = &event->hw;
1856     int idx = hwc->idx;
1857     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1858     struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1859 
1860     /*
1861      * Enable counter and interrupt, and set the counter to count
1862      * the event that we're interested in.
1863      */
1864     raw_spin_lock_irqsave(&events->pmu_lock, flags);
1865 
1866     /* Disable counter */
1867     armv7_pmnc_disable_counter(idx);
1868 
1869     /*
1870      * Set event (if destined for PMNx counters)
1871      * We don't set the event for the cycle counter because we
1872      * don't have the ability to perform event filtering.
1873      */
1874     if (hwc->config_base & KRAIT_EVENT_MASK)
1875         scorpion_evt_setup(idx, hwc->config_base);
1876     else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1877         armv7_pmnc_write_evtsel(idx, hwc->config_base);
1878 
1879     /* Enable interrupt for this counter */
1880     armv7_pmnc_enable_intens(idx);
1881 
1882     /* Enable counter */
1883     armv7_pmnc_enable_counter(idx);
1884 
1885     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1886 }
1887 
1888 static void scorpion_pmu_reset(void *info)
1889 {
1890     u32 vval, fval;
1891     struct arm_pmu *cpu_pmu = info;
1892     u32 idx, nb_cnt = cpu_pmu->num_events;
1893 
1894     armv7pmu_reset(info);
1895 
1896     /* Clear all pmresrs */
1897     scorpion_write_pmresrn(0, 0);
1898     scorpion_write_pmresrn(1, 0);
1899     scorpion_write_pmresrn(2, 0);
1900     scorpion_write_pmresrn(3, 0);
1901 
1902     venum_pre_pmresr(&vval, &fval);
1903     venum_write_pmresr(0);
1904     venum_post_pmresr(vval, fval);
1905 
1906     /* Reset PMxEVCNTCR to a sane default */
1907     for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1908         armv7_pmnc_select_counter(idx);
1909         asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1910     }
1911 }
1912 
1913 static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1914                   unsigned int group)
1915 {
1916     int bit;
1917     struct hw_perf_event *hwc = &event->hw;
1918     struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1919 
1920     if (hwc->config_base & VENUM_EVENT)
1921         bit = SCORPION_VLPM_GROUP0;
1922     else
1923         bit = scorpion_get_pmresrn_event(region);
1924     bit -= scorpion_get_pmresrn_event(0);
1925     bit += group;
1926     /*
1927      * Lower bits are reserved for use by the counters (see
1928      * armv7pmu_get_event_idx() for more info)
1929      */
1930     bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1931 
1932     return bit;
1933 }
1934 
1935 /*
1936  * We check for column exclusion constraints here.
1937  * Two events can't use the same group within a pmresr register.
1938  */
1939 static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1940                    struct perf_event *event)
1941 {
1942     int idx;
1943     int bit = -1;
1944     struct hw_perf_event *hwc = &event->hw;
1945     unsigned int region = EVENT_REGION(hwc->config_base);
1946     unsigned int group = EVENT_GROUP(hwc->config_base);
1947     bool venum_event = EVENT_VENUM(hwc->config_base);
1948     bool scorpion_event = EVENT_CPU(hwc->config_base);
1949 
1950     if (venum_event || scorpion_event) {
1951         /* Ignore invalid events */
1952         if (group > 3 || region > 3)
1953             return -EINVAL;
1954 
1955         bit = scorpion_event_to_bit(event, region, group);
1956         if (test_and_set_bit(bit, cpuc->used_mask))
1957             return -EAGAIN;
1958     }
1959 
1960     idx = armv7pmu_get_event_idx(cpuc, event);
1961     if (idx < 0 && bit >= 0)
1962         clear_bit(bit, cpuc->used_mask);
1963 
1964     return idx;
1965 }
1966 
1967 static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1968                       struct perf_event *event)
1969 {
1970     int bit;
1971     struct hw_perf_event *hwc = &event->hw;
1972     unsigned int region = EVENT_REGION(hwc->config_base);
1973     unsigned int group = EVENT_GROUP(hwc->config_base);
1974     bool venum_event = EVENT_VENUM(hwc->config_base);
1975     bool scorpion_event = EVENT_CPU(hwc->config_base);
1976 
1977     armv7pmu_clear_event_idx(cpuc, event);
1978     if (venum_event || scorpion_event) {
1979         bit = scorpion_event_to_bit(event, region, group);
1980         clear_bit(bit, cpuc->used_mask);
1981     }
1982 }
1983 
1984 static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1985 {
1986     armv7pmu_init(cpu_pmu);
1987     cpu_pmu->name       = "armv7_scorpion";
1988     cpu_pmu->map_event  = scorpion_map_event;
1989     cpu_pmu->reset      = scorpion_pmu_reset;
1990     cpu_pmu->enable     = scorpion_pmu_enable_event;
1991     cpu_pmu->disable    = scorpion_pmu_disable_event;
1992     cpu_pmu->get_event_idx  = scorpion_pmu_get_event_idx;
1993     cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1994     return armv7_probe_num_events(cpu_pmu);
1995 }
1996 
1997 static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1998 {
1999     armv7pmu_init(cpu_pmu);
2000     cpu_pmu->name       = "armv7_scorpion_mp";
2001     cpu_pmu->map_event  = scorpion_map_event;
2002     cpu_pmu->reset      = scorpion_pmu_reset;
2003     cpu_pmu->enable     = scorpion_pmu_enable_event;
2004     cpu_pmu->disable    = scorpion_pmu_disable_event;
2005     cpu_pmu->get_event_idx  = scorpion_pmu_get_event_idx;
2006     cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
2007     return armv7_probe_num_events(cpu_pmu);
2008 }
2009 
2010 static const struct of_device_id armv7_pmu_of_device_ids[] = {
2011     {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
2012     {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
2013     {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
2014     {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
2015     {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
2016     {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
2017     {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
2018     {.compatible = "qcom,krait-pmu",    .data = krait_pmu_init},
2019     {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
2020     {.compatible = "qcom,scorpion-mp-pmu",  .data = scorpion_mp_pmu_init},
2021     {},
2022 };
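
/*
 * A matching device-tree node selects the corresponding init function
 * above. A hypothetical Krait node (the interrupt specifier is left as
 * a placeholder) might look like:
 *
 *	pmu {
 *		compatible = "qcom,krait-pmu";
 *		interrupts = <...>;
 *		qcom,no-pc-write;
 *	};
 *
 * The optional "qcom,no-pc-write" property makes krait_pmu_init() use
 * krait_map_event_no_branch instead of krait_map_event.
 */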
2023 
2024 static const struct pmu_probe_info armv7_pmu_probe_table[] = {
2025     ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
2026     ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
2027     { /* sentinel value */ }
2028 };
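
/*
 * If the platform device has no matching device-tree node,
 * arm_pmu_device_probe() is expected to fall back to this table and
 * select an init function from the running CPU's part number (MIDR),
 * so only Cortex-A8 and Cortex-A9 are probeable without DT here.
 */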
2029 
2030 
2031 static int armv7_pmu_device_probe(struct platform_device *pdev)
2032 {
2033     return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
2034                     armv7_pmu_probe_table);
2035 }
2036 
2037 static struct platform_driver armv7_pmu_driver = {
2038     .driver     = {
2039         .name   = "armv7-pmu",
2040         .of_match_table = armv7_pmu_of_device_ids,
2041         .suppress_bind_attrs = true,
2042     },
2043     .probe      = armv7_pmu_device_probe,
2044 };
2045 
2046 builtin_platform_driver(armv7_pmu_driver);
2047 #endif  /* CONFIG_CPU_V7 */