// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
// CMN-600 Coherent Mesh Network PMU driver

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Common register stuff */
#define CMN_NODE_INFO           0x0000
#define CMN_NI_NODE_TYPE        GENMASK_ULL(15, 0)
#define CMN_NI_NODE_ID          GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID       GENMASK_ULL(47, 32)

#define CMN_NODEID_DEVID(reg)       ((reg) & 3)
#define CMN_NODEID_EXT_DEVID(reg)   ((reg) & 1)
#define CMN_NODEID_PID(reg)     (((reg) >> 2) & 1)
#define CMN_NODEID_EXT_PID(reg)     (((reg) >> 1) & 3)
#define CMN_NODEID_1x1_PID(reg)     (((reg) >> 2) & 7)
#define CMN_NODEID_X(reg, bits)     ((reg) >> (3 + (bits)))
#define CMN_NODEID_Y(reg, bits)     (((reg) >> 3) & ((1U << (bits)) - 1))

#define CMN_CHILD_INFO          0x0080
#define CMN_CI_CHILD_COUNT      GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET     GENMASK_ULL(31, 16)

#define CMN_CHILD_NODE_ADDR     GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL     BIT(31)

#define CMN_MAX_DIMENSION       12
#define CMN_MAX_XPS         (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS            (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)

/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_2        0x0010
#define CMN_CFGM_PID2_REVISION      GENMASK(7, 4)

#define CMN_CFGM_INFO_GLOBAL        0x900
#define CMN_INFO_MULTIPLE_DTM_EN    BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM     GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM     GENMASK_ULL(51, 50)

#define CMN_CFGM_INFO_GLOBAL_1      0x908
#define CMN_INFO_SNP_VC_NUM     GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM     GENMASK_ULL(1, 0)

/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO_P0    0x0008
#define CMN_MXP__CONNECT_INFO_P1    0x0010
#define CMN_MXP__CONNECT_INFO_P2    0x0028
#define CMN_MXP__CONNECT_INFO_P3    0x0030
#define CMN_MXP__CONNECT_INFO_P4    0x0038
#define CMN_MXP__CONNECT_INFO_P5    0x0040
#define CMN__CONNECT_INFO_DEVICE_TYPE   GENMASK_ULL(4, 0)

/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET          0x2000

/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL       0x000
#define CMN__PMU_CBUSY_SNTHROTTLE_SEL   GENMASK_ULL(44, 42)
#define CMN__PMU_CLASS_OCCUP_ID     GENMASK_ULL(36, 35)
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID      GENMASK_ULL(34, 32)

/* HN-Ps are weird... */
#define CMN_HNP_PMU_EVENT_SEL       0x008

/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n)          (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n)       (CMN_DTM_WPn(n) + 0x00)
#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM   GENMASK_ULL(20, 19)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2  GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE   BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE    BIT(6)
#define CMN600_WPn_CONFIG_WP_EXCLUSIVE  BIT(5)
#define CMN_DTM_WPn_CONFIG_WP_GRP   GENMASK_ULL(5, 4)
#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL   GENMASK_ULL(3, 1)
#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL   BIT(0)
#define CMN_DTM_WPn_VAL(n)      (CMN_DTM_WPn(n) + 0x08)
#define CMN_DTM_WPn_MASK(n)     (CMN_DTM_WPn(n) + 0x10)

#define CMN_DTM_PMU_CONFIG      0x210
#define CMN__PMEVCNT0_INPUT_SEL     GENMASK_ULL(37, 32)
#define CMN__PMEVCNT0_INPUT_SEL_WP  0x00
#define CMN__PMEVCNT0_INPUT_SEL_XP  0x04
#define CMN__PMEVCNT0_INPUT_SEL_DEV 0x10
#define CMN__PMEVCNT0_GLOBAL_NUM    GENMASK_ULL(18, 16)
#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n)   ((n) * 4)
#define CMN__PMEVCNT_PAIRED(n)      BIT(4 + (n))
#define CMN__PMEVCNT23_COMBINED     BIT(2)
#define CMN__PMEVCNT01_COMBINED     BIT(1)
#define CMN_DTM_PMU_CONFIG_PMU_EN   BIT(0)

#define CMN_DTM_PMEVCNT         0x220

#define CMN_DTM_PMEVCNTSR       0x240

#define CMN_DTM_UNIT_INFO       0x0910

#define CMN_DTM_NUM_COUNTERS        4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
#define CMN_DTM_OFFSET(n)       ((n) * 0x200)

/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL          0x0a00
#define CMN_DT_DTC_CTL_DT_EN        BIT(0)

/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n)      ((((n) / 2) * 4 + (n) % 2) * 4)
#define CMN_DT_PMEVCNT(n)       (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTR          (CMN_PMU_OFFSET + 0x40)

#define CMN_DT_PMEVCNTSR(n)     (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
#define CMN_DT_PMCCNTRSR        (CMN_PMU_OFFSET + 0x90)
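
/*
 * Worked example of that pairing: _CMN_DT_CNT_REG() places counters 0-3
 * at byte offsets 0x0, 0x4, 0x10 and 0x14 respectively - two 32-bit
 * counters share each 64-bit register, and successive pairs sit 16
 * bytes apart.
 */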

#define CMN_DT_PMCR         (CMN_PMU_OFFSET + 0x100)
#define CMN_DT_PMCR_PMU_EN      BIT(0)
#define CMN_DT_PMCR_CNTR_RST        BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN    BIT(6)

#define CMN_DT_PMOVSR           (CMN_PMU_OFFSET + 0x118)
#define CMN_DT_PMOVSR_CLR       (CMN_PMU_OFFSET + 0x120)

#define CMN_DT_PMSSR            (CMN_PMU_OFFSET + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n)   BIT(n)

#define CMN_DT_PMSRR            (CMN_PMU_OFFSET + 0x130)
#define CMN_DT_PMSRR_SS_REQ     BIT(0)

#define CMN_DT_NUM_COUNTERS     8
#define CMN_MAX_DTCS            4

/*
 * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles,
 * so throwing away one bit to make overflow handling easy is no big deal.
 */
#define CMN_COUNTER_INIT        0x80000000
/* Similarly for the 40-bit cycle counter */
#define CMN_CC_INIT         0x8000000000ULL
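
/*
 * i.e. counters start at their half-way point, trading one bit of range
 * for the guarantee that an overflow is always signalled (and serviced)
 * long before the counter could wrap all the way back around.
 */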


/* Event attributes */
#define CMN_CONFIG_TYPE         GENMASK_ULL(15, 0)
#define CMN_CONFIG_EVENTID      GENMASK_ULL(26, 16)
#define CMN_CONFIG_OCCUPID      GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID     BIT_ULL(31)
#define CMN_CONFIG_NODEID       GENMASK_ULL(47, 32)

#define CMN_EVENT_TYPE(event)       FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config)
#define CMN_EVENT_EVENTID(event)    FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config)
#define CMN_EVENT_OCCUPID(event)    FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config)
#define CMN_EVENT_BYNODEID(event)   FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
#define CMN_EVENT_NODEID(event)     FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)

#define CMN_CONFIG_WP_COMBINE       GENMASK_ULL(27, 24)
#define CMN_CONFIG_WP_DEV_SEL       GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL       GENMASK_ULL(55, 51)
/* Note that we don't yet support the tertiary match group on newer IPs */
#define CMN_CONFIG_WP_GRP       BIT_ULL(56)
#define CMN_CONFIG_WP_EXCLUSIVE     BIT_ULL(57)
#define CMN_CONFIG1_WP_VAL      GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK     GENMASK_ULL(63, 0)

#define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config)
#define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config)
#define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config)
#define CMN_EVENT_WP_GRP(event)     FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config)
#define CMN_EVENT_WP_EXCLUSIVE(event)   FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config)
#define CMN_EVENT_WP_VAL(event)     FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1)
#define CMN_EVENT_WP_MASK(event)    FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2)

/* Made-up event IDs for watchpoint direction */
#define CMN_WP_UP           0
#define CMN_WP_DOWN         2


enum cmn_model {
    CMN600 = 1,
    CMN650 = 2,
    CMN700 = 4,
    CI700 = 8,
    /* ...and then we can use bitmap tricks for commonality */
    CMN_ANY = -1,
    NOT_CMN600 = -2,
    CMN_650ON = CMN650 | CMN700,
};

/* CMN-600 r0px shouldn't exist in silicon, thankfully */
enum cmn_revision {
    CMN600_R1P0,
    CMN600_R1P1,
    CMN600_R1P2,
    CMN600_R1P3,
    CMN600_R2P0,
    CMN600_R3P0,
    CMN600_R3P1,
    CMN650_R0P0 = 0,
    CMN650_R1P0,
    CMN650_R1P1,
    CMN650_R2P0,
    CMN650_R1P2,
    CMN700_R0P0 = 0,
    CMN700_R1P0,
    CMN700_R2P0,
    CI700_R0P0 = 0,
    CI700_R1P0,
    CI700_R2P0,
};

enum cmn_node_type {
    CMN_TYPE_INVALID,
    CMN_TYPE_DVM,
    CMN_TYPE_CFG,
    CMN_TYPE_DTC,
    CMN_TYPE_HNI,
    CMN_TYPE_HNF,
    CMN_TYPE_XP,
    CMN_TYPE_SBSX,
    CMN_TYPE_MPAM_S,
    CMN_TYPE_MPAM_NS,
    CMN_TYPE_RNI,
    CMN_TYPE_RND = 0xd,
    CMN_TYPE_RNSAM = 0xf,
    CMN_TYPE_MTSX,
    CMN_TYPE_HNP,
    CMN_TYPE_CXRA = 0x100,
    CMN_TYPE_CXHA,
    CMN_TYPE_CXLA,
    CMN_TYPE_CCRA,
    CMN_TYPE_CCHA,
    CMN_TYPE_CCLA,
    CMN_TYPE_CCLA_RNI,
    /* Not a real node type */
    CMN_TYPE_WP = 0x7770
};

enum cmn_filter_select {
    SEL_NONE = -1,
    SEL_OCCUP1ID,
    SEL_CLASS_OCCUP_ID,
    SEL_CBUSY_SNTHROTTLE_SEL,
    SEL_MAX
};

struct arm_cmn_node {
    void __iomem *pmu_base;
    u16 id, logid;
    enum cmn_node_type type;

    int dtm;
    union {
        /* DN/HN-F/CXHA */
        struct {
            u8 val : 4;
            u8 count : 4;
        } occupid[SEL_MAX];
        /* XP */
        u8 dtc;
    };
    union {
        u8 event[4];
        __le32 event_sel;
        u16 event_w[4];
        __le64 event_sel_w;
    };
};

struct arm_cmn_dtm {
    void __iomem *base;
    u32 pmu_config_low;
    union {
        u8 input_sel[4];
        __le32 pmu_config_high;
    };
    s8 wp_event[4];
};

struct arm_cmn_dtc {
    void __iomem *base;
    int irq;
    int irq_friend;
    bool cc_active;

    struct perf_event *counters[CMN_DT_NUM_COUNTERS];
    struct perf_event *cycles;
};

#define CMN_STATE_DISABLED  BIT(0)
#define CMN_STATE_TXN       BIT(1)

struct arm_cmn {
    struct device *dev;
    void __iomem *base;
    unsigned int state;

    enum cmn_revision rev;
    enum cmn_model model;
    u8 mesh_x;
    u8 mesh_y;
    u16 num_xps;
    u16 num_dns;
    bool multi_dtm;
    u8 ports_used;
    struct {
        unsigned int rsp_vc_num : 2;
        unsigned int dat_vc_num : 2;
        unsigned int snp_vc_num : 2;
        unsigned int req_vc_num : 2;
    };

    struct arm_cmn_node *xps;
    struct arm_cmn_node *dns;

    struct arm_cmn_dtm *dtms;
    struct arm_cmn_dtc *dtc;
    unsigned int num_dtcs;

    int cpu;
    struct hlist_node cpuhp_node;

    struct pmu pmu;
    struct dentry *debug;
};

#define to_cmn(p)   container_of(p, struct arm_cmn, pmu)

static int arm_cmn_hp_state;

struct arm_cmn_nodeid {
    u8 x;
    u8 y;
    u8 port;
    u8 dev;
};

static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
    return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
}
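
/*
 * e.g. an 8x8 mesh needs fls(7 | 7 | 2) = 3 bits per coordinate, while
 * the "| 2" term enforces the 2-bit minimum for anything up to 4x4.
 */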

static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
{
    struct arm_cmn_nodeid nid;

    if (cmn->num_xps == 1) {
        nid.x = 0;
        nid.y = 0;
        nid.port = CMN_NODEID_1x1_PID(id);
        nid.dev = CMN_NODEID_DEVID(id);
    } else {
        int bits = arm_cmn_xyidbits(cmn);

        nid.x = CMN_NODEID_X(id, bits);
        nid.y = CMN_NODEID_Y(id, bits);
        if (cmn->ports_used & 0xc) {
            nid.port = CMN_NODEID_EXT_PID(id);
            nid.dev = CMN_NODEID_EXT_DEVID(id);
        } else {
            nid.port = CMN_NODEID_PID(id);
            nid.dev = CMN_NODEID_DEVID(id);
        }
    }
    return nid;
}
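
/*
 * Hypothetical decode to illustrate: on an 8x8 mesh (3-bit coordinates)
 * with only ports 0/1 in use, ID 0x35 means x = 0, y = 6, port 1,
 * device 1.
 */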

static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
                           const struct arm_cmn_node *dn)
{
    struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
    int xp_idx = cmn->mesh_x * nid.y + nid.x;

    return cmn->xps + xp_idx;
}
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
                     enum cmn_node_type type)
{
    struct arm_cmn_node *dn;

    for (dn = cmn->dns; dn->type; dn++)
        if (dn->type == type)
            return dn;
    return NULL;
}

static struct dentry *arm_cmn_debugfs;

#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
    switch(FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
        case 0x00: return "        |";
        case 0x01: return "  RN-I  |";
        case 0x02: return "  RN-D  |";
        case 0x04: return " RN-F_B |";
        case 0x05: return "RN-F_B_E|";
        case 0x06: return " RN-F_A |";
        case 0x07: return "RN-F_A_E|";
        case 0x08: return "  HN-T  |";
        case 0x09: return "  HN-I  |";
        case 0x0a: return "  HN-D  |";
        case 0x0b: return "  HN-P  |";
        case 0x0c: return "  SN-F  |";
        case 0x0d: return "  SBSX  |";
        case 0x0e: return "  HN-F  |";
        case 0x0f: return " SN-F_E |";
        case 0x10: return " SN-F_D |";
        case 0x11: return "  CXHA  |";
        case 0x12: return "  CXRA  |";
        case 0x13: return "  CXRH  |";
        case 0x14: return " RN-F_D |";
        case 0x15: return "RN-F_D_E|";
        case 0x16: return " RN-F_C |";
        case 0x17: return "RN-F_C_E|";
        case 0x18: return " RN-F_E |";
        case 0x19: return "RN-F_E_E|";
        case 0x1c: return "  MTSX  |";
        case 0x1d: return "  HN-V  |";
        case 0x1e: return "  CCG   |";
        default:   return "  ????  |";
    }
}

static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
{
    struct arm_cmn *cmn = s->private;
    struct arm_cmn_node *dn;

    for (dn = cmn->dns; dn->type; dn++) {
        struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);

        if (dn->type == CMN_TYPE_XP)
            continue;
        /* Ignore the extra components that will overlap on some ports */
        if (dn->type < CMN_TYPE_HNI)
            continue;

        if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
            continue;

        seq_printf(s, "   #%-2d  |", dn->logid);
        return;
    }
    seq_puts(s, "        |");
}

static int arm_cmn_map_show(struct seq_file *s, void *data)
{
    struct arm_cmn *cmn = s->private;
    int x, y, p, pmax = fls(cmn->ports_used);

    seq_puts(s, "     X");
    for (x = 0; x < cmn->mesh_x; x++)
        seq_printf(s, "    %d    ", x);
    seq_puts(s, "\nY P D+");
    y = cmn->mesh_y;
    while (y--) {
        int xp_base = cmn->mesh_x * y;
        u8 port[6][CMN_MAX_DIMENSION];

        for (x = 0; x < cmn->mesh_x; x++)
            seq_puts(s, "--------+");

        seq_printf(s, "\n%d    |", y);
        for (x = 0; x < cmn->mesh_x; x++) {
            struct arm_cmn_node *xp = cmn->xps + xp_base + x;
            void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;

            port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
            port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
            port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
            port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
            port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
            port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
            seq_printf(s, " XP #%-2d |", xp_base + x);
        }

        seq_puts(s, "\n     |");
        for (x = 0; x < cmn->mesh_x; x++) {
            u8 dtc = cmn->xps[xp_base + x].dtc;

            if (dtc & (dtc - 1))
                seq_puts(s, " DTC ?? |");
            else
                seq_printf(s, " DTC %ld  |", __ffs(dtc));
        }
        seq_puts(s, "\n     |");
        for (x = 0; x < cmn->mesh_x; x++)
            seq_puts(s, "........|");

        for (p = 0; p < pmax; p++) {
            seq_printf(s, "\n  %d  |", p);
            for (x = 0; x < cmn->mesh_x; x++)
                seq_puts(s, arm_cmn_device_type(port[p][x]));
            seq_puts(s, "\n    0|");
            for (x = 0; x < cmn->mesh_x; x++)
                arm_cmn_show_logid(s, x, y, p, 0);
            seq_puts(s, "\n    1|");
            for (x = 0; x < cmn->mesh_x; x++)
                arm_cmn_show_logid(s, x, y, p, 1);
        }
        seq_puts(s, "\n-----+");
    }
    for (x = 0; x < cmn->mesh_x; x++)
        seq_puts(s, "--------+");
    seq_puts(s, "\n");
    return 0;
}
DEFINE_SHOW_ATTRIBUTE(arm_cmn_map);

static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id)
{
    const char *name = "map";

    if (id > 0)
        name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id);
    if (!name)
        return;

    cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops);
}
#else
static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
#endif

struct arm_cmn_hw_event {
    struct arm_cmn_node *dn;
    u64 dtm_idx[4];
    unsigned int dtc_idx;
    u8 dtcs_used;
    u8 num_dns;
    u8 dtm_offset;
    bool wide_sel;
    enum cmn_filter_select filter_sel;
};

#define for_each_hw_dn(hw, dn, i) \
    for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)

static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
    BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
    return (struct arm_cmn_hw_event *)&event->hw;
}

static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val)
{
    x[pos / 32] |= (u64)val << ((pos % 32) * 2);
}

static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
{
    return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}
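
/*
 * These pack one 2-bit value per node into hw->dtm_idx, 32 values per
 * u64 - e.g. arm_cmn_set_index(x, 5, 3) sets bits 11:10 of x[0], which
 * arm_cmn_get_index(x, 5) then reads back.
 */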

struct arm_cmn_event_attr {
    struct device_attribute attr;
    enum cmn_model model;
    enum cmn_node_type type;
    enum cmn_filter_select fsel;
    u16 eventid;
    u8 occupid;
};

struct arm_cmn_format_attr {
    struct device_attribute attr;
    u64 field;
    int config;
};

#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
    (&((struct arm_cmn_event_attr[]) {{             \
        .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL),  \
        .model = _model,                    \
        .type = _type,                      \
        .eventid = _eventid,                    \
        .occupid = _occupid,                    \
        .fsel = _fsel,                      \
    }})[0].attr.attr)
#define CMN_EVENT_ATTR(_model, _name, _type, _eventid)          \
    _CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)
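
/*
 * The compound literal in _CMN_EVENT_ATTR() is what lets each entry of
 * the big attrs table below be a single expression: it instantiates an
 * anonymous one-element array of struct arm_cmn_event_attr and yields
 * the address of the struct attribute embedded within it.
 */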

static ssize_t arm_cmn_event_show(struct device *dev,
                  struct device_attribute *attr, char *buf)
{
    struct arm_cmn_event_attr *eattr;

    eattr = container_of(attr, typeof(*eattr), attr);

    if (eattr->type == CMN_TYPE_DTC)
        return sysfs_emit(buf, "type=0x%x\n", eattr->type);

    if (eattr->type == CMN_TYPE_WP)
        return sysfs_emit(buf,
                  "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
                  eattr->type, eattr->eventid);

    if (eattr->fsel > SEL_NONE)
        return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
                  eattr->type, eattr->eventid, eattr->occupid);

    return sysfs_emit(buf, "type=0x%x,eventid=0x%x\n", eattr->type,
              eattr->eventid);
}

static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
                         struct attribute *attr,
                         int unused)
{
    struct device *dev = kobj_to_dev(kobj);
    struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
    struct arm_cmn_event_attr *eattr;
    enum cmn_node_type type;
    u16 eventid;

    eattr = container_of(attr, typeof(*eattr), attr.attr);

    if (!(eattr->model & cmn->model))
        return 0;

    type = eattr->type;
    eventid = eattr->eventid;

    /* Watchpoints aren't nodes, so avoid confusion */
    if (type == CMN_TYPE_WP)
        return attr->mode;

    /* Hide XP events for unused interfaces/channels */
    if (type == CMN_TYPE_XP) {
        unsigned int intf = (eventid >> 2) & 7;
        unsigned int chan = eventid >> 5;

        if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
            return 0;

        if (chan == 4 && cmn->model == CMN600)
            return 0;

        if ((chan == 5 && cmn->rsp_vc_num < 2) ||
            (chan == 6 && cmn->dat_vc_num < 2) ||
            (chan == 7 && cmn->snp_vc_num < 2) ||
            (chan == 8 && cmn->req_vc_num < 2))
            return 0;
    }

    /* Revision-specific differences */
    if (cmn->model == CMN600) {
        if (cmn->rev < CMN600_R1P3) {
            if (type == CMN_TYPE_CXRA && eventid > 0x10)
                return 0;
        }
        if (cmn->rev < CMN600_R1P2) {
            if (type == CMN_TYPE_HNF && eventid == 0x1b)
                return 0;
            if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
                return 0;
        }
    } else if (cmn->model == CMN650) {
        if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) {
            if (type == CMN_TYPE_HNF && eventid > 0x22)
                return 0;
            if (type == CMN_TYPE_SBSX && eventid == 0x17)
                return 0;
            if (type == CMN_TYPE_RNI && eventid > 0x10)
                return 0;
        }
    } else if (cmn->model == CMN700) {
        if (cmn->rev < CMN700_R2P0) {
            if (type == CMN_TYPE_HNF && eventid > 0x2c)
                return 0;
            if (type == CMN_TYPE_CCHA && eventid > 0x74)
                return 0;
            if (type == CMN_TYPE_CCLA && eventid > 0x27)
                return 0;
        }
        if (cmn->rev < CMN700_R1P0) {
            if (type == CMN_TYPE_HNF && eventid > 0x2b)
                return 0;
        }
    }

    if (!arm_cmn_node(cmn, type))
        return 0;

    return attr->mode;
}

#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel)    \
    _CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name)                    \
    CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
#define _CMN_EVENT_HNF(_model, _name, _event, _occup, _fsel)        \
    _CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup, _fsel)
#define CMN_EVENT_HNI(_name, _event)                \
    CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
#define CMN_EVENT_HNP(_name, _event)                \
    CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event)           \
    CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event)           \
    CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
#define CMN_EVENT_CXRA(_model, _name, _event)               \
    CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
#define CMN_EVENT_CXHA(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
#define CMN_EVENT_CCHA(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event)               \
    CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)

#define CMN_EVENT_DVM(_model, _name, _event)            \
    _CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
#define CMN_EVENT_DVM_OCC(_model, _name, _event)            \
    _CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID),   \
    _CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID), \
    _CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)
#define CMN_EVENT_HNF(_model, _name, _event)            \
    _CMN_EVENT_HNF(_model, _name, _event, 0, SEL_NONE)
#define CMN_EVENT_HNF_CLS(_model, _name, _event)            \
    _CMN_EVENT_HNF(_model, _name##_class0, _event, 0, SEL_CLASS_OCCUP_ID), \
    _CMN_EVENT_HNF(_model, _name##_class1, _event, 1, SEL_CLASS_OCCUP_ID), \
    _CMN_EVENT_HNF(_model, _name##_class2, _event, 2, SEL_CLASS_OCCUP_ID), \
    _CMN_EVENT_HNF(_model, _name##_class3, _event, 3, SEL_CLASS_OCCUP_ID)
#define CMN_EVENT_HNF_SNT(_model, _name, _event)            \
    _CMN_EVENT_HNF(_model, _name##_all, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
    _CMN_EVENT_HNF(_model, _name##_group0_read, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
    _CMN_EVENT_HNF(_model, _name##_group0_write, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
    _CMN_EVENT_HNF(_model, _name##_group1_read, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
    _CMN_EVENT_HNF(_model, _name##_group1_write, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
    _CMN_EVENT_HNF(_model, _name##_read, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
    _CMN_EVENT_HNF(_model, _name##_write, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)

#define _CMN_EVENT_XP(_name, _event)                \
    __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)),     \
    __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)),     \
    __CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)),     \
    __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)),     \
    __CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)),    \
    __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)),    \
    __CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)),    \
    __CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))

/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event)             \
    _CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)),    \
    _CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)),    \
    _CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)),    \
    _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)),    \
    _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)),    \
    _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)),   \
    _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)),   \
    _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)),   \
    _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
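
/*
 * In other words an XP event number is (channel << 5) | (interface << 2)
 * | event, mirroring the intf/chan decode in
 * arm_cmn_event_attr_is_visible(); e.g. mxp_p1_dat_txflit_valid works
 * out to (3 << 5) | (5 << 2) | 0x1 = 0x75.
 */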

static struct attribute *arm_cmn_event_attrs[] = {
    CMN_EVENT_DTC(cycles),

    /*
     * DVM node events conflict with HN-I events in the equivalent PMU
     * slot, but our lazy short-cut of using the DTM counter index for
     * the PMU index as well happens to avoid that by construction.
     */
    CMN_EVENT_DVM(CMN600, rxreq_dvmop,      0x01),
    CMN_EVENT_DVM(CMN600, rxreq_dvmsync,        0x02),
    CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
    CMN_EVENT_DVM(CMN600, rxreq_retried,        0x04),
    CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy,  0x05),
    CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi,       0x01),
    CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi,        0x02),
    CMN_EVENT_DVM(NOT_CMN600, dvmop_pici,       0x03),
    CMN_EVENT_DVM(NOT_CMN600, dvmop_vici,       0x04),
    CMN_EVENT_DVM(NOT_CMN600, dvmsync,      0x05),
    CMN_EVENT_DVM(NOT_CMN600, vmid_filtered,    0x06),
    CMN_EVENT_DVM(NOT_CMN600, rndop_filtered,   0x07),
    CMN_EVENT_DVM(NOT_CMN600, retry,        0x08),
    CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv,      0x09),
    CMN_EVENT_DVM(NOT_CMN600, txsnp_stall,      0x0a),
    CMN_EVENT_DVM(NOT_CMN600, trkfull,      0x0b),
    CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy,    0x0c),
    CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha,   0x0d),
    CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn,    0x0e),
    CMN_EVENT_DVM(CMN700, trk_alloc,        0x0f),
    CMN_EVENT_DVM(CMN700, trk_cxha_alloc,       0x10),
    CMN_EVENT_DVM(CMN700, trk_pdn_alloc,        0x11),
    CMN_EVENT_DVM(CMN700, txsnp_stall_limit,    0x12),
    CMN_EVENT_DVM(CMN700, rxsnp_stall_starv,    0x13),
    CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op,  0x14),

    CMN_EVENT_HNF(CMN_ANY, cache_miss,      0x01),
    CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02),
    CMN_EVENT_HNF(CMN_ANY, cache_fill,      0x03),
    CMN_EVENT_HNF(CMN_ANY, pocq_retry,      0x04),
    CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd,     0x05),
    CMN_EVENT_HNF(CMN_ANY, sf_hit,          0x06),
    CMN_EVENT_HNF(CMN_ANY, sf_evictions,        0x07),
    CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent,     0x08),
    CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent,     0x09),
    CMN_EVENT_HNF(CMN_ANY, slc_eviction,        0x0a),
    CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way,    0x0b),
    CMN_EVENT_HNF(CMN_ANY, mc_retries,      0x0c),
    CMN_EVENT_HNF(CMN_ANY, mc_reqs,         0x0d),
    CMN_EVENT_HNF(CMN_ANY, qos_hh_retry,        0x0e),
    _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0, SEL_OCCUP1ID),
    _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1, SEL_OCCUP1ID),
    _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2, SEL_OCCUP1ID),
    _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3, SEL_OCCUP1ID),
    _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4, SEL_OCCUP1ID),
    CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz,        0x10),
    CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
    CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full,  0x12),
    CMN_EVENT_HNF(CMN_ANY, cmp_adq_full,        0x13),
    CMN_EVENT_HNF(CMN_ANY, txdat_stall,     0x14),
    CMN_EVENT_HNF(CMN_ANY, txrsp_stall,     0x15),
    CMN_EVENT_HNF(CMN_ANY, seq_full,        0x16),
    CMN_EVENT_HNF(CMN_ANY, seq_hit,         0x17),
    CMN_EVENT_HNF(CMN_ANY, snp_sent,        0x18),
    CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent,   0x19),
    CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent,   0x1a),
    CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk,      0x1b),
    CMN_EVENT_HNF(CMN_ANY, intv_dirty,      0x1c),
    CMN_EVENT_HNF(CMN_ANY, stash_snp_sent,      0x1d),
    CMN_EVENT_HNF(CMN_ANY, stash_data_pull,     0x1e),
    CMN_EVENT_HNF(CMN_ANY, snp_fwded,       0x1f),
    CMN_EVENT_HNF(NOT_CMN600, atomic_fwd,       0x20),
    CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim,     0x21),
    CMN_EVENT_HNF(NOT_CMN600, mpam_softlim,     0x22),
    CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster,  0x23),
    CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict,    0x24),
    CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line,  0x25),
    CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup, 0x26),
    CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry, 0x27),
    CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs,    0x28),
    CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin,  0x29),
    CMN_EVENT_HNF_SNT(CMN700, sn_throttle,      0x2a),
    CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min,  0x2b),
    CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise,  0x2c),
    CMN_EVENT_HNF(CMN700, snp_intv_cln,     0x2d),
    CMN_EVENT_HNF(CMN700, nc_excl,          0x2e),
    CMN_EVENT_HNF(CMN700, excl_mon_ovfl,        0x2f),

    CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl,      0x20),
    CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl,      0x21),
    CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl,      0x22),
    CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl,      0x23),
    CMN_EVENT_HNI(wdb_occ_cnt_ovfl,         0x24),
    CMN_EVENT_HNI(rrt_rd_alloc,         0x25),
    CMN_EVENT_HNI(rrt_wr_alloc,         0x26),
    CMN_EVENT_HNI(rdt_rd_alloc,         0x27),
    CMN_EVENT_HNI(rdt_wr_alloc,         0x28),
    CMN_EVENT_HNI(wdb_alloc,            0x29),
    CMN_EVENT_HNI(txrsp_retryack,           0x2a),
    CMN_EVENT_HNI(arvalid_no_arready,       0x2b),
    CMN_EVENT_HNI(arready_no_arvalid,       0x2c),
    CMN_EVENT_HNI(awvalid_no_awready,       0x2d),
    CMN_EVENT_HNI(awready_no_awvalid,       0x2e),
    CMN_EVENT_HNI(wvalid_no_wready,         0x2f),
    CMN_EVENT_HNI(txdat_stall,          0x30),
    CMN_EVENT_HNI(nonpcie_serialization,        0x31),
    CMN_EVENT_HNI(pcie_serialization,       0x32),

    /*
     * HN-P events squat on top of the HN-I similarly to DVM events, except
     * for being crammed into the same physical node as well. And of course
     * where would the fun be if the same events were in the same order...
     */
    CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl,      0x01),
    CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl,      0x02),
    CMN_EVENT_HNP(wdb_occ_cnt_ovfl,         0x03),
    CMN_EVENT_HNP(rrt_wr_alloc,         0x04),
    CMN_EVENT_HNP(rdt_wr_alloc,         0x05),
    CMN_EVENT_HNP(wdb_alloc,            0x06),
    CMN_EVENT_HNP(awvalid_no_awready,       0x07),
    CMN_EVENT_HNP(awready_no_awvalid,       0x08),
    CMN_EVENT_HNP(wvalid_no_wready,         0x09),
    CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl,      0x11),
    CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl,      0x12),
    CMN_EVENT_HNP(rrt_rd_alloc,         0x13),
    CMN_EVENT_HNP(rdt_rd_alloc,         0x14),
    CMN_EVENT_HNP(arvalid_no_arready,       0x15),
    CMN_EVENT_HNP(arready_no_arvalid,       0x16),

    CMN_EVENT_XP(txflit_valid,          0x01),
    CMN_EVENT_XP(txflit_stall,          0x02),
    CMN_EVENT_XP(partial_dat_flit,          0x03),
    /* We treat watchpoints as a special made-up class of XP events */
    CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
    CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),

    CMN_EVENT_SBSX(CMN_ANY, rd_req,         0x01),
    CMN_EVENT_SBSX(CMN_ANY, wr_req,         0x02),
    CMN_EVENT_SBSX(CMN_ANY, cmo_req,        0x03),
    CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack,     0x04),
    CMN_EVENT_SBSX(CMN_ANY, txdat_flitv,        0x05),
    CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv,        0x06),
    CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11),
    CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12),
    CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13),
    CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl,   0x14),
    CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
    CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
    CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl,    0x17),
    CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21),
    CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22),
    CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready,   0x23),
    CMN_EVENT_SBSX(CMN_ANY, txdat_stall,        0x24),
    CMN_EVENT_SBSX(CMN_ANY, txrsp_stall,        0x25),

    CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats,     0x01),
    CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats,     0x02),
    CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats,     0x03),
    CMN_EVENT_RNID(CMN_ANY, rxdat_flits,        0x04),
    CMN_EVENT_RNID(CMN_ANY, txdat_flits,        0x05),
    CMN_EVENT_RNID(CMN_ANY, txreq_flits_total,  0x06),
    CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried,    0x07),
    CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl,       0x08),
    CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl,       0x09),
    CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed,   0x0a),
    CMN_EVENT_RNID(CMN_ANY, wrcancel_sent,      0x0b),
    CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats,     0x0c),
    CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats,     0x0d),
    CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats,     0x0e),
    CMN_EVENT_RNID(CMN_ANY, rrt_alloc,      0x0f),
    CMN_EVENT_RNID(CMN_ANY, wrt_alloc,      0x10),
    CMN_EVENT_RNID(CMN600, rdb_unord,       0x11),
    CMN_EVENT_RNID(CMN600, rdb_replay,      0x12),
    CMN_EVENT_RNID(CMN600, rdb_hybrid,      0x13),
    CMN_EVENT_RNID(CMN600, rdb_ord,         0x14),
    CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl,   0x11),
    CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl,   0x12),
    CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
    CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
    CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
    CMN_EVENT_RNID(NOT_CMN600, wrt_throttled,   0x16),
    CMN_EVENT_RNID(CMN700, ldb_full,        0x17),
    CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
    CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
    CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
    CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
    CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
    CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
    CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
    CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
    CMN_EVENT_RNID(CMN700, rrt_burst_alloc,     0x20),
    CMN_EVENT_RNID(CMN700, awid_hash,       0x21),
    CMN_EVENT_RNID(CMN700, atomic_alloc,        0x22),
    CMN_EVENT_RNID(CMN700, atomic_occ_ovfl,     0x23),

    CMN_EVENT_MTSX(tc_lookup,           0x01),
    CMN_EVENT_MTSX(tc_fill,             0x02),
    CMN_EVENT_MTSX(tc_miss,             0x03),
    CMN_EVENT_MTSX(tdb_forward,         0x04),
    CMN_EVENT_MTSX(tcq_hazard,          0x05),
    CMN_EVENT_MTSX(tcq_rd_alloc,            0x06),
    CMN_EVENT_MTSX(tcq_wr_alloc,            0x07),
    CMN_EVENT_MTSX(tcq_cmo_alloc,           0x08),
    CMN_EVENT_MTSX(axi_rd_req,          0x09),
    CMN_EVENT_MTSX(axi_wr_req,          0x0a),
    CMN_EVENT_MTSX(tcq_occ_cnt_ovfl,        0x0b),
    CMN_EVENT_MTSX(tdb_occ_cnt_ovfl,        0x0c),

    CMN_EVENT_CXRA(CMN_ANY, rht_occ,        0x01),
    CMN_EVENT_CXRA(CMN_ANY, sht_occ,        0x02),
    CMN_EVENT_CXRA(CMN_ANY, rdb_occ,        0x03),
    CMN_EVENT_CXRA(CMN_ANY, wdb_occ,        0x04),
    CMN_EVENT_CXRA(CMN_ANY, ssb_occ,        0x05),
    CMN_EVENT_CXRA(CMN_ANY, snp_bcasts,     0x06),
    CMN_EVENT_CXRA(CMN_ANY, req_chains,     0x07),
    CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen,   0x08),
    CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls,      0x09),
    CMN_EVENT_CXRA(CMN_ANY, chidat_stalls,      0x0a),
    CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
    CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
    CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
    CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
    CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
    CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
    CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls, 0x11),
    CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls, 0x12),
    CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
    CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
    CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),

    CMN_EVENT_CXHA(rddatbyp,            0x21),
    CMN_EVENT_CXHA(chirsp_up_stall,         0x22),
    CMN_EVENT_CXHA(chidat_up_stall,         0x23),
    CMN_EVENT_CXHA(snppcrd_link0_stall,     0x24),
    CMN_EVENT_CXHA(snppcrd_link1_stall,     0x25),
    CMN_EVENT_CXHA(snppcrd_link2_stall,     0x26),
    CMN_EVENT_CXHA(reqtrk_occ,          0x27),
    CMN_EVENT_CXHA(rdb_occ,             0x28),
    CMN_EVENT_CXHA(rdbyp_occ,           0x29),
    CMN_EVENT_CXHA(wdb_occ,             0x2a),
    CMN_EVENT_CXHA(snptrk_occ,          0x2b),
    CMN_EVENT_CXHA(sdb_occ,             0x2c),
    CMN_EVENT_CXHA(snphaz_occ,          0x2d),

    CMN_EVENT_CCRA(rht_occ,             0x41),
    CMN_EVENT_CCRA(sht_occ,             0x42),
    CMN_EVENT_CCRA(rdb_occ,             0x43),
    CMN_EVENT_CCRA(wdb_occ,             0x44),
    CMN_EVENT_CCRA(ssb_occ,             0x45),
    CMN_EVENT_CCRA(snp_bcasts,          0x46),
    CMN_EVENT_CCRA(req_chains,          0x47),
    CMN_EVENT_CCRA(req_chain_avglen,        0x48),
    CMN_EVENT_CCRA(chirsp_stalls,           0x49),
    CMN_EVENT_CCRA(chidat_stalls,           0x4a),
    CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0,     0x4b),
    CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1,     0x4c),
    CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2,     0x4d),
    CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0,     0x4e),
    CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1,     0x4f),
    CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2,     0x50),
    CMN_EVENT_CCRA(external_chirsp_stalls,      0x51),
    CMN_EVENT_CCRA(external_chidat_stalls,      0x52),
    CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0,    0x53),
    CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1,    0x54),
    CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2,    0x55),
    CMN_EVENT_CCRA(rht_alloc,           0x56),
    CMN_EVENT_CCRA(sht_alloc,           0x57),
    CMN_EVENT_CCRA(rdb_alloc,           0x58),
    CMN_EVENT_CCRA(wdb_alloc,           0x59),
    CMN_EVENT_CCRA(ssb_alloc,           0x5a),

    CMN_EVENT_CCHA(rddatbyp,            0x61),
    CMN_EVENT_CCHA(chirsp_up_stall,         0x62),
    CMN_EVENT_CCHA(chidat_up_stall,         0x63),
    CMN_EVENT_CCHA(snppcrd_link0_stall,     0x64),
    CMN_EVENT_CCHA(snppcrd_link1_stall,     0x65),
    CMN_EVENT_CCHA(snppcrd_link2_stall,     0x66),
    CMN_EVENT_CCHA(reqtrk_occ,          0x67),
    CMN_EVENT_CCHA(rdb_occ,             0x68),
    CMN_EVENT_CCHA(rdbyp_occ,           0x69),
    CMN_EVENT_CCHA(wdb_occ,             0x6a),
    CMN_EVENT_CCHA(snptrk_occ,          0x6b),
    CMN_EVENT_CCHA(sdb_occ,             0x6c),
    CMN_EVENT_CCHA(snphaz_occ,          0x6d),
    CMN_EVENT_CCHA(reqtrk_alloc,            0x6e),
    CMN_EVENT_CCHA(rdb_alloc,           0x6f),
    CMN_EVENT_CCHA(rdbyp_alloc,         0x70),
    CMN_EVENT_CCHA(wdb_alloc,           0x71),
    CMN_EVENT_CCHA(snptrk_alloc,            0x72),
    CMN_EVENT_CCHA(sdb_alloc,           0x73),
    CMN_EVENT_CCHA(snphaz_alloc,            0x74),
    CMN_EVENT_CCHA(pb_rhu_req_occ,          0x75),
    CMN_EVENT_CCHA(pb_rhu_req_alloc,        0x76),
    CMN_EVENT_CCHA(pb_rhu_pcie_req_occ,     0x77),
    CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc,       0x78),
    CMN_EVENT_CCHA(pb_pcie_wr_req_occ,      0x79),
    CMN_EVENT_CCHA(pb_pcie_wr_req_alloc,        0x7a),
    CMN_EVENT_CCHA(pb_pcie_reg_req_occ,     0x7b),
    CMN_EVENT_CCHA(pb_pcie_reg_req_alloc,       0x7c),
    CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ,        0x7d),
    CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc,      0x7e),
    CMN_EVENT_CCHA(pb_rhu_dat_occ,          0x7f),
    CMN_EVENT_CCHA(pb_rhu_dat_alloc,        0x80),
    CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ,     0x81),
    CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc,       0x82),
    CMN_EVENT_CCHA(pb_pcie_wr_dat_occ,      0x83),
    CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc,        0x84),

    CMN_EVENT_CCLA(rx_cxs,              0x21),
    CMN_EVENT_CCLA(tx_cxs,              0x22),
    CMN_EVENT_CCLA(rx_cxs_avg_size,         0x23),
    CMN_EVENT_CCLA(tx_cxs_avg_size,         0x24),
    CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure,    0x25),
    CMN_EVENT_CCLA(link_crdbuf_occ,         0x26),
    CMN_EVENT_CCLA(link_crdbuf_alloc,       0x27),
    CMN_EVENT_CCLA(pfwd_rcvr_cxs,           0x28),
    CMN_EVENT_CCLA(pfwd_sndr_num_flits,     0x29),
    CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a),
    CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd,    0x2b),

    NULL
};

static const struct attribute_group arm_cmn_event_attrs_group = {
    .name = "events",
    .attrs = arm_cmn_event_attrs,
    .is_visible = arm_cmn_event_attr_is_visible,
};

static ssize_t arm_cmn_format_show(struct device *dev,
                   struct device_attribute *attr, char *buf)
{
    struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
    int lo = __ffs(fmt->field), hi = __fls(fmt->field);

    if (lo == hi)
        return sysfs_emit(buf, "config:%d\n", lo);

    if (!fmt->config)
        return sysfs_emit(buf, "config:%d-%d\n", lo, hi);

    return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
}

#define _CMN_FORMAT_ATTR(_name, _cfg, _fld)             \
    (&((struct arm_cmn_format_attr[]) {{                \
        .attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL), \
        .config = _cfg,                     \
        .field = _fld,                      \
    }})[0].attr.attr)
#define CMN_FORMAT_ATTR(_name, _fld)    _CMN_FORMAT_ATTR(_name, 0, _fld)

static struct attribute *arm_cmn_format_attrs[] = {
    CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE),
    CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID),
    CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID),
    CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID),
    CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID),

    CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL),
    CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL),
    CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP),
    CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE),
    CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE),

    _CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL),
    _CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK),

    NULL
};

static const struct attribute_group arm_cmn_format_attrs_group = {
    .name = "format",
    .attrs = arm_cmn_format_attrs,
};
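
/*
 * Between them, the "events" and "format" groups give the perf tool its
 * usual self-describing interface. Purely as a hypothetical sketch
 * (assuming this instance registered as "arm_cmn_0" and picking an
 * arbitrary nodeid), usage would look like:
 *
 *   # all HN-Fs in the mesh
 *   perf stat -e arm_cmn_0/hnf_cache_miss/
 *   # equivalent raw encoding, restricted to one node
 *   perf stat -e arm_cmn_0/type=0x5,eventid=0x1,bynodeid=1,nodeid=0x68/
 */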

static ssize_t arm_cmn_cpumask_show(struct device *dev,
                    struct device_attribute *attr, char *buf)
{
    struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));

    return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu));
}

static struct device_attribute arm_cmn_cpumask_attr =
        __ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL);

static struct attribute *arm_cmn_cpumask_attrs[] = {
    &arm_cmn_cpumask_attr.attr,
    NULL,
};

static const struct attribute_group arm_cmn_cpumask_attr_group = {
    .attrs = arm_cmn_cpumask_attrs,
};

static const struct attribute_group *arm_cmn_attr_groups[] = {
    &arm_cmn_event_attrs_group,
    &arm_cmn_format_attrs_group,
    &arm_cmn_cpumask_attr_group,
    NULL
};

static int arm_cmn_wp_idx(struct perf_event *event)
{
    return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
}
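
/*
 * i.e. the made-up eventid (0 for up, 2 for down) plus the single match
 * group bit picks one of the DTM's four watchpoints: up/group 0 and 1
 * map to WP 0 and 1, down/group 0 and 1 to WP 2 and 3.
 */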

static u32 arm_cmn_wp_config(struct perf_event *event)
{
    u32 config;
    u32 dev = CMN_EVENT_WP_DEV_SEL(event);
    u32 chn = CMN_EVENT_WP_CHN_SEL(event);
    u32 grp = CMN_EVENT_WP_GRP(event);
    u32 exc = CMN_EVENT_WP_EXCLUSIVE(event);
    u32 combine = CMN_EVENT_WP_COMBINE(event);
    bool is_cmn600 = to_cmn(event->pmu)->model == CMN600;

    config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
         FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
         FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
         FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1);
    if (exc)
        config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
                      CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
    if (combine && !grp)
        config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
                      CMN_DTM_WPn_CONFIG_WP_COMBINE;
    return config;
}

static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
    if (!cmn->state)
        writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
    cmn->state |= state;
}

static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
{
    cmn->state &= ~state;
    if (!cmn->state)
        writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
                   cmn->dtc[0].base + CMN_DT_PMCR);
}

static void arm_cmn_pmu_enable(struct pmu *pmu)
{
    arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static void arm_cmn_pmu_disable(struct pmu *pmu)
{
    arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED);
}

static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
                bool snapshot)
{
    struct arm_cmn_dtm *dtm = NULL;
    struct arm_cmn_node *dn;
    unsigned int i, offset, dtm_idx;
    u64 reg, count = 0;

    offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT;
    for_each_hw_dn(hw, dn, i) {
        if (dtm != &cmn->dtms[dn->dtm]) {
            dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
            reg = readq_relaxed(dtm->base + offset);
        }
        dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
        count += (u16)(reg >> (dtm_idx * 16));
    }
    return count;
}

static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
    u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);

    writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
    return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}
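
/*
 * Note the arithmetic: CMN_CC_INIT is bit 39, so (CMN_CC_INIT << 1) - 1
 * is a 40-bit mask, and the masked subtraction yields the cycles elapsed
 * since the last reset even if the counter wrapped in between.
 */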

static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
    u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);

    val = readl_relaxed(dtc->base + pmevcnt);
    writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
    return val - CMN_COUNTER_INIT;
}

static void arm_cmn_init_counter(struct perf_event *event)
{
    struct arm_cmn *cmn = to_cmn(event->pmu);
    struct arm_cmn_hw_event *hw = to_cmn_hw(event);
    unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx);
    u64 count;

    for (i = 0; hw->dtcs_used & (1U << i); i++) {
        writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt);
        cmn->dtc[i].counters[hw->dtc_idx] = event;
    }

    count = arm_cmn_read_dtm(cmn, hw, false);
    local64_set(&event->hw.prev_count, count);
}

static void arm_cmn_event_read(struct perf_event *event)
{
    struct arm_cmn *cmn = to_cmn(event->pmu);
    struct arm_cmn_hw_event *hw = to_cmn_hw(event);
    u64 delta, new, prev;
    unsigned long flags;
    unsigned int i;

    if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) {
        i = __ffs(hw->dtcs_used);
        delta = arm_cmn_read_cc(cmn->dtc + i);
        local64_add(delta, &event->count);
        return;
    }
    new = arm_cmn_read_dtm(cmn, hw, false);
    prev = local64_xchg(&event->hw.prev_count, new);

    delta = new - prev;

    local_irq_save(flags);
    for (i = 0; hw->dtcs_used & (1U << i); i++) {
        new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx);
        delta += new << 16;
    }
    local_irq_restore(flags);
    local64_add(delta, &event->count);
}
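
/*
 * That composition works because each DTC counter accumulates overflows
 * of its paired 16-bit DTM counter: the DTM read supplies the low 16
 * bits of the delta, and every DTC increment is worth 2^16 events,
 * hence the shift.
 */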

static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
                    enum cmn_filter_select fsel, u8 occupid)
{
    u64 reg;

    if (fsel == SEL_NONE)
        return 0;

    if (!dn->occupid[fsel].count) {
        dn->occupid[fsel].val = occupid;
        reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
                 dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
              FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
                 dn->occupid[SEL_CLASS_OCCUP_ID].val) |
              FIELD_PREP(CMN__PMU_OCCUP1_ID,
                 dn->occupid[SEL_OCCUP1ID].val);
        writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
    } else if (dn->occupid[fsel].val != occupid) {
        return -EBUSY;
    }
    dn->occupid[fsel].count++;
    return 0;
}

static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
                     int eventid, bool wide_sel)
{
    if (wide_sel) {
        dn->event_w[dtm_idx] = eventid;
        writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
    } else {
        dn->event[dtm_idx] = eventid;
        writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
    }
}

static void arm_cmn_event_start(struct perf_event *event, int flags)
{
    struct arm_cmn *cmn = to_cmn(event->pmu);
    struct arm_cmn_hw_event *hw = to_cmn_hw(event);
    struct arm_cmn_node *dn;
    enum cmn_node_type type = CMN_EVENT_TYPE(event);
    int i;

    if (type == CMN_TYPE_DTC) {
        i = __ffs(hw->dtcs_used);
        writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
        cmn->dtc[i].cc_active = true;
    } else if (type == CMN_TYPE_WP) {
        int wp_idx = arm_cmn_wp_idx(event);
        u64 val = CMN_EVENT_WP_VAL(event);
        u64 mask = CMN_EVENT_WP_MASK(event);

        for_each_hw_dn(hw, dn, i) {
            void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);

            writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
            writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
        }
    } else for_each_hw_dn(hw, dn, i) {
        int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

        arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
                     hw->wide_sel);
    }
}

static void arm_cmn_event_stop(struct perf_event *event, int flags)
{
    struct arm_cmn *cmn = to_cmn(event->pmu);
    struct arm_cmn_hw_event *hw = to_cmn_hw(event);
    struct arm_cmn_node *dn;
    enum cmn_node_type type = CMN_EVENT_TYPE(event);
    int i;

    if (type == CMN_TYPE_DTC) {
        i = __ffs(hw->dtcs_used);
        cmn->dtc[i].cc_active = false;
    } else if (type == CMN_TYPE_WP) {
        int wp_idx = arm_cmn_wp_idx(event);

        for_each_hw_dn(hw, dn, i) {
            void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);

            writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
            writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
        }
    } else for_each_hw_dn(hw, dn, i) {
        int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);

        arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
    }

    arm_cmn_event_read(event);
}

struct arm_cmn_val {
    u8 dtm_count[CMN_MAX_DTMS];
    u8 occupid[CMN_MAX_DTMS][SEL_MAX];
    u8 wp[CMN_MAX_DTMS][4];
    int dtc_count;
    bool cycles;
};

static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
                  struct perf_event *event)
{
    struct arm_cmn_hw_event *hw = to_cmn_hw(event);
    struct arm_cmn_node *dn;
    enum cmn_node_type type;
    int i;

    if (is_software_event(event))
        return;

    type = CMN_EVENT_TYPE(event);
    if (type == CMN_TYPE_DTC) {
        val->cycles = true;
        return;
    }

    val->dtc_count++;

    for_each_hw_dn(hw, dn, i) {
        int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;

        val->dtm_count[dtm]++;

        if (sel > SEL_NONE)
            val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1;

        if (type != CMN_TYPE_WP)
            continue;

        wp_idx = arm_cmn_wp_idx(event);
        val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
    }
}

static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
{
    struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1454     struct arm_cmn_node *dn;
1455     struct perf_event *sibling, *leader = event->group_leader;
1456     enum cmn_node_type type;
1457     struct arm_cmn_val *val;
1458     int i, ret = -EINVAL;
1459 
1460     if (leader == event)
1461         return 0;
1462 
1463     if (event->pmu != leader->pmu && !is_software_event(leader))
1464         return -EINVAL;
1465 
1466     val = kzalloc(sizeof(*val), GFP_KERNEL);
1467     if (!val)
1468         return -ENOMEM;
1469 
1470     arm_cmn_val_add_event(cmn, val, leader);
1471     for_each_sibling_event(sibling, leader)
1472         arm_cmn_val_add_event(cmn, val, sibling);
1473 
1474     type = CMN_EVENT_TYPE(event);
1475     if (type == CMN_TYPE_DTC) {
1476         ret = val->cycles ? -EINVAL : 0;
1477         goto done;
1478     }
1479 
1480     if (val->dtc_count == CMN_DT_NUM_COUNTERS)
1481         goto done;
1482 
1483     for_each_hw_dn(hw, dn, i) {
1484         int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;
1485 
1486         if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
1487             goto done;
1488 
1489         if (sel > SEL_NONE && val->occupid[dtm][sel] &&
1490             val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1)
1491             goto done;
1492 
1493         if (type != CMN_TYPE_WP)
1494             continue;
1495 
1496         wp_idx = arm_cmn_wp_idx(event);
1497         if (val->wp[dtm][wp_idx])
1498             goto done;
1499 
1500         wp_cmb = val->wp[dtm][wp_idx ^ 1];
1501         if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
1502             goto done;
1503     }
1504 
1505     ret = 0;
1506 done:
1507     kfree(val);
1508     return ret;
1509 }
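
/*
 * A note on the bookkeeping above: the occupid[] and wp[] slots in struct
 * arm_cmn_val store 0 for "unclaimed" and the claiming event's value + 1
 * otherwise, so that a claim is distinguishable from a legitimate value of
 * 0. E.g. a sibling with occupid=0 records 1 in its slot; a new event with
 * occupid=0 then sees 1 == 0 + 1 and may coexist, while one with occupid=2
 * sees 1 != 3 and the group is rejected.
 */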
1510 
1511 static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model,
1512                          enum cmn_node_type type,
1513                          unsigned int eventid)
1514 {
1515     struct arm_cmn_event_attr *e;
1516     int i;
1517 
1518     for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
1519         e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
1520         if (e->model & model && e->type == type && e->eventid == eventid)
1521             return e->fsel;
1522     }
1523     return SEL_NONE;
1524 }
1525 
1527 static int arm_cmn_event_init(struct perf_event *event)
1528 {
1529     struct arm_cmn *cmn = to_cmn(event->pmu);
1530     struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1531     struct arm_cmn_node *dn;
1532     enum cmn_node_type type;
1533     bool bynodeid;
1534     u16 nodeid, eventid;
1535 
1536     if (event->attr.type != event->pmu->type)
1537         return -ENOENT;
1538 
1539     if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
1540         return -EINVAL;
1541 
1542     event->cpu = cmn->cpu;
1543     if (event->cpu < 0)
1544         return -EINVAL;
1545 
1546     type = CMN_EVENT_TYPE(event);
1547     /* DTC events (i.e. cycles) already have everything they need */
1548     if (type == CMN_TYPE_DTC)
1549         return 0;
1550 
1551     eventid = CMN_EVENT_EVENTID(event);
1552     /* For watchpoints we need the actual XP node here */
1553     if (type == CMN_TYPE_WP) {
1554         type = CMN_TYPE_XP;
1555         /* ...and we need a "real" direction */
1556         if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
1557             return -EINVAL;
1558         /* ...but the DTM may depend on which port we're watching */
1559         if (cmn->multi_dtm)
1560             hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
1561     } else if (type == CMN_TYPE_XP && cmn->model == CMN700) {
1562         hw->wide_sel = true;
1563     }
1564 
1565     /* This is sufficiently annoying to recalculate, so cache it */
1566     hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid);
1567 
1568     bynodeid = CMN_EVENT_BYNODEID(event);
1569     nodeid = CMN_EVENT_NODEID(event);
1570 
1571     hw->dn = arm_cmn_node(cmn, type);
1572     if (!hw->dn)
1573         return -EINVAL;
1574     for (dn = hw->dn; dn->type == type; dn++) {
1575         if (bynodeid && dn->id != nodeid) {
1576             hw->dn++;
1577             continue;
1578         }
1579         hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc;
1580         hw->num_dns++;
1581         if (bynodeid)
1582             break;
1583     }
1584 
1585     if (!hw->num_dns) {
1586         struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
1587 
1588         dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
1589             nodeid, nid.x, nid.y, nid.port, nid.dev, type);
1590         return -EINVAL;
1591     }
1592 
1593     return arm_cmn_validate_group(cmn, event);
1594 }
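
/*
 * For reference, the config fields consumed above come straight from
 * perf_event_attr::config, so (assuming the usual sysfs format and event
 * aliases from earlier in the driver) counting an event on one specific
 * HN-F might look something like:
 *
 *	perf stat -e arm_cmn_0/hnf_pocq_retry,bynodeid=1,nodeid=0x68/
 *
 * whereas omitting bynodeid aggregates across every HN-F in the mesh. The
 * node ID here is made up; real IDs can be read from the driver's debugfs
 * map.
 */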
1595 
1596 static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
1597                 int i)
1598 {
1599     struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1600     enum cmn_node_type type = CMN_EVENT_TYPE(event);
1601 
1602     while (i--) {
1603         struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
1604         unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
1605 
1606         if (type == CMN_TYPE_WP)
1607             dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
1608 
1609         if (hw->filter_sel > SEL_NONE)
1610             hw->dn[i].occupid[hw->filter_sel].count--;
1611 
1612         dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
1613         writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
1614     }
1615     memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
1616 
1617     for (i = 0; hw->dtcs_used & (1U << i); i++)
1618         cmn->dtc[i].counters[hw->dtc_idx] = NULL;
1619 }
1620 
1621 static int arm_cmn_event_add(struct perf_event *event, int flags)
1622 {
1623     struct arm_cmn *cmn = to_cmn(event->pmu);
1624     struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1625     struct arm_cmn_dtc *dtc = &cmn->dtc[0];
1626     struct arm_cmn_node *dn;
1627     enum cmn_node_type type = CMN_EVENT_TYPE(event);
1628     unsigned int i, dtc_idx, input_sel;
1629 
1630     if (type == CMN_TYPE_DTC) {
1631         i = 0;
1632         while (cmn->dtc[i].cycles)
1633             if (++i == cmn->num_dtcs)
1634                 return -ENOSPC;
1635 
1636         cmn->dtc[i].cycles = event;
1637         hw->dtc_idx = CMN_DT_NUM_COUNTERS;
1638         hw->dtcs_used = 1U << i;
1639 
1640         if (flags & PERF_EF_START)
1641             arm_cmn_event_start(event, 0);
1642         return 0;
1643     }
1644 
1645     /* Grab a free global counter first... */
1646     dtc_idx = 0;
1647     while (dtc->counters[dtc_idx])
1648         if (++dtc_idx == CMN_DT_NUM_COUNTERS)
1649             return -ENOSPC;
1650 
1651     hw->dtc_idx = dtc_idx;
1652 
1653     /* ...then the local counters to feed it. */
1654     for_each_hw_dn(hw, dn, i) {
1655         struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
1656         unsigned int dtm_idx, shift;
1657         u64 reg;
1658 
1659         dtm_idx = 0;
1660         while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx))
1661             if (++dtm_idx == CMN_DTM_NUM_COUNTERS)
1662                 goto free_dtms;
1663 
1664         if (type == CMN_TYPE_XP) {
1665             input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
1666         } else if (type == CMN_TYPE_WP) {
1667             int tmp, wp_idx = arm_cmn_wp_idx(event);
1668             u32 cfg = arm_cmn_wp_config(event);
1669 
1670             if (dtm->wp_event[wp_idx] >= 0)
1671                 goto free_dtms;
1672 
1673             tmp = dtm->wp_event[wp_idx ^ 1];
1674             if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
1675                     CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
1676                 goto free_dtms;
1677 
1678             input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
1679             dtm->wp_event[wp_idx] = dtc_idx;
1680             writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
1681         } else {
1682             struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
1683 
1684             if (cmn->multi_dtm)
1685                 nid.port %= 2;
1686 
1687             input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
1688                     (nid.port << 4) + (nid.dev << 2);
1689 
1690             if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event)))
1691                 goto free_dtms;
1692         }
1693 
1694         arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
1695 
1696         dtm->input_sel[dtm_idx] = input_sel;
1697         shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
1698         dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
1699         dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
1700         dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
1701         reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
1702         writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
1703     }
1704 
1705     /* Go go go! */
1706     arm_cmn_init_counter(event);
1707 
1708     if (flags & PERF_EF_START)
1709         arm_cmn_event_start(event, 0);
1710 
1711     return 0;
1712 
1713 free_dtms:
1714     arm_cmn_event_clear(cmn, event, i);
1715     return -ENOSPC;
1716 }
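
/*
 * To summarise the allocation dance above with a hypothetical example: an
 * event targeting three HN-Fs claims one free global counter on the DTC
 * (say dtc_idx 2), then one free 16-bit counter in each DTM fronting one
 * of those HN-Fs, pointing each DTM counter's input select at the right
 * device or watchpoint and its GLOBAL_NUM field back at global counter 2,
 * so that all three nodes' overflows chain into the same place.
 */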
1717 
1718 static void arm_cmn_event_del(struct perf_event *event, int flags)
1719 {
1720     struct arm_cmn *cmn = to_cmn(event->pmu);
1721     struct arm_cmn_hw_event *hw = to_cmn_hw(event);
1722     enum cmn_node_type type = CMN_EVENT_TYPE(event);
1723 
1724     arm_cmn_event_stop(event, PERF_EF_UPDATE);
1725 
1726     if (type == CMN_TYPE_DTC)
1727         cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL;
1728     else
1729         arm_cmn_event_clear(cmn, event, hw->num_dns);
1730 }
1731 
1732 /*
1733  * We stop the PMU for both add and read, to avoid skew across DTM counters.
1734  * In theory we could use snapshots to read without stopping, but then it
1735  * becomes a lot trickier to deal with overflow and racing against interrupts,
1736  * plus it seems they don't work properly on some hardware anyway :(
1737  */
1738 static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags)
1739 {
1740     arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN);
1741 }
1742 
1743 static void arm_cmn_end_txn(struct pmu *pmu)
1744 {
1745     arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN);
1746 }
1747 
1748 static int arm_cmn_commit_txn(struct pmu *pmu)
1749 {
1750     arm_cmn_end_txn(pmu);
1751     return 0;
1752 }
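
/*
 * In other words, the transaction hooks simply bracket a group's add()
 * calls with the counters paused; roughly the sequence perf core drives
 * when scheduling a two-event group is:
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);	// counters stop
 *	pmu->add(event_a, 0);
 *	pmu->add(event_b, 0);			// programmed without skew
 *	pmu->commit_txn(pmu);			// counters resume together
 *
 * with cancel_txn() unwinding the same state if any add() fails.
 */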
1753 
1754 static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu)
1755 {
1756     unsigned int i;
1757 
1758     perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu);
1759     for (i = 0; i < cmn->num_dtcs; i++)
1760         irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu));
1761     cmn->cpu = cpu;
1762 }
1763 
1764 static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
1765 {
1766     struct arm_cmn *cmn;
1767     int node;
1768 
1769     cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
1770     node = dev_to_node(cmn->dev);
1771     if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
1772         arm_cmn_migrate(cmn, cpu);
1773     return 0;
1774 }
1775 
1776 static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
1777 {
1778     struct arm_cmn *cmn;
1779     unsigned int target;
1780     int node;
1781     cpumask_t mask;
1782 
1783     cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
1784     if (cpu != cmn->cpu)
1785         return 0;
1786 
1787     node = dev_to_node(cmn->dev);
1788     if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
1789         cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
1790         target = cpumask_any(&mask);
1791     else
1792         target = cpumask_any_but(cpu_online_mask, cpu);
1793     if (target < nr_cpu_ids)
1794         arm_cmn_migrate(cmn, target);
1795     return 0;
1796 }
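
/*
 * Taken together, the two hotplug callbacks try to keep the PMU context on
 * a CPU close to the CMN itself: e.g. (hypothetically) with the mesh on
 * NUMA node 1, offlining the current home CPU prefers another online CPU
 * on node 1 before falling back to any online CPU, and a node-1 CPU coming
 * back online later pulls the context home again.
 */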
1797 
1798 static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
1799 {
1800     struct arm_cmn_dtc *dtc = dev_id;
1801     irqreturn_t ret = IRQ_NONE;
1802 
1803     for (;;) {
1804         u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
1805         u64 delta;
1806         int i;
1807 
1808         for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
1809             if (status & (1U << i)) {
1810                 ret = IRQ_HANDLED;
1811                 if (WARN_ON(!dtc->counters[i]))
1812                     continue;
1813                 delta = (u64)arm_cmn_read_counter(dtc, i) << 16;
1814                 local64_add(delta, &dtc->counters[i]->count);
1815             }
1816         }
1817 
1818         if (status & (1U << CMN_DT_NUM_COUNTERS)) {
1819             ret = IRQ_HANDLED;
1820             if (dtc->cc_active && !WARN_ON(!dtc->cycles)) {
1821                 delta = arm_cmn_read_cc(dtc);
1822                 local64_add(delta, &dtc->cycles->count);
1823             }
1824         }
1825 
1826         writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);
1827 
1828         if (!dtc->irq_friend)
1829             return ret;
1830         dtc += dtc->irq_friend;
1831     }
1832 }
1833 
1834 /* We can reasonably accommodate DTCs of the same CMN sharing IRQs */
1835 static int arm_cmn_init_irqs(struct arm_cmn *cmn)
1836 {
1837     int i, j, irq, err;
1838 
1839     for (i = 0; i < cmn->num_dtcs; i++) {
1840         irq = cmn->dtc[i].irq;
1841         for (j = i; j--; ) {
1842             if (cmn->dtc[j].irq == irq) {
1843                 cmn->dtc[j].irq_friend = i - j;
1844                 goto next;
1845             }
1846         }
1847         err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq,
1848                        IRQF_NOBALANCING | IRQF_NO_THREAD,
1849                        dev_name(cmn->dev), &cmn->dtc[i]);
1850         if (err)
1851             return err;
1852 
1853         err = irq_set_affinity(irq, cpumask_of(cmn->cpu));
1854         if (err)
1855             return err;
1856     next:
1857         ; /* isn't C great? */
1858     }
1859     return 0;
1860 }
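
/*
 * The irq_friend linkage set up above is a relative offset to the next DTC
 * sharing the same IRQ, letting the handler daisy-chain with no extra
 * storage. E.g. if dtc[0] and dtc[2] shared an IRQ, this loop would set
 * dtc[0].irq_friend = 2; arm_cmn_handle_irq() then services dtc[0], hops
 * forward by 2 to service dtc[2], and stops there since its irq_friend
 * is 0.
 */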
1861 
1862 static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx)
1863 {
1864     int i;
1865 
1866     dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
1867     dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
1868     for (i = 0; i < 4; i++) {
1869         dtm->wp_event[i] = -1;
1870         writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
1871         writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i));
1872     }
1873 }
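
/*
 * Writing an all-ones VAL with an all-zeroes MASK is the "disabled" state
 * used throughout for watchpoints: the assumption is that matching every
 * flit bit against 1 with nothing masked off can never succeed for real
 * traffic, so a watchpoint parked this way never fires until
 * arm_cmn_event_start() programs a real value/mask pair.
 */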
1874 
1875 static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx)
1876 {
1877     struct arm_cmn_dtc *dtc = cmn->dtc + idx;
1878 
1879     dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
1880     dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
1881     if (dtc->irq < 0)
1882         return dtc->irq;
1883 
1884     writel_relaxed(0, dtc->base + CMN_DT_PMCR);
1885     writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
1886     writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
1887 
1888     return 0;
1889 }
1890 
1891 static int arm_cmn_node_cmp(const void *a, const void *b)
1892 {
1893     const struct arm_cmn_node *dna = a, *dnb = b;
1894     int cmp;
1895 
1896     cmp = dna->type - dnb->type;
1897     if (!cmp)
1898         cmp = dna->logid - dnb->logid;
1899     return cmp;
1900 }
1901 
1902 static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
1903 {
1904     struct arm_cmn_node *dn, *xp;
1905     int dtc_idx = 0;
1906     u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
1907 
1908     cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
1909     if (!cmn->dtc)
1910         return -ENOMEM;
1911 
1912     sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL);
1913 
1914     cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
1915 
1916     for (dn = cmn->dns; dn->type; dn++) {
1917         if (dn->type == CMN_TYPE_XP) {
1918             dn->dtc &= dtcs_present;
1919             continue;
1920         }
1921 
1922         xp = arm_cmn_node_to_xp(cmn, dn);
1923         dn->dtm = xp->dtm;
1924         if (cmn->multi_dtm)
1925             dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
1926 
1927         if (dn->type == CMN_TYPE_DTC) {
1928             int err;
1929             /* We do at least know that a DTC's XP must be in that DTC's domain */
1930             if (xp->dtc == 0xf)
1931                 xp->dtc = 1 << dtc_idx;
1932             err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
1933             if (err)
1934                 return err;
1935         }
1936 
1937         /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
1938         if (dn->type == CMN_TYPE_RND)
1939             dn->type = CMN_TYPE_RNI;
1940 
1941         /* We split the RN-I off already, so let the CCLA part match CCLA events */
1942         if (dn->type == CMN_TYPE_CCLA_RNI)
1943             dn->type = CMN_TYPE_CCLA;
1944     }
1945 
1946     writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
1947 
1948     return 0;
1949 }
1950 
1951 static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
1952 {
1953     int level;
1954     u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO);
1955 
1956     node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg);
1957     node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
1958     node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
1959 
1960     node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;
1961 
1962     if (node->type == CMN_TYPE_CFG)
1963         level = 0;
1964     else if (node->type == CMN_TYPE_XP)
1965         level = 1;
1966     else
1967         level = 2;
1968 
1969     dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n",
1970             (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ',
1971             node->type, node->logid, offset);
1972 }
1973 
1974 static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
1975 {
1976     switch (type) {
1977     case CMN_TYPE_HNP:
1978         return CMN_TYPE_HNI;
1979     case CMN_TYPE_CCLA_RNI:
1980         return CMN_TYPE_RNI;
1981     default:
1982         return CMN_TYPE_INVALID;
1983     }
1984 }
1985 
1986 static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
1987 {
1988     void __iomem *cfg_region;
1989     struct arm_cmn_node cfg, *dn;
1990     struct arm_cmn_dtm *dtm;
1991     u16 child_count, child_poff;
1992     u32 xp_offset[CMN_MAX_XPS];
1993     u64 reg;
1994     int i, j;
1995     size_t sz;
1996 
1997     arm_cmn_init_node_info(cmn, rgn_offset, &cfg);
1998     if (cfg.type != CMN_TYPE_CFG)
1999         return -ENODEV;
2000 
2001     cfg_region = cmn->base + rgn_offset;
2002     reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2);
2003     cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
2004 
2005     reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
2006     cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
2007     cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
2008     cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
2009 
2010     reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1);
2011     cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg);
2012     cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg);
2013 
2014     reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
2015     child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
2016     child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
2017 
2018     cmn->num_xps = child_count;
2019     cmn->num_dns = cmn->num_xps;
2020 
2021     /* Pass 1: visit the XPs, enumerate their children */
2022     for (i = 0; i < cmn->num_xps; i++) {
2023         reg = readq_relaxed(cfg_region + child_poff + i * 8);
2024         xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;
2025 
2026         reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
2027         cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
2028     }
2029 
2030     /*
2031      * Some nodes effectively have two separate types, which we'll handle
2032      * by creating one of each internally. For a (very) safe initial upper
2033      * bound, account for double the number of non-XP nodes.
2034      */
2035     dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps,
2036               sizeof(*dn), GFP_KERNEL);
2037     if (!dn)
2038         return -ENOMEM;
2039 
2040     /* Initial safe upper bound on DTMs for any possible mesh layout */
2041     i = cmn->num_xps;
2042     if (cmn->multi_dtm)
2043         i += cmn->num_xps + 1;
2044     dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL);
2045     if (!dtm)
2046         return -ENOMEM;
2047 
2048     /* Pass 2: now we can actually populate the nodes */
2049     cmn->dns = dn;
2050     cmn->dtms = dtm;
2051     for (i = 0; i < cmn->num_xps; i++) {
2052         void __iomem *xp_region = cmn->base + xp_offset[i];
2053         struct arm_cmn_node *xp = dn++;
2054         unsigned int xp_ports = 0;
2055 
2056         arm_cmn_init_node_info(cmn, xp_offset[i], xp);
2057         /*
2058          * Thanks to the order in which XP logical IDs seem to be
2059          * assigned, we can handily infer the mesh X dimension by
2060          * looking out for the XP at (0,1) without needing to know
2061          * the exact node ID format, which we can later derive.
2062          */
2063         if (xp->id == (1 << 3))
2064             cmn->mesh_x = xp->logid;
2065 
2066         if (cmn->model == CMN600)
2067             xp->dtc = 0xf;
2068         else
2069             xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
2070 
2071         xp->dtm = dtm - cmn->dtms;
2072         arm_cmn_init_dtm(dtm++, xp, 0);
2073         /*
2074          * Keeping track of connected ports will let us filter out
2075          * unnecessary XP events easily. We can also reliably infer the
2076          * "extra device ports" configuration for the node ID format
2077          * from this, since in that case we will see at least one XP
2078          * with port 2 connected, for the HN-D.
2079          */
2080         if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
2081             xp_ports |= BIT(0);
2082         if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
2083             xp_ports |= BIT(1);
2084         if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
2085             xp_ports |= BIT(2);
2086         if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
2087             xp_ports |= BIT(3);
2088         if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
2089             xp_ports |= BIT(4);
2090         if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
2091             xp_ports |= BIT(5);
2092 
2093         if (cmn->multi_dtm && (xp_ports & 0xc))
2094             arm_cmn_init_dtm(dtm++, xp, 1);
2095         if (cmn->multi_dtm && (xp_ports & 0x30))
2096             arm_cmn_init_dtm(dtm++, xp, 2);
2097 
2098         cmn->ports_used |= xp_ports;
2099 
2100         reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
2101         child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
2102         child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
2103 
2104         for (j = 0; j < child_count; j++) {
2105             reg = readq_relaxed(xp_region + child_poff + j * 8);
2106             /*
2107              * Don't even try to touch anything external, since in general
2108              * we haven't a clue how to power up arbitrary CHI requesters.
2109              * As of CMN-600r1 these could only be RN-SAMs or CXLAs,
2110              * neither of which have any PMU events anyway.
2111              * (Actually, CXLAs do seem to have grown some events in r1p2,
2112              * but they don't go to regular XP DTMs, and they depend on
2113              * secure configuration which we can't easily deal with)
2114              */
2115             if (reg & CMN_CHILD_NODE_EXTERNAL) {
2116                 dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
2117                 continue;
2118             }
2119 
2120             arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
2121 
2122             switch (dn->type) {
2123             case CMN_TYPE_DTC:
2124                 cmn->num_dtcs++;
2125                 dn++;
2126                 break;
2127             /* These guys have PMU events */
2128             case CMN_TYPE_DVM:
2129             case CMN_TYPE_HNI:
2130             case CMN_TYPE_HNF:
2131             case CMN_TYPE_SBSX:
2132             case CMN_TYPE_RNI:
2133             case CMN_TYPE_RND:
2134             case CMN_TYPE_MTSX:
2135             case CMN_TYPE_CXRA:
2136             case CMN_TYPE_CXHA:
2137             case CMN_TYPE_CCRA:
2138             case CMN_TYPE_CCHA:
2139             case CMN_TYPE_CCLA:
2140                 dn++;
2141                 break;
2142             /* Nothing to see here */
2143             case CMN_TYPE_MPAM_S:
2144             case CMN_TYPE_MPAM_NS:
2145             case CMN_TYPE_RNSAM:
2146             case CMN_TYPE_CXLA:
2147                 break;
2148             /*
2149              * Split "optimised" combination nodes into separate
2150              * types for the different event sets. Offsetting the
2151              * base address lets us handle the second pmu_event_sel
2152              * register via the normal mechanism later.
2153              */
2154             case CMN_TYPE_HNP:
2155             case CMN_TYPE_CCLA_RNI:
2156                 dn[1] = dn[0];
2157                 dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
2158                 dn[1].type = arm_cmn_subtype(dn->type);
2159                 dn += 2;
2160                 break;
2161             /* Something has gone horribly wrong */
2162             default:
2163                 dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
2164                 return -ENODEV;
2165             }
2166         }
2167     }
2168 
2169     /* Correct for any nodes we added or skipped */
2170     cmn->num_dns = dn - cmn->dns;
2171 
2172     /* Cheeky +1 to help terminate pointer-based iteration later */
2173     sz = (void *)(dn + 1) - (void *)cmn->dns;
2174     dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
2175     if (dn)
2176         cmn->dns = dn;
2177 
2178     sz = (void *)dtm - (void *)cmn->dtms;
2179     dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL);
2180     if (dtm)
2181         cmn->dtms = dtm;
2182 
2183     /*
2184      * If mesh_x wasn't set during discovery then we never saw
2185      * an XP at (0,1), thus we must have an Nx1 configuration.
2186      */
2187     if (!cmn->mesh_x)
2188         cmn->mesh_x = cmn->num_xps;
2189     cmn->mesh_y = cmn->num_xps / cmn->mesh_x;
2190 
2191     /* 1x1 config plays havoc with XP event encodings */
2192     if (cmn->num_xps == 1)
2193         dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n");
2194 
2195     dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev);
2196     reg = cmn->ports_used;
2197     dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n",
2198         cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), &reg,
2199         cmn->multi_dtm ? ", multi-DTM" : "");
2200 
2201     return 0;
2202 }
2203 
2204 static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn)
2205 {
2206     struct resource *cfg, *root;
2207 
2208     cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2209     if (!cfg)
2210         return -EINVAL;
2211 
2212     root = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2213     if (!root)
2214         return -EINVAL;
2215 
2216     if (!resource_contains(cfg, root))
2217         swap(cfg, root);
2218     /*
2219      * Note that devm_ioremap_resource() is dumb and won't let the platform
2220      * device claim cfg when the ACPI companion device has already claimed
2221      * root within it. But since they *are* already both claimed in the
2222      * appropriate name, we don't really need to do it again here anyway.
2223      */
2224     cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg));
2225     if (!cmn->base)
2226         return -ENOMEM;
2227 
2228     return root->start - cfg->start;
2229 }
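
/*
 * Note the return value does double duty: on success it is the offset of
 * the root (CFG) node region within the overall mapping. A sketch with
 * made-up addresses: if the ACPI tables describe cfg at 0x140000000
 * (64MiB) and root at 0x140104000, we map the whole 64MiB region and
 * return 0x104000, which arm_cmn_probe() then passes to
 * arm_cmn_discover() as rgn_offset.
 */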
2230 
2231 static int arm_cmn600_of_probe(struct device_node *np)
2232 {
2233     u32 rootnode;
2234 
2235     return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode;
2236 }
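
/*
 * On DT systems the equivalent offset comes from the "arm,root-node"
 * property; a hypothetical CMN-600 node might look like:
 *
 *	cmn@50000000 {
 *		compatible = "arm,cmn-600";
 *		reg = <0x0 0x50000000 0x0 0x4000000>;
 *		interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 *		arm,root-node = <0x104000>;
 *	};
 *
 * where the ?: expression above yields the property-read error code if
 * the property is missing, or the offset value itself otherwise.
 */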
2237 
2238 static int arm_cmn_probe(struct platform_device *pdev)
2239 {
2240     struct arm_cmn *cmn;
2241     const char *name;
2242     static atomic_t id;
2243     int err, rootnode, this_id;
2244 
2245     cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL);
2246     if (!cmn)
2247         return -ENOMEM;
2248 
2249     cmn->dev = &pdev->dev;
2250     cmn->model = (unsigned long)device_get_match_data(cmn->dev);
2251     platform_set_drvdata(pdev, cmn);
2252 
2253     if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) {
2254         rootnode = arm_cmn600_acpi_probe(pdev, cmn);
2255     } else {
2256         rootnode = 0;
2257         cmn->base = devm_platform_ioremap_resource(pdev, 0);
2258         if (IS_ERR(cmn->base))
2259             return PTR_ERR(cmn->base);
2260         if (cmn->model == CMN600)
2261             rootnode = arm_cmn600_of_probe(pdev->dev.of_node);
2262     }
2263     if (rootnode < 0)
2264         return rootnode;
2265 
2266     err = arm_cmn_discover(cmn, rootnode);
2267     if (err)
2268         return err;
2269 
2270     err = arm_cmn_init_dtcs(cmn);
2271     if (err)
2272         return err;
2273 
2274     err = arm_cmn_init_irqs(cmn);
2275     if (err)
2276         return err;
2277 
2278     cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
2279     cmn->pmu = (struct pmu) {
2280         .module = THIS_MODULE,
2281         .attr_groups = arm_cmn_attr_groups,
2282         .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
2283         .task_ctx_nr = perf_invalid_context,
2284         .pmu_enable = arm_cmn_pmu_enable,
2285         .pmu_disable = arm_cmn_pmu_disable,
2286         .event_init = arm_cmn_event_init,
2287         .add = arm_cmn_event_add,
2288         .del = arm_cmn_event_del,
2289         .start = arm_cmn_event_start,
2290         .stop = arm_cmn_event_stop,
2291         .read = arm_cmn_event_read,
2292         .start_txn = arm_cmn_start_txn,
2293         .commit_txn = arm_cmn_commit_txn,
2294         .cancel_txn = arm_cmn_end_txn,
2295     };
2296 
2297     this_id = atomic_fetch_inc(&id);
2298     name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id);
2299     if (!name)
2300         return -ENOMEM;
2301 
2302     err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node);
2303     if (err)
2304         return err;
2305 
2306     err = perf_pmu_register(&cmn->pmu, name, -1);
2307     if (err)
2308         cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
2309     else
2310         arm_cmn_debugfs_init(cmn, this_id);
2311 
2312     return err;
2313 }
2314 
2315 static int arm_cmn_remove(struct platform_device *pdev)
2316 {
2317     struct arm_cmn *cmn = platform_get_drvdata(pdev);
2318 
2319     writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL);
2320 
2321     perf_pmu_unregister(&cmn->pmu);
2322     cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
2323     debugfs_remove(cmn->debug);
2324     return 0;
2325 }
2326 
2327 #ifdef CONFIG_OF
2328 static const struct of_device_id arm_cmn_of_match[] = {
2329     { .compatible = "arm,cmn-600", .data = (void *)CMN600 },
2330     { .compatible = "arm,cmn-650", .data = (void *)CMN650 },
2331     { .compatible = "arm,cmn-700", .data = (void *)CMN700 },
2332     { .compatible = "arm,ci-700", .data = (void *)CI700 },
2333     {}
2334 };
2335 MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
2336 #endif
2337 
2338 #ifdef CONFIG_ACPI
2339 static const struct acpi_device_id arm_cmn_acpi_match[] = {
2340     { "ARMHC600", CMN600 },
2341     { "ARMHC650", CMN650 },
2342     { "ARMHC700", CMN700 },
2343     {}
2344 };
2345 MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
2346 #endif
2347 
2348 static struct platform_driver arm_cmn_driver = {
2349     .driver = {
2350         .name = "arm-cmn",
2351         .of_match_table = of_match_ptr(arm_cmn_of_match),
2352         .acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
2353     },
2354     .probe = arm_cmn_probe,
2355     .remove = arm_cmn_remove,
2356 };
2357 
2358 static int __init arm_cmn_init(void)
2359 {
2360     int ret;
2361 
2362     ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
2363                       "perf/arm/cmn:online",
2364                       arm_cmn_pmu_online_cpu,
2365                       arm_cmn_pmu_offline_cpu);
2366     if (ret < 0)
2367         return ret;
2368 
2369     arm_cmn_hp_state = ret;
2370     arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL);
2371 
2372     ret = platform_driver_register(&arm_cmn_driver);
2373     if (ret) {
2374         cpuhp_remove_multi_state(arm_cmn_hp_state);
2375         debugfs_remove(arm_cmn_debugfs);
2376     }
2377     return ret;
2378 }
2379 
2380 static void __exit arm_cmn_exit(void)
2381 {
2382     platform_driver_unregister(&arm_cmn_driver);
2383     cpuhp_remove_multi_state(arm_cmn_hp_state);
2384     debugfs_remove(arm_cmn_debugfs);
2385 }
2386 
2387 module_init(arm_cmn_init);
2388 module_exit(arm_cmn_exit);
2389 
2390 MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
2391 MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
2392 MODULE_LICENSE("GPL v2");