0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
0004  *
0005  * Description: CoreSight Program Flow Trace driver
0006  */
0007 
0008 #include <linux/kernel.h>
0009 #include <linux/moduleparam.h>
0010 #include <linux/init.h>
0011 #include <linux/types.h>
0012 #include <linux/device.h>
0013 #include <linux/io.h>
0014 #include <linux/err.h>
0015 #include <linux/fs.h>
0016 #include <linux/slab.h>
0017 #include <linux/delay.h>
0018 #include <linux/smp.h>
0019 #include <linux/sysfs.h>
0020 #include <linux/stat.h>
0021 #include <linux/pm_runtime.h>
0022 #include <linux/cpu.h>
0023 #include <linux/of.h>
0024 #include <linux/coresight.h>
0025 #include <linux/coresight-pmu.h>
0026 #include <linux/amba/bus.h>
0027 #include <linux/seq_file.h>
0028 #include <linux/uaccess.h>
0029 #include <linux/clk.h>
0030 #include <linux/perf_event.h>
0031 #include <asm/sections.h>
0032 
0033 #include "coresight-etm.h"
0034 #include "coresight-etm-perf.h"
0035 
0036 /*
0037  * Not really modular but using module_param is the easiest way to
0038  * remain consistent with existing use cases for now.
0039  */
0040 static int boot_enable;
0041 module_param_named(boot_enable, boot_enable, int, S_IRUGO);
0042 
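     /* Per-CPU ETM driver data, indexed by CPU; set at probe and cleared at remove */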
0043 static struct etm_drvdata *etmdrvdata[NR_CPUS];
0044 
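     /* Dynamically allocated CPU hotplug state for the "online" callback */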
0045 static enum cpuhp_state hp_online;
0046 
0047 /*
0048  * Memory mapped writes to clear the OS lock are not supported on some
0049  * processors; on those processors the OS lock must be unlocked before any
0050  * memory mapped access, otherwise memory mapped reads/writes will be invalid.
0051  */
0052 static void etm_os_unlock(struct etm_drvdata *drvdata)
0053 {
0054     /* Writing any value to ETMOSLAR unlocks the trace registers */
0055     etm_writel(drvdata, 0x0, ETMOSLAR);
0056     drvdata->os_unlock = true;
0057     isb();
0058 }
0059 
0060 static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
0061 {
0062     u32 etmcr;
0063 
0064     /* Ensure pending cp14 accesses complete before setting pwrdwn */
0065     mb();
0066     isb();
0067     etmcr = etm_readl(drvdata, ETMCR);
0068     etmcr |= ETMCR_PWD_DWN;
0069     etm_writel(drvdata, etmcr, ETMCR);
0070 }
0071 
0072 static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
0073 {
0074     u32 etmcr;
0075 
0076     etmcr = etm_readl(drvdata, ETMCR);
0077     etmcr &= ~ETMCR_PWD_DWN;
0078     etm_writel(drvdata, etmcr, ETMCR);
0079     /* Ensure pwrup completes before subsequent cp14 accesses */
0080     mb();
0081     isb();
0082 }
0083 
0084 static void etm_set_pwrup(struct etm_drvdata *drvdata)
0085 {
0086     u32 etmpdcr;
0087 
0088     etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
0089     etmpdcr |= ETMPDCR_PWD_UP;
0090     writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
0091     /* Ensure pwrup completes before subsequent cp14 accesses */
0092     mb();
0093     isb();
0094 }
0095 
0096 static void etm_clr_pwrup(struct etm_drvdata *drvdata)
0097 {
0098     u32 etmpdcr;
0099 
0100     /* Ensure pending cp14 accesses complete before clearing pwrup */
0101     mb();
0102     isb();
0103     etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
0104     etmpdcr &= ~ETMPDCR_PWD_UP;
0105     writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
0106 }
0107 
0108 /**
0109  * coresight_timeout_etm - loop until a bit has changed to a specific state.
0110  * @drvdata: etm's private data structure.
0111  * @offset: offset of the register to poll within the ETM.
0112  * @position: the position of the bit of interest.
0113  * @value: the value the bit should have.
0114  *
0115  * Basically the same as @coresight_timeout except for the register access
0116  * method where we have to account for CP14 configurations.
0117  *
0118  * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
0119  * TIMEOUT_US has elapsed, whichever happens first.
0120  */
0121 
0122 static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
0123                   int position, int value)
0124 {
0125     int i;
0126     u32 val;
0127 
0128     for (i = TIMEOUT_US; i > 0; i--) {
0129         val = etm_readl(drvdata, offset);
0130         /* Waiting on the bit to go from 0 to 1 */
0131         if (value) {
0132             if (val & BIT(position))
0133                 return 0;
0134         /* Waiting on the bit to go from 1 to 0 */
0135         } else {
0136             if (!(val & BIT(position)))
0137                 return 0;
0138         }
0139 
0140         /*
0141          * Delay is arbitrary - the specification doesn't say how long
0142          * we are expected to wait.  Extra check required to make sure
0143          * we don't wait needlessly on the last iteration.
0144          */
0145         if (i - 1)
0146             udelay(1);
0147     }
0148 
0149     return -EAGAIN;
0150 }
0151 
0152 
0153 static void etm_set_prog(struct etm_drvdata *drvdata)
0154 {
0155     u32 etmcr;
0156 
0157     etmcr = etm_readl(drvdata, ETMCR);
0158     etmcr |= ETMCR_ETM_PRG;
0159     etm_writel(drvdata, etmcr, ETMCR);
0160     /*
0161      * Recommended by spec for cp14 accesses to ensure etmcr write is
0162      * complete before polling etmsr
0163      */
0164     isb();
0165     if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
0166         dev_err(&drvdata->csdev->dev,
0167             "%s: timeout observed when probing at offset %#x\n",
0168             __func__, ETMSR);
0169     }
0170 }
0171 
0172 static void etm_clr_prog(struct etm_drvdata *drvdata)
0173 {
0174     u32 etmcr;
0175 
0176     etmcr = etm_readl(drvdata, ETMCR);
0177     etmcr &= ~ETMCR_ETM_PRG;
0178     etm_writel(drvdata, etmcr, ETMCR);
0179     /*
0180      * Recommended by spec for cp14 accesses to ensure etmcr write is
0181      * complete before polling etmsr
0182      */
0183     isb();
0184     if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
0185         dev_err(&drvdata->csdev->dev,
0186             "%s: timeout observed when probing at offset %#x\n",
0187             __func__, ETMSR);
0188     }
0189 }
0190 
0191 void etm_set_default(struct etm_config *config)
0192 {
0193     int i;
0194 
0195     if (WARN_ON_ONCE(!config))
0196         return;
0197 
0198     /*
0199      * Taken verbatim from the TRM:
0200      *
0201      * To trace all memory:
0202      *  set bit [24] in register 0x009, the ETMTECR1, to 1
0203      *  set all other bits in register 0x009, the ETMTECR1, to 0
0204      *  set all bits in register 0x007, the ETMTECR2, to 0
0205      *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
0206      */
0207     config->enable_ctrl1 = ETMTECR1_INC_EXC;
0208     config->enable_ctrl2 = 0x0;
0209     config->enable_event = ETM_HARD_WIRE_RES_A;
0210 
0211     config->trigger_event = ETM_DEFAULT_EVENT_VAL;
0212     config->enable_event = ETM_HARD_WIRE_RES_A;
0213 
0214     config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
0215     config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
0216     config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
0217     config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
0218     config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
0219     config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
0220     config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
0221 
0222     for (i = 0; i < ETM_MAX_CNTR; i++) {
0223         config->cntr_rld_val[i] = 0x0;
0224         config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
0225         config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
0226         config->cntr_val[i] = 0x0;
0227     }
0228 
0229     config->seq_curr_state = 0x0;
0230     config->ctxid_idx = 0x0;
0231     for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
0232         config->ctxid_pid[i] = 0x0;
0233 
0234     config->ctxid_mask = 0x0;
0235     /* Setting default to 1024 as per TRM recommendation */
0236     config->sync_freq = 0x400;
0237 }
0238 
0239 void etm_config_trace_mode(struct etm_config *config)
0240 {
0241     u32 flags, mode;
0242 
0243     mode = config->mode;
0244 
0245     mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
0246 
0247     /* excluding kernel AND user space doesn't make sense */
0248     if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
0249         return;
0250 
0251     /* nothing to do if neither flag is set */
0252     if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
0253         return;
0254 
0255     flags = (1 << 0 |   /* instruction execute */
0256          3 << 3 |   /* ARM instruction */
0257          0 << 5 |   /* No data value comparison */
0258          0 << 7 |   /* No exact match */
0259          0 << 8);   /* Ignore context ID */
0260 
0261     /* No need to worry about single address comparators. */
0262     config->enable_ctrl2 = 0x0;
0263 
0264     /* Bit 0 is address range comparator 1 */
0265     config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
0266 
0267     /*
0268      * On ETMv3.5:
0269      * ETMACTRn[13,11] == Non-secure state comparison control
0270      * ETMACTRn[12,10] == Secure state comparison control
0271      *
0272      * b00 == Match in all modes in this state
0273      * b01 == Do not match in any mode in this state
0274      * b10 == Match in all modes except user mode in this state
0275      * b11 == Match only in user mode in this state
0276      */
0277 
0278     /* Tracing in secure mode is not supported at this time */
0279     flags |= (0 << 12 | 1 << 10);
0280 
0281     if (mode & ETM_MODE_EXCL_USER) {
0282         /* exclude user, match all modes except user mode */
0283         flags |= (1 << 13 | 0 << 11);
0284     } else {
0285         /* exclude kernel, match only in user mode */
0286         flags |= (1 << 13 | 1 << 11);
0287     }
0288 
0289     /*
0290      * The ETMTEEVR register is already set to "hard wire A".  As such
0291      * all there is to do is set up an address comparator that spans
0292      * the entire address range and configure the state and mode bits.
0293      */
0294     config->addr_val[0] = (u32) 0x0;
0295     config->addr_val[1] = (u32) ~0x0;
0296     config->addr_acctype[0] = flags;
0297     config->addr_acctype[1] = flags;
0298     config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
0299     config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
0300 }
0301 
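     /* Bits of the perf event attr->config that map straight onto ETMCR */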
0302 #define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
0303                  ETMCR_TIMESTAMP_EN | \
0304                  ETMCR_RETURN_STACK)
0305 
0306 static int etm_parse_event_config(struct etm_drvdata *drvdata,
0307                   struct perf_event *event)
0308 {
0309     struct etm_config *config = &drvdata->config;
0310     struct perf_event_attr *attr = &event->attr;
0311 
0312     if (!attr)
0313         return -EINVAL;
0314 
0315     /* Clear configuration from previous run */
0316     memset(config, 0, sizeof(struct etm_config));
0317 
0318     if (attr->exclude_kernel)
0319         config->mode = ETM_MODE_EXCL_KERN;
0320 
0321     if (attr->exclude_user)
0322         config->mode = ETM_MODE_EXCL_USER;
0323 
0324     /* Always start from the default config */
0325     etm_set_default(config);
0326 
0327     /*
0328      * By default the tracers are configured to trace the whole address
0329      * range.  Narrow the field only if requested by user space.
0330      */
0331     if (config->mode)
0332         etm_config_trace_mode(config);
0333 
0334     /*
0335      * At this time only cycle accurate, return stack and timestamp
0336      * options are available.
0337      */
0338     if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
0339         return -EINVAL;
0340 
0341     config->ctrl = attr->config;
0342 
0343     /* Don't trace contextID when running in a non-root PID namespace */
0344     if (!task_is_in_init_pid_ns(current))
0345         config->ctrl &= ~ETMCR_CTXID_SIZE;
0346 
0347     /*
0348      * Possible to have cores with PTM (supports ret stack) and ETM
0349      * (never has ret stack) on the same SoC. So if we have a request
0350      * for return stack that can't be honoured on this core then
0351      * clear the bit - trace will still continue normally
0352      */
0353     if ((config->ctrl & ETMCR_RETURN_STACK) &&
0354         !(drvdata->etmccer & ETMCCER_RETSTACK))
0355         config->ctrl &= ~ETMCR_RETURN_STACK;
0356 
0357     return 0;
0358 }
0359 
0360 static int etm_enable_hw(struct etm_drvdata *drvdata)
0361 {
0362     int i, rc;
0363     u32 etmcr;
0364     struct etm_config *config = &drvdata->config;
0365     struct coresight_device *csdev = drvdata->csdev;
0366 
0367     CS_UNLOCK(drvdata->base);
0368 
0369     rc = coresight_claim_device_unlocked(csdev);
0370     if (rc)
0371         goto done;
0372 
0373     /* Turn engine on */
0374     etm_clr_pwrdwn(drvdata);
0375     /* Apply power to trace registers */
0376     etm_set_pwrup(drvdata);
0377     /* Make sure all registers are accessible */
0378     etm_os_unlock(drvdata);
0379 
0380     etm_set_prog(drvdata);
0381 
0382     etmcr = etm_readl(drvdata, ETMCR);
0383     /* Clear setting from a previous run if need be */
0384     etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
0385     etmcr |= drvdata->port_size;
0386     etmcr |= ETMCR_ETM_EN;
0387     etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
0388     etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
0389     etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
0390     etm_writel(drvdata, config->enable_event, ETMTEEVR);
0391     etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
0392     etm_writel(drvdata, config->fifofull_level, ETMFFLR);
0393     for (i = 0; i < drvdata->nr_addr_cmp; i++) {
0394         etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
0395         etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
0396     }
0397     for (i = 0; i < drvdata->nr_cntr; i++) {
0398         etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
0399         etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
0400         etm_writel(drvdata, config->cntr_rld_event[i],
0401                ETMCNTRLDEVRn(i));
0402         etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
0403     }
0404     etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
0405     etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
0406     etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
0407     etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
0408     etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
0409     etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
0410     etm_writel(drvdata, config->seq_curr_state, ETMSQR);
0411     for (i = 0; i < drvdata->nr_ext_out; i++)
0412         etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
0413     for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
0414         etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
0415     etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
0416     etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
0417     /* No external input selected */
0418     etm_writel(drvdata, 0x0, ETMEXTINSELR);
0419     etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
0420     /* No auxiliary control selected */
0421     etm_writel(drvdata, 0x0, ETMAUXCR);
0422     etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
0423     /* No VMID comparator value selected */
0424     etm_writel(drvdata, 0x0, ETMVMIDCVR);
0425 
0426     etm_clr_prog(drvdata);
0427 
0428 done:
0429     CS_LOCK(drvdata->base);
0430 
0431     dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
0432         drvdata->cpu, rc);
0433     return rc;
0434 }
0435 
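     /* Arguments for etm_enable_hw() when invoked via smp_call_function_single() */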
0436 struct etm_enable_arg {
0437     struct etm_drvdata *drvdata;
0438     int rc;
0439 };
0440 
0441 static void etm_enable_hw_smp_call(void *info)
0442 {
0443     struct etm_enable_arg *arg = info;
0444 
0445     if (WARN_ON(!arg))
0446         return;
0447     arg->rc = etm_enable_hw(arg->drvdata);
0448 }
0449 
0450 static int etm_cpu_id(struct coresight_device *csdev)
0451 {
0452     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0453 
0454     return drvdata->cpu;
0455 }
0456 
0457 int etm_get_trace_id(struct etm_drvdata *drvdata)
0458 {
0459     unsigned long flags;
0460     int trace_id = -1;
0461     struct device *etm_dev;
0462 
0463     if (!drvdata)
0464         goto out;
0465 
0466     etm_dev = drvdata->csdev->dev.parent;
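         /* Tracer not enabled: the cached trace ID is current */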
0467     if (!local_read(&drvdata->mode))
0468         return drvdata->traceid;
0469 
0470     pm_runtime_get_sync(etm_dev);
0471 
0472     spin_lock_irqsave(&drvdata->spinlock, flags);
0473 
0474     CS_UNLOCK(drvdata->base);
0475     trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
0476     CS_LOCK(drvdata->base);
0477 
0478     spin_unlock_irqrestore(&drvdata->spinlock, flags);
0479     pm_runtime_put(etm_dev);
0480 
0481 out:
0482     return trace_id;
0483 
0484 }
0485 
0486 static int etm_trace_id(struct coresight_device *csdev)
0487 {
0488     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0489 
0490     return etm_get_trace_id(drvdata);
0491 }
0492 
0493 static int etm_enable_perf(struct coresight_device *csdev,
0494                struct perf_event *event)
0495 {
0496     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0497 
0498     if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
0499         return -EINVAL;
0500 
0501     /* Configure the tracer based on the session's specifics */
0502     etm_parse_event_config(drvdata, event);
0503     /* And enable it */
0504     return etm_enable_hw(drvdata);
0505 }
0506 
0507 static int etm_enable_sysfs(struct coresight_device *csdev)
0508 {
0509     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0510     struct etm_enable_arg arg = { };
0511     int ret;
0512 
0513     spin_lock(&drvdata->spinlock);
0514 
0515     /*
0516      * Configure the ETM only if the CPU is online.  If it isn't online,
0517      * hw configuration will take place on the local CPU during bring up.
0518      */
0519     if (cpu_online(drvdata->cpu)) {
0520         arg.drvdata = drvdata;
0521         ret = smp_call_function_single(drvdata->cpu,
0522                            etm_enable_hw_smp_call, &arg, 1);
0523         if (!ret)
0524             ret = arg.rc;
0525         if (!ret)
0526             drvdata->sticky_enable = true;
0527     } else {
0528         ret = -ENODEV;
0529     }
0530 
0531     spin_unlock(&drvdata->spinlock);
0532 
0533     if (!ret)
0534         dev_dbg(&csdev->dev, "ETM tracing enabled\n");
0535     return ret;
0536 }
0537 
0538 static int etm_enable(struct coresight_device *csdev,
0539               struct perf_event *event, u32 mode)
0540 {
0541     int ret;
0542     u32 val;
0543     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0544 
0545     val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
0546 
0547     /* Someone is already using the tracer */
0548     if (val)
0549         return -EBUSY;
0550 
0551     switch (mode) {
0552     case CS_MODE_SYSFS:
0553         ret = etm_enable_sysfs(csdev);
0554         break;
0555     case CS_MODE_PERF:
0556         ret = etm_enable_perf(csdev, event);
0557         break;
0558     default:
0559         ret = -EINVAL;
0560     }
0561 
0562     /* The tracer didn't start */
0563     if (ret)
0564         local_set(&drvdata->mode, CS_MODE_DISABLED);
0565 
0566     return ret;
0567 }
0568 
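     /*
      * Must run on the CPU that owns this ETM: called either via
      * smp_call_function_single() or from the CPU hotplug DYING callback.
      */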
0569 static void etm_disable_hw(void *info)
0570 {
0571     int i;
0572     struct etm_drvdata *drvdata = info;
0573     struct etm_config *config = &drvdata->config;
0574     struct coresight_device *csdev = drvdata->csdev;
0575 
0576     CS_UNLOCK(drvdata->base);
0577     etm_set_prog(drvdata);
0578 
0579     /* Read back sequencer and counters for post trace analysis */
0580     config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
0581 
0582     for (i = 0; i < drvdata->nr_cntr; i++)
0583         config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
0584 
0585     etm_set_pwrdwn(drvdata);
0586     coresight_disclaim_device_unlocked(csdev);
0587 
0588     CS_LOCK(drvdata->base);
0589 
0590     dev_dbg(&drvdata->csdev->dev,
0591         "cpu: %d disable smp call done\n", drvdata->cpu);
0592 }
0593 
0594 static void etm_disable_perf(struct coresight_device *csdev)
0595 {
0596     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0597 
0598     if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
0599         return;
0600 
0601     CS_UNLOCK(drvdata->base);
0602 
0603     /* Setting the prog bit disables tracing immediately */
0604     etm_set_prog(drvdata);
0605 
0606     /*
0607      * There is no way to know when the tracer will be used again so
0608      * power down the tracer.
0609      */
0610     etm_set_pwrdwn(drvdata);
0611     coresight_disclaim_device_unlocked(csdev);
0612 
0613     CS_LOCK(drvdata->base);
0614 }
0615 
0616 static void etm_disable_sysfs(struct coresight_device *csdev)
0617 {
0618     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0619 
0620     /*
0621      * Taking the hotplug lock here protects against the clocks being
0622      * disabled while tracing is left on (a crash scenario) should a user
0623      * disable occur after the cpu online mask shows the cpu as offline but
0624      * before the DYING hotplug callback is serviced by the ETM driver.
0625      */
0626     cpus_read_lock();
0627     spin_lock(&drvdata->spinlock);
0628 
0629     /*
0630      * Executing etm_disable_hw on the cpu whose ETM is being disabled
0631      * ensures that register writes occur while the cpu is powered.
0632      */
0633     smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
0634 
0635     spin_unlock(&drvdata->spinlock);
0636     cpus_read_unlock();
0637 
0638     dev_dbg(&csdev->dev, "ETM tracing disabled\n");
0639 }
0640 
0641 static void etm_disable(struct coresight_device *csdev,
0642             struct perf_event *event)
0643 {
0644     u32 mode;
0645     struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
0646 
0647     /*
0648      * For as long as the tracer isn't disabled another entity can't
0649      * change its status.  As such we can read the status here without
0650      * fearing it will change under us.
0651      */
0652     mode = local_read(&drvdata->mode);
0653 
0654     switch (mode) {
0655     case CS_MODE_DISABLED:
0656         break;
0657     case CS_MODE_SYSFS:
0658         etm_disable_sysfs(csdev);
0659         break;
0660     case CS_MODE_PERF:
0661         etm_disable_perf(csdev);
0662         break;
0663     default:
0664         WARN_ON_ONCE(mode);
0665         return;
0666     }
0667 
0668     if (mode)
0669         local_set(&drvdata->mode, CS_MODE_DISABLED);
0670 }
0671 
0672 static const struct coresight_ops_source etm_source_ops = {
0673     .cpu_id     = etm_cpu_id,
0674     .trace_id   = etm_trace_id,
0675     .enable     = etm_enable,
0676     .disable    = etm_disable,
0677 };
0678 
0679 static const struct coresight_ops etm_cs_ops = {
0680     .source_ops = &etm_source_ops,
0681 };
0682 
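     /*
      * CPU hotplug "online" callback: honour a boot-time enable request
      * once the CPU and its ETM become available.
      */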
0683 static int etm_online_cpu(unsigned int cpu)
0684 {
0685     if (!etmdrvdata[cpu])
0686         return 0;
0687 
0688     if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
0689         coresight_enable(etmdrvdata[cpu]->csdev);
0690     return 0;
0691 }
0692 
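     /*
      * CPU hotplug STARTING callback: runs on the incoming CPU, unlocks the
      * OS lock and restores the trace configuration if the tracer is in use.
      */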
0693 static int etm_starting_cpu(unsigned int cpu)
0694 {
0695     if (!etmdrvdata[cpu])
0696         return 0;
0697 
0698     spin_lock(&etmdrvdata[cpu]->spinlock);
0699     if (!etmdrvdata[cpu]->os_unlock) {
0700         etm_os_unlock(etmdrvdata[cpu]);
0701         etmdrvdata[cpu]->os_unlock = true;
0702     }
0703 
0704     if (local_read(&etmdrvdata[cpu]->mode))
0705         etm_enable_hw(etmdrvdata[cpu]);
0706     spin_unlock(&etmdrvdata[cpu]->spinlock);
0707     return 0;
0708 }
0709 
0710 static int etm_dying_cpu(unsigned int cpu)
0711 {
0712     if (!etmdrvdata[cpu])
0713         return 0;
0714 
0715     spin_lock(&etmdrvdata[cpu]->spinlock);
0716     if (local_read(&etmdrvdata[cpu]->mode))
0717         etm_disable_hw(etmdrvdata[cpu]);
0718     spin_unlock(&etmdrvdata[cpu]->spinlock);
0719     return 0;
0720 }
0721 
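     /* Only ETM 3.3/3.5 and PTM 1.0/1.1 implementations are handled by this driver */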
0722 static bool etm_arch_supported(u8 arch)
0723 {
0724     switch (arch) {
0725     case ETM_ARCH_V3_3:
0726         break;
0727     case ETM_ARCH_V3_5:
0728         break;
0729     case PFT_ARCH_V1_0:
0730         break;
0731     case PFT_ARCH_V1_1:
0732         break;
0733     default:
0734         return false;
0735     }
0736     return true;
0737 }
0738 
0739 static void etm_init_arch_data(void *info)
0740 {
0741     u32 etmidr;
0742     u32 etmccr;
0743     struct etm_drvdata *drvdata = info;
0744 
0745     /* Make sure all registers are accessible */
0746     etm_os_unlock(drvdata);
0747 
0748     CS_UNLOCK(drvdata->base);
0749 
0750     /* First dummy read */
0751     (void)etm_readl(drvdata, ETMPDSR);
0752     /* Provide power to ETM: ETMPDCR[3] == 1 */
0753     etm_set_pwrup(drvdata);
0754     /*
0755      * Clear the power down bit since, while it is set, writes to
0756      * certain registers might be ignored.
0757      */
0758     etm_clr_pwrdwn(drvdata);
0759     /*
0760      * Set the prog bit. It will be set from reset, but this is included
0761      * to ensure it is set.
0762      */
0763     etm_set_prog(drvdata);
0764 
0765     /* Find all capabilities */
0766     etmidr = etm_readl(drvdata, ETMIDR);
0767     drvdata->arch = BMVAL(etmidr, 4, 11);
0768     drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
0769 
0770     drvdata->etmccer = etm_readl(drvdata, ETMCCER);
0771     etmccr = etm_readl(drvdata, ETMCCR);
0772     drvdata->etmccr = etmccr;
0773     drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
0774     drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
0775     drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
0776     drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
0777     drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
0778 
0779     etm_set_pwrdwn(drvdata);
0780     etm_clr_pwrup(drvdata);
0781     CS_LOCK(drvdata->base);
0782 }
0783 
0784 static void etm_init_trace_id(struct etm_drvdata *drvdata)
0785 {
0786     drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
0787 }
0788 
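     /*
      * Register the STARTING/DYING and dynamic "online" CPU hotplug callbacks.
      * The dynamic state id is kept in hp_online so it can be removed later.
      */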
0789 static int __init etm_hp_setup(void)
0790 {
0791     int ret;
0792 
0793     ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
0794                            "arm/coresight:starting",
0795                            etm_starting_cpu, etm_dying_cpu);
0796 
0797     if (ret)
0798         return ret;
0799 
0800     ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
0801                            "arm/coresight:online",
0802                            etm_online_cpu, NULL);
0803 
0804     /* HP dyn state ID returned in ret on success */
0805     if (ret > 0) {
0806         hp_online = ret;
0807         return 0;
0808     }
0809 
0810     /* failed dyn state - remove others */
0811     cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
0812 
0813     return ret;
0814 }
0815 
0816 static void etm_hp_clear(void)
0817 {
0818     cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
0819     if (hp_online) {
0820         cpuhp_remove_state_nocalls(hp_online);
0821         hp_online = 0;
0822     }
0823 }
0824 
0825 static int etm_probe(struct amba_device *adev, const struct amba_id *id)
0826 {
0827     int ret;
0828     void __iomem *base;
0829     struct device *dev = &adev->dev;
0830     struct coresight_platform_data *pdata = NULL;
0831     struct etm_drvdata *drvdata;
0832     struct resource *res = &adev->res;
0833     struct coresight_desc desc = { 0 };
0834 
0835     drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
0836     if (!drvdata)
0837         return -ENOMEM;
0838 
0839     drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14");
0840     dev_set_drvdata(dev, drvdata);
0841 
0842     /* Validity for the resource is already checked by the AMBA core */
0843     base = devm_ioremap_resource(dev, res);
0844     if (IS_ERR(base))
0845         return PTR_ERR(base);
0846 
0847     drvdata->base = base;
0848     desc.access = CSDEV_ACCESS_IOMEM(base);
0849 
0850     spin_lock_init(&drvdata->spinlock);
0851 
0852     drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
0853     if (!IS_ERR(drvdata->atclk)) {
0854         ret = clk_prepare_enable(drvdata->atclk);
0855         if (ret)
0856             return ret;
0857     }
0858 
0859     drvdata->cpu = coresight_get_cpu(dev);
0860     if (drvdata->cpu < 0)
0861         return drvdata->cpu;
0862 
0863     desc.name  = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
0864     if (!desc.name)
0865         return -ENOMEM;
0866 
0867     if (smp_call_function_single(drvdata->cpu,
0868                      etm_init_arch_data, drvdata, 1))
0869         dev_err(dev, "ETM arch init failed\n");
0870 
0871     if (!etm_arch_supported(drvdata->arch))
0872         return -EINVAL;
0873 
0874     etm_init_trace_id(drvdata);
0875     etm_set_default(&drvdata->config);
0876 
0877     pdata = coresight_get_platform_data(dev);
0878     if (IS_ERR(pdata))
0879         return PTR_ERR(pdata);
0880 
0881     adev->dev.platform_data = pdata;
0882 
0883     desc.type = CORESIGHT_DEV_TYPE_SOURCE;
0884     desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
0885     desc.ops = &etm_cs_ops;
0886     desc.pdata = pdata;
0887     desc.dev = dev;
0888     desc.groups = coresight_etm_groups;
0889     drvdata->csdev = coresight_register(&desc);
0890     if (IS_ERR(drvdata->csdev))
0891         return PTR_ERR(drvdata->csdev);
0892 
0893     ret = etm_perf_symlink(drvdata->csdev, true);
0894     if (ret) {
0895         coresight_unregister(drvdata->csdev);
0896         return ret;
0897     }
0898 
0899     etmdrvdata[drvdata->cpu] = drvdata;
0900 
0901     pm_runtime_put(&adev->dev);
0902     dev_info(&drvdata->csdev->dev,
0903          "%s initialized\n", (char *)coresight_get_uci_data(id));
0904     if (boot_enable) {
0905         coresight_enable(drvdata->csdev);
0906         drvdata->boot_enable = true;
0907     }
0908 
0909     return 0;
0910 }
0911 
0912 static void clear_etmdrvdata(void *info)
0913 {
0914     int cpu = *(int *)info;
0915 
0916     etmdrvdata[cpu] = NULL;
0917 }
0918 
0919 static void etm_remove(struct amba_device *adev)
0920 {
0921     struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
0922 
0923     etm_perf_symlink(drvdata->csdev, false);
0924 
0925     /*
0926      * Taking hotplug lock here to avoid racing between etm_remove and
0927      * CPU hotplug callbacks.
0928      */
0929     cpus_read_lock();
0930     /*
0931      * The readers of etmdrvdata[] are the CPU hotplug callbacks and PM
0932      * notification callbacks. Changing etmdrvdata[i] on CPU i ensures
0933      * these callbacks see a consistent view within a single callback
0934      * function.
0935      */
0936     if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
0937         etmdrvdata[drvdata->cpu] = NULL;
0938 
0939     cpus_read_unlock();
0940 
0941     coresight_unregister(drvdata->csdev);
0942 }
0943 
0944 #ifdef CONFIG_PM
0945 static int etm_runtime_suspend(struct device *dev)
0946 {
0947     struct etm_drvdata *drvdata = dev_get_drvdata(dev);
0948 
0949     if (drvdata && !IS_ERR(drvdata->atclk))
0950         clk_disable_unprepare(drvdata->atclk);
0951 
0952     return 0;
0953 }
0954 
0955 static int etm_runtime_resume(struct device *dev)
0956 {
0957     struct etm_drvdata *drvdata = dev_get_drvdata(dev);
0958 
0959     if (drvdata && !IS_ERR(drvdata->atclk))
0960         clk_prepare_enable(drvdata->atclk);
0961 
0962     return 0;
0963 }
0964 #endif
0965 
0966 static const struct dev_pm_ops etm_dev_pm_ops = {
0967     SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
0968 };
0969 
0970 static const struct amba_id etm_ids[] = {
0971     /* ETM 3.3 */
0972     CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
0973     /* ETM 3.5 - Cortex-A5 */
0974     CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
0975     /* ETM 3.5 */
0976     CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
0977     /* PTM 1.0 */
0978     CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
0979     /* PTM 1.1 */
0980     CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
0981     /* PTM 1.1 Qualcomm */
0982     CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
0983     { 0, 0},
0984 };
0985 
0986 MODULE_DEVICE_TABLE(amba, etm_ids);
0987 
0988 static struct amba_driver etm_driver = {
0989     .drv = {
0990         .name   = "coresight-etm3x",
0991         .owner  = THIS_MODULE,
0992         .pm = &etm_dev_pm_ops,
0993         .suppress_bind_attrs = true,
0994     },
0995     .probe      = etm_probe,
0996     .remove         = etm_remove,
0997     .id_table   = etm_ids,
0998 };
0999 
1000 static int __init etm_init(void)
1001 {
1002     int ret;
1003 
1004     ret = etm_hp_setup();
1005 
1006     /* etm_hp_setup() does its own cleanup - exit on error */
1007     if (ret)
1008         return ret;
1009 
1010     ret = amba_driver_register(&etm_driver);
1011     if (ret) {
1012         pr_err("Error registering etm3x driver\n");
1013         etm_hp_clear();
1014     }
1015 
1016     return ret;
1017 }
1018 
1019 static void __exit etm_exit(void)
1020 {
1021     amba_driver_unregister(&etm_driver);
1022     etm_hp_clear();
1023 }
1024 
1025 module_init(etm_init);
1026 module_exit(etm_exit);
1027 
1028 MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
1029 MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
1030 MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace driver");
1031 MODULE_LICENSE("GPL v2");