Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (c) 2017 Linaro Limited. All rights reserved.
0004  *
0005  * Author: Leo Yan <leo.yan@linaro.org>
0006  */
0007 #include <linux/amba/bus.h>
0008 #include <linux/coresight.h>
0009 #include <linux/cpu.h>
0010 #include <linux/debugfs.h>
0011 #include <linux/delay.h>
0012 #include <linux/device.h>
0013 #include <linux/err.h>
0014 #include <linux/init.h>
0015 #include <linux/io.h>
0016 #include <linux/iopoll.h>
0017 #include <linux/kernel.h>
0018 #include <linux/module.h>
0019 #include <linux/moduleparam.h>
0020 #include <linux/panic_notifier.h>
0021 #include <linux/pm_qos.h>
0022 #include <linux/slab.h>
0023 #include <linux/smp.h>
0024 #include <linux/types.h>
0025 #include <linux/uaccess.h>
0026 
0027 #include "coresight-priv.h"
0028 
/* External debug register offsets within the per-CPU debug module */
#define EDPCSR				0x0A0
#define EDCIDSR				0x0A4
#define EDVIDSR				0x0A8
#define EDPCSR_HI			0x0AC
#define EDOSLAR				0x300
#define EDPRCR				0x310
#define EDPRSR				0x314
#define EDDEVID1			0xFC4
#define EDDEVID				0xFC8

/* EDPCSR reads as all-ones when PC sampling is prohibited */
#define EDPCSR_PROHIBITED		0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB			BIT(0)
#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ			BIT(3)
#define EDPRCR_CORENPDRQ		BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK			BIT(6)
#define EDPRSR_PU			BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS			BIT(31)
#define EDVIDSR_E2			BIT(30)
#define EDVIDSR_E3			BIT(29)
#define EDVIDSR_HV			BIT(28)
#define EDVIDSR_VMID			GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PSCROffset
 *
 * NOTE: armv8 and armv7 have different definitions for the register,
 * so consolidate the bits definition as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state, we
 *          rely on EDDEVID to check if EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR		(0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
#define EDDEVID_IMPL_FULL		(0x3)

/* Poll delay and timeout used for EDPRSR power-up checks, in microseconds */
#define DEBUG_WAIT_SLEEP		1000
#define DEBUG_WAIT_TIMEOUT		32000
0085 
/* Per-CPU driver state for one external debug module */
struct debug_drvdata {
	void __iomem	*base;	/* mapped external debug register window */
	struct device	*dev;
	int		cpu;	/* CPU this debug module belongs to */

	/* Feature flags parsed from EDDEVID/EDDEVID1 at probe time */
	bool		edpcsr_present;
	bool		edcidsr_present;
	bool		edvidsr_present;
	bool		pc_has_offset;

	/* Latest register snapshot taken by debug_read_regs() */
	u32		edpcsr;
	u32		edpcsr_hi;
	u32		edprsr;
	u32		edvidsr;
	u32		edcidsr;
};
0102 
/* Serialises the enable/disable knob against the panic-time dump */
static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
/* Number of successfully probed debug modules */
static int debug_count;
static struct dentry *debug_debugfs_dir;

static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");
0111 
/* Clear the OS Lock so the external debug registers can be accessed */
static void debug_os_unlock(struct debug_drvdata *drvdata)
{
	/* Unlocks the debug registers */
	writel_relaxed(0x0, drvdata->base + EDOSLAR);

	/* Make sure the registers are unlocked before accessing */
	wmb();
}
0120 
0121 /*
0122  * According to ARM DDI 0487A.k, before access external debug
0123  * registers should firstly check the access permission; if any
0124  * below condition has been met then cannot access debug
0125  * registers to avoid lockup issue:
0126  *
0127  * - CPU power domain is powered off;
0128  * - The OS Double Lock is locked;
0129  *
0130  * By checking EDPRSR can get to know if meet these conditions.
0131  */
0132 static bool debug_access_permitted(struct debug_drvdata *drvdata)
0133 {
0134     /* CPU is powered off */
0135     if (!(drvdata->edprsr & EDPRSR_PU))
0136         return false;
0137 
0138     /* The OS Double Lock is locked */
0139     if (drvdata->edprsr & EDPRSR_DLK)
0140         return false;
0141 
0142     return true;
0143 }
0144 
/*
 * Ask the power controller to bring the CPU's debug power domain up,
 * then latch it on with the no-powerdown request bit so the registers
 * stay accessible while they are being sampled.
 */
static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
	u32 edprcr;

try_again:

	/*
	 * Send request to power management controller and assert
	 * DBGPWRUPREQ signal; if power management controller has
	 * sane implementation, it should enable CPU power domain
	 * in case CPU is in low power state.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	/* Wait for CPU to be powered up (timeout~=32ms) */
	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
		/*
		 * Unfortunately the CPU cannot be powered up, so return
		 * back and later has no permission to access other
		 * registers. For this case, should disable CPU low power
		 * states to ensure CPU power domain is enabled!
		 */
		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
			__func__, drvdata->cpu);
		return;
	}

	/*
	 * At this point the CPU is powered up, so set the no powerdown
	 * request bit so we don't lose power and emulate power down.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	/* Re-read EDPRSR: the domain may have dropped again meanwhile */
	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

	/* The core power domain got switched off on use, try again */
	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
		goto try_again;
}
0190 
/*
 * Snapshot the PC sampling registers into drvdata. The CPU power
 * domain is forced on for the duration of the read, and EDPRCR is
 * restored afterwards so the normal power policy is preserved.
 */
static void debug_read_regs(struct debug_drvdata *drvdata)
{
	u32 save_edprcr;

	CS_UNLOCK(drvdata->base);

	/* Unlock os lock */
	debug_os_unlock(drvdata);

	/* Save EDPRCR register so it can be restored on exit */
	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

	/*
	 * Ensure the CPU power domain is enabled so the registers
	 * are accessible.
	 */
	debug_force_cpu_powered_up(drvdata);

	if (!debug_access_permitted(drvdata))
		goto out;

	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

	/*
	 * As described in ARM DDI 0487A.k, if the processing
	 * element (PE) is in debug state, or sample-based
	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
	 * EDCIDSR, EDVIDSR and EDPCSR_HI registers also become
	 * UNKNOWN state. So directly bail out for this case.
	 */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
		goto out;

	/*
	 * A read of the EDPCSR normally has the side-effect of
	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
	 * at this point it's safe to read value from them.
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

	if (drvdata->edcidsr_present)
		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

	if (drvdata->edvidsr_present)
		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
	/* Restore EDPRCR register */
	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

	CS_LOCK(drvdata->base);
}
0244 
#ifdef CONFIG_64BIT
/*
 * On 64-bit the sampled PC spans EDPCSR_HI:EDPCSR; no instruction-set
 * offset correction is applied.
 */
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	return (unsigned long)drvdata->edpcsr_hi << 32 |
	       (unsigned long)drvdata->edpcsr;
}
#else
/*
 * On 32-bit the sampled value may carry a fixed offset (8 for ARM,
 * 4 for Thumb) when EDDEVID1.PCSROffset says so; strip the
 * instruction-state bits and subtract the offset where applicable.
 */
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
	unsigned long pc;

	pc = (unsigned long)drvdata->edpcsr;

	if (drvdata->pc_has_offset) {
		arm_inst_offset = 8;
		thumb_inst_offset = 4;
	}

	/* Handle thumb instruction: bit 0 flags Thumb state */
	if (pc & EDPCSR_THUMB) {
		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
		return pc;
	}

	/*
	 * Handle arm instruction offset, if the arm instruction
	 * is not 4 byte alignment then it's possible the case
	 * for implementation defined; keep original value for this
	 * case and print info for notice.
	 */
	if (pc & BIT(1))
		dev_emerg(drvdata->dev,
			  "Instruction offset is implementation defined\n");
	else
		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

	return pc;
}
#endif
0285 
0286 static void debug_dump_regs(struct debug_drvdata *drvdata)
0287 {
0288     struct device *dev = drvdata->dev;
0289     unsigned long pc;
0290 
0291     dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
0292           drvdata->edprsr,
0293           drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
0294           drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");
0295 
0296     if (!debug_access_permitted(drvdata)) {
0297         dev_emerg(dev, "No permission to access debug registers!\n");
0298         return;
0299     }
0300 
0301     if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
0302         dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
0303         return;
0304     }
0305 
0306     pc = debug_adjust_pc(drvdata);
0307     dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);
0308 
0309     if (drvdata->edcidsr_present)
0310         dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
0311 
0312     if (drvdata->edvidsr_present)
0313         dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
0314               drvdata->edvidsr,
0315               drvdata->edvidsr & EDVIDSR_NS ?
0316               "Non-secure" : "Secure",
0317               drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
0318                 (drvdata->edvidsr & EDVIDSR_E2 ?
0319                  "EL2" : "EL1/0"),
0320               drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
0321               drvdata->edvidsr & (u32)EDVIDSR_VMID);
0322 }
0323 
/*
 * Runs on the target CPU (via smp_call_function_single) to read
 * EDDEVID/EDDEVID1 and record which sampling registers this debug
 * module implements.
 */
static void debug_init_arch_data(void *info)
{
	struct debug_drvdata *drvdata = info;
	u32 mode, pcsr_offset;
	u32 eddevid, eddevid1;

	CS_UNLOCK(drvdata->base);

	/* Read device info */
	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

	CS_LOCK(drvdata->base);

	/* Parse implementation feature */
	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

	drvdata->edpcsr_present  = false;
	drvdata->edcidsr_present = false;
	drvdata->edvidsr_present = false;
	drvdata->pc_has_offset   = false;

	/* Each higher mode implies the registers of the lower ones */
	switch (mode) {
	case EDDEVID_IMPL_FULL:
		drvdata->edvidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
		drvdata->edcidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR:
		/*
		 * In ARM DDI 0487A.k, the EDDEVID1.PCSROffset is used to
		 * define if has the offset for PC sampling value; if read
		 * back EDDEVID1.PCSROffset == 0x2, then this means the debug
		 * module does not sample the instruction set state when
		 * armv8 CPU in AArch32 state.
		 */
		drvdata->edpcsr_present =
			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

		drvdata->pc_has_offset =
			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
		break;
	default:
		break;
	}
}
0373 
0374 /*
0375  * Dump out information on panic.
0376  */
0377 static int debug_notifier_call(struct notifier_block *self,
0378                    unsigned long v, void *p)
0379 {
0380     int cpu;
0381     struct debug_drvdata *drvdata;
0382 
0383     /* Bail out if we can't acquire the mutex or the functionality is off */
0384     if (!mutex_trylock(&debug_lock))
0385         return NOTIFY_DONE;
0386 
0387     if (!debug_enable)
0388         goto skip_dump;
0389 
0390     pr_emerg("ARM external debug module:\n");
0391 
0392     for_each_possible_cpu(cpu) {
0393         drvdata = per_cpu(debug_drvdata, cpu);
0394         if (!drvdata)
0395             continue;
0396 
0397         dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);
0398 
0399         debug_read_regs(drvdata);
0400         debug_dump_regs(drvdata);
0401     }
0402 
0403 skip_dump:
0404     mutex_unlock(&debug_lock);
0405     return NOTIFY_DONE;
0406 }
0407 
/* Hooked onto the kernel panic notifier chain by debug_func_init() */
static struct notifier_block debug_notifier = {
	.notifier_call = debug_notifier_call,
};
0411 
0412 static int debug_enable_func(void)
0413 {
0414     struct debug_drvdata *drvdata;
0415     int cpu, ret = 0;
0416     cpumask_t mask;
0417 
0418     /*
0419      * Use cpumask to track which debug power domains have
0420      * been powered on and use it to handle failure case.
0421      */
0422     cpumask_clear(&mask);
0423 
0424     for_each_possible_cpu(cpu) {
0425         drvdata = per_cpu(debug_drvdata, cpu);
0426         if (!drvdata)
0427             continue;
0428 
0429         ret = pm_runtime_get_sync(drvdata->dev);
0430         if (ret < 0)
0431             goto err;
0432         else
0433             cpumask_set_cpu(cpu, &mask);
0434     }
0435 
0436     return 0;
0437 
0438 err:
0439     /*
0440      * If pm_runtime_get_sync() has failed, need rollback on
0441      * all the other CPUs that have been enabled before that.
0442      */
0443     for_each_cpu(cpu, &mask) {
0444         drvdata = per_cpu(debug_drvdata, cpu);
0445         pm_runtime_put_noidle(drvdata->dev);
0446     }
0447 
0448     return ret;
0449 }
0450 
0451 static int debug_disable_func(void)
0452 {
0453     struct debug_drvdata *drvdata;
0454     int cpu, ret, err = 0;
0455 
0456     /*
0457      * Disable debug power domains, records the error and keep
0458      * circling through all other CPUs when an error has been
0459      * encountered.
0460      */
0461     for_each_possible_cpu(cpu) {
0462         drvdata = per_cpu(debug_drvdata, cpu);
0463         if (!drvdata)
0464             continue;
0465 
0466         ret = pm_runtime_put(drvdata->dev);
0467         if (ret < 0)
0468             err = ret;
0469     }
0470 
0471     return err;
0472 }
0473 
/*
 * debugfs write handler for the "enable" knob: accepts "0"/"1" and
 * powers the debug domains down or up accordingly.
 */
static ssize_t debug_func_knob_write(struct file *f,
		const char __user *buf, size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	/* Base 2 restricts the accepted input to "0" and "1" */
	ret = kstrtou8_from_user(buf, count, 2, &val);
	if (ret)
		return ret;

	mutex_lock(&debug_lock);

	/* Nothing to do when the requested state is already current */
	if (val == debug_enable)
		goto out;

	if (val)
		ret = debug_enable_func();
	else
		ret = debug_disable_func();

	if (ret) {
		pr_err("%s: unable to %s debug function: %d\n",
		       __func__, val ? "enable" : "disable", ret);
		goto err;
	}

	debug_enable = val;
out:
	/* Success path: report the whole buffer as consumed */
	ret = count;
err:
	mutex_unlock(&debug_lock);
	return ret;
}
0507 
0508 static ssize_t debug_func_knob_read(struct file *f,
0509         char __user *ubuf, size_t count, loff_t *ppos)
0510 {
0511     ssize_t ret;
0512     char buf[3];
0513 
0514     mutex_lock(&debug_lock);
0515     snprintf(buf, sizeof(buf), "%d\n", debug_enable);
0516     mutex_unlock(&debug_lock);
0517 
0518     ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
0519     return ret;
0520 }
0521 
/* File operations for the debugfs "enable" knob */
static const struct file_operations debug_func_knob_fops = {
	.open	= simple_open,
	.read	= debug_func_knob_read,
	.write	= debug_func_knob_write,
};
0527 
0528 static int debug_func_init(void)
0529 {
0530     int ret;
0531 
0532     /* Create debugfs node */
0533     debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
0534     debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
0535                 &debug_func_knob_fops);
0536 
0537     /* Register function to be called for panic */
0538     ret = atomic_notifier_chain_register(&panic_notifier_list,
0539                          &debug_notifier);
0540     if (ret) {
0541         pr_err("%s: unable to register notifier: %d\n",
0542                __func__, ret);
0543         goto err;
0544     }
0545 
0546     return 0;
0547 
0548 err:
0549     debugfs_remove_recursive(debug_debugfs_dir);
0550     return ret;
0551 }
0552 
/* Tear down the panic notifier and the debugfs nodes */
static void debug_func_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &debug_notifier);
	debugfs_remove_recursive(debug_debugfs_dir);
}
0559 
/*
 * AMBA probe: map one CPU's debug registers, discover the implemented
 * sampling features on that CPU, and set up the shared debugfs knob
 * and panic notifier the first time a device is probed.
 */
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata;
	struct resource *res = &adev->res;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	/* Refuse duplicate devices for the same CPU */
	if (per_cpu(debug_drvdata, drvdata->cpu)) {
		dev_err(dev, "CPU%d drvdata has already been initialized\n",
			drvdata->cpu);
		return -EBUSY;
	}

	drvdata->dev = &adev->dev;
	amba_set_drvdata(adev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	/*
	 * Run the feature discovery on the target CPU itself; hold the
	 * hotplug lock so the CPU cannot go away underneath us.
	 */
	cpus_read_lock();
	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
				       drvdata, 1);
	cpus_read_unlock();

	if (ret) {
		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
		goto err;
	}

	if (!drvdata->edpcsr_present) {
		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
			drvdata->cpu);
		ret = -ENXIO;
		goto err;
	}

	/* First probed device sets up the shared debugfs/panic hooks */
	if (!debug_count++) {
		ret = debug_func_init();
		if (ret)
			goto err_func_init;
	}

	mutex_lock(&debug_lock);
	/* Turn off debug power domain if debugging is disabled */
	if (!debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
	return 0;

err_func_init:
	debug_count--;
err:
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
	return ret;
}
0631 
/*
 * AMBA remove: detach this CPU's drvdata, drop the power domain
 * reference if it is still held, and tear down the shared hooks when
 * the last device goes away.
 */
static void debug_remove(struct amba_device *adev)
{
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata = amba_get_drvdata(adev);

	/* Stop the panic notifier from touching this device */
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;

	mutex_lock(&debug_lock);
	/* Turn off debug power domain before rmmod the module */
	if (debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	/* Last device out unregisters the debugfs/panic hooks */
	if (!--debug_count)
		debug_func_exit();
}
0648 
static const struct amba_cs_uci_id uci_id_debug[] = {
	{
		/*  CPU Debug UCI data */
		.devarch	= 0x47706a15,
		.devarch_mask	= 0xfff0ffff,
		.devtype	= 0x00000015,
	}
};

/* Peripheral IDs of supported CPU debug modules */
static const struct amba_id debug_ids[] = {
	CS_AMBA_ID(0x000bbd03),				/* Cortex-A53 */
	CS_AMBA_ID(0x000bbd07),				/* Cortex-A57 */
	CS_AMBA_ID(0x000bbd08),				/* Cortex-A72 */
	CS_AMBA_ID(0x000bbd09),				/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),	/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),	/* Qualcomm Kryo */
	{},
};

MODULE_DEVICE_TABLE(amba, debug_ids);
0669 
static struct amba_driver debug_driver = {
	.drv = {
		.name   = "coresight-cpu-debug",
		/* Disallow manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe		= debug_probe,
	.remove		= debug_remove,
	.id_table	= debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");