// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#include "coresight-etm-perf.h"

#define ETB_RAM_DEPTH_REG	0x004
#define ETB_STATUS_REG		0x00c
#define ETB_RAM_READ_DATA_REG	0x010
#define ETB_RAM_READ_POINTER	0x014
#define ETB_RAM_WRITE_POINTER	0x018
#define ETB_TRG			0x01c
#define ETB_CTL_REG		0x020
#define ETB_RWD_REG		0x024
#define ETB_FFSR		0x300
#define ETB_FFCR		0x304
#define ETB_ITMISCOP0		0xee0
#define ETB_ITTRFLINACK		0xee4
#define ETB_ITTRFLIN		0xee8
#define ETB_ITATBDATA0		0xeeC
#define ETB_ITATBCTR2		0xef0
#define ETB_ITATBCTR1		0xef4
#define ETB_ITATBCTR0		0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL	BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN		BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC		BIT(0)
#define ETB_FFCR_FON_MAN	BIT(6)
#define ETB_FFCR_STOP_FI	BIT(12)
#define ETB_FFCR_STOP_TRIGGER	BIT(13)

#define ETB_FFCR_BIT		6
#define ETB_FFSR_BIT		1
#define ETB_FRAME_SIZE_WORDS	4

DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");

/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:	memory mapped base address for this component.
 * @atclk:	optional clock for the core parts of the ETB.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:	serialise programming of the hardware and driver state.
 * @reading:	synchronise user space access to the etb buffer.
 * @pid:	Process ID of the process being monitored by the session
 *		that is using this component.
 * @buf:	area of memory where ETB buffer content gets sent.
 * @mode:	how this ETB is being used (disabled, sysfs or perf).
 * @buffer_depth: size of @buf in 32-bit words.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct etb_drvdata {
	void __iomem		*base;
	struct clk		*atclk;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	local_t			reading;
	pid_t			pid;
	u8			*buf;
	u32			mode;
	u32			buffer_depth;
	u32			trigger_cntr;
};

static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle);

static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
	return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}

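/*
 * __etb_enable_hw() - program the ETB and start trace capture.
 *
 * Zero-initialise the trace RAM, reset the read/write pointers, program
 * the trigger counter and the formatter, then set the capture enable bit.
 * Called with drvdata->spinlock held.
 */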
static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
	int i;
	u32 depth;

	CS_UNLOCK(drvdata->base);

	depth = drvdata->buffer_depth;
	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* clear entire RAM buffer */
	for (i = 0; i < depth; i++)
		writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

	/* reset write RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
	/* reset read RAM pointer address */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
	writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
		       drvdata->base + ETB_FFCR);
	/* ETB trace capture enable */
	writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

	CS_LOCK(drvdata->base);
}

static int etb_enable_hw(struct etb_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	__etb_enable_hw(drvdata);
	return 0;
}

static int etb_enable_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't mess with perf sessions. */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	if (drvdata->mode == CS_MODE_DISABLED) {
		ret = etb_enable_hw(drvdata);
		if (ret)
			goto out;

		drvdata->mode = CS_MODE_SYSFS;
	}

	atomic_inc(csdev->refcnt);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

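/*
 * etb_enable_perf() - enable the ETB as a sink for a perf session.
 *
 * Refuses if the sink is already driven through sysfs or owned by a
 * different process, otherwise sets up the AUX buffer bookkeeping and
 * turns the hardware on.
 */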
static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* No need to continue if the component is already in use by sysfs. */
	if (drvdata->mode == CS_MODE_SYSFS) {
		ret = -EBUSY;
		goto out;
	}

	/* Get a handle on the pid of the process to monitor. */
	pid = buf->pid;

	if (drvdata->pid != -1 && drvdata->pid != pid) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * No HW configuration is needed if the sink is already in
	 * use for this session.
	 */
	if (drvdata->pid == pid) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * We don't have an internal state to clean up if we fail to setup
	 * the perf buffer. So we can perform the step before we turn the
	 * ETB on and leave without cleaning up.
	 */
	ret = etb_set_buffer(csdev, handle);
	if (ret)
		goto out;

	ret = etb_enable_hw(drvdata);
	if (!ret) {
		/* Associate with monitored process. */
		drvdata->pid = pid;
		drvdata->mode = CS_MODE_PERF;
		atomic_inc(csdev->refcnt);
	}

out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etb_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etb_enable_perf(csdev, data);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "ETB enabled\n");
	return 0;
}

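/*
 * __etb_disable_hw() - stop trace capture.
 *
 * Generate a manual flush so that pending trace data is pushed through
 * the formatter, then clear the capture enable bit and wait for the
 * formatter to stop.  Called with drvdata->spinlock held.
 */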
static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
	u32 ffcr;
	struct device *dev = &drvdata->csdev->dev;
	struct csdev_access *csa = &drvdata->csdev->access;

	CS_UNLOCK(drvdata->base);

	ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
	/* stop formatter when a stop has completed */
	ffcr |= ETB_FFCR_STOP_FI;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
	/* manually generate a flush of the system */
	ffcr |= ETB_FFCR_FON_MAN;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

	if (coresight_timeout(csa, ETB_FFCR, ETB_FFCR_BIT, 0)) {
		dev_err(dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	/* disable trace capture */
	writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

	if (coresight_timeout(csa, ETB_FFSR, ETB_FFSR_BIT, 1)) {
		dev_err(dev,
			"timeout while waiting for Formatter to Stop\n");
	}

	CS_LOCK(drvdata->base);
}

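/*
 * etb_dump_hw() - copy the contents of the trace RAM into drvdata->buf so
 * it can be handed to user space through the misc device.  A barrier
 * packet is inserted when the RAM has wrapped around so that decoders can
 * resynchronise.  Called with capture stopped.
 */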
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
	bool lost = false;
	int i;
	u8 *buf_ptr;
	u32 read_data, depth;
	u32 read_ptr, write_ptr;
	u32 frame_off, frame_endoff;
	struct device *dev = &drvdata->csdev->dev;

	CS_UNLOCK(drvdata->base);

	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
	frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
	if (frame_off) {
		dev_err(dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);
		dev_err(dev, "frameoff: %lu, frame_endoff: %lu\n",
			(unsigned long)frame_off, (unsigned long)frame_endoff);
		write_ptr += frame_endoff;
	}

	if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
		      & ETB_STATUS_RAM_FULL) == 0) {
		writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	} else {
		writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
		lost = true;
	}

	depth = drvdata->buffer_depth;
	buf_ptr = drvdata->buf;
	for (i = 0; i < depth; i++) {
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);

	if (frame_off) {
		buf_ptr -= (frame_endoff * 4);
		for (i = 0; i < frame_endoff; i++) {
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
			*buf_ptr++ = 0x0;
		}
	}

	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	CS_LOCK(drvdata->base);
}

static void etb_disable_hw(struct etb_drvdata *drvdata)
{
	__etb_disable_hw(drvdata);
	etb_dump_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

static int etb_disable(struct coresight_device *csdev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "ETB disabled\n");
	return 0;
}

static void *etb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void etb_free_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

static int etb_set_buffer(struct coresight_device *csdev,
			  struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

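/*
 * etb_update_buffer() - drain the trace RAM into the perf AUX ring buffer.
 *
 * Stop the ETB, work out how much data is available (accounting for a
 * possible wrap around of the trace RAM), copy it into the AUX pages,
 * reset the hardware pointers and restart capture.  Returns the number
 * of bytes copied.
 */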
static unsigned long etb_update_buffer(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config)
{
	bool lost = false;
	int i, cur;
	u8 *buf_ptr;
	const u32 *barrier;
	u32 read_ptr, write_ptr, capacity;
	u32 status, read_data;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	__etb_disable_hw(drvdata);
	CS_UNLOCK(drvdata->base);

	/* unit is in words, not bytes */
	read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
	write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * Entries should be aligned to the frame size.  If they are not,
	 * go back to the last alignment point to give decoding tools a
	 * chance to fix things.
	 */
	if (write_ptr % ETB_FRAME_SIZE_WORDS) {
		dev_err(&csdev->dev,
			"write_ptr: %lu not aligned to formatter frame size\n",
			(unsigned long)write_ptr);

		write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
		lost = true;
	}

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.  Otherwise
	 * start at the beginning and go until the write pointer has
	 * been reached.
	 */
	status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
	if (status & ETB_STATUS_RAM_FULL) {
		lost = true;
		to_read = capacity;
		read_ptr = write_ptr;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
		to_read *= ETB_FRAME_SIZE_WORDS;
	}

	/*
	 * Make sure we don't overwrite data that hasn't been consumed yet.
	 * It is entirely possible that the HW buffer has more data than the
	 * ring buffer can currently handle.  If so adjust the start address
	 * to take only the last traces.
	 *
	 * In snapshot mode we are looking to get the latest traces only and as
	 * such, we don't care about not overwriting data that hasn't been
	 * processed by user space.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

		/* The new read pointer must be frame size aligned */
		to_read = handle->size & mask;
		/*
		 * Move the RAM read pointer up, keeping in mind that
		 * everything is in frame size units.
		 */
		read_ptr = (write_ptr + drvdata->buffer_depth) -
					to_read / ETB_FRAME_SIZE_WORDS;
		/* Wrap around if need be */
		if (read_ptr > (drvdata->buffer_depth - 1))
			read_ptr -= drvdata->buffer_depth;
		/* let the decoder know we've skipped ahead */
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	/* finally tell HW where we want to start reading from */
	writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		read_data = readl_relaxed(drvdata->base +
					  ETB_RAM_READ_DATA_REG);
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			read_data = *barrier;
			barrier++;
		}

		*(u32 *)buf_ptr = read_data;
		buf_ptr += 4;

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* reset ETB buffer for next run */
	writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
	writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written.  User space will figure out how many bytes to
	 * get from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	__etb_enable_hw(drvdata);
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink etb_sink_ops = {
	.enable		= etb_enable,
	.disable	= etb_disable,
	.alloc_buffer	= etb_alloc_buffer,
	.free_buffer	= etb_free_buffer,
	.update_buffer	= etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
	.sink_ops	= &etb_sink_ops,
};

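/*
 * etb_dump() - refresh drvdata->buf with the current trace RAM contents.
 *
 * Only does anything when the sink is driven through sysfs; capture is
 * briefly stopped, dumped and restarted around the copy.
 */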
static void etb_dump(struct etb_drvdata *drvdata)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->mode == CS_MODE_SYSFS) {
		__etb_disable_hw(drvdata);
		etb_dump_hw(drvdata);
		__etb_enable_hw(drvdata);
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&drvdata->csdev->dev, "ETB dumped\n");
}

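/*
 * File operations for the /dev/<etb-name> misc device.  Only one reader
 * at a time is allowed; drvdata->reading provides the exclusion.
 */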
static int etb_open(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);

	if (local_cmpxchg(&drvdata->reading, 0, 1))
		return -EBUSY;

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static ssize_t etb_read(struct file *file, char __user *data,
			size_t len, loff_t *ppos)
{
	u32 depth;
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	struct device *dev = &drvdata->csdev->dev;

	etb_dump(drvdata);

	depth = drvdata->buffer_depth;
	if (*ppos + len > depth * 4)
		len = depth * 4 - *ppos;

	if (copy_to_user(data, drvdata->buf + *ppos, len)) {
		dev_dbg(dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(depth * 4 - *ppos));
	return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
	struct etb_drvdata *drvdata = container_of(file->private_data,
						   struct etb_drvdata, miscdev);
	local_set(&drvdata->reading, 0);

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations etb_fops = {
	.owner		= THIS_MODULE,
	.open		= etb_open,
	.read		= etb_read,
	.release	= etb_release,
	.llseek		= no_llseek,
};

#define coresight_etb10_reg(name, offset)			\
	coresight_simple_reg32(struct etb_drvdata, name, offset)

coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
	&dev_attr_rdp.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	NULL,
};

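/*
 * "trigger_cntr" sysfs attribute: number of 32-bit words the ETB keeps
 * capturing after a trigger event.  The value is written to the ETB_TRG
 * register when the hardware is enabled.
 */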
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	NULL,
};

static const struct attribute_group coresight_etb_group = {
	.attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
	.attrs = coresight_etb_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_etb_groups[] = {
	&coresight_etb_group,
	&coresight_etb_mgmt_group,
	NULL,
};

static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etb_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };

	desc.name = coresight_alloc_device_name(&etb_devs, dev);
	if (!desc.name)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}
	dev_set_drvdata(dev, drvdata);

	/* validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

	drvdata->buffer_depth = etb_get_buffer_depth(drvdata);

	if (drvdata->buffer_depth & 0x80000000)
		return -EINVAL;

	drvdata->buf = devm_kcalloc(dev,
				    drvdata->buffer_depth, 4, GFP_KERNEL);
	if (!drvdata->buf)
		return -ENOMEM;

	/* This device is not associated with a session */
	drvdata->pid = -1;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);
	adev->dev.platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &etb_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etb_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &etb_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	pm_runtime_put(&adev->dev);
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
	return ret;
}

static void etb_remove(struct amba_device *adev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops, which is
	 * etb fops in this case, device is there until last file
	 * handler to this device is closed.
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}

#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etb_runtime_resume(struct device *dev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

static const struct amba_id etb_ids[] = {
	{
		.id	= 0x000bb907,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, etb_ids);

static struct amba_driver etb_driver = {
	.drv = {
		.name	= "coresight-etb10",
		.owner	= THIS_MODULE,
		.pm	= &etb_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etb_probe,
	.remove		= etb_remove,
	.id_table	= etb_ids,
};

module_amba_driver(etb_driver);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight Embedded Trace Buffer driver");
MODULE_LICENSE("GPL v2");