0001
0002
0003
0004
0005
0006
0007 #include <linux/atomic.h>
0008 #include <linux/circ_buf.h>
0009 #include <linux/coresight.h>
0010 #include <linux/perf_event.h>
0011 #include <linux/slab.h>
0012 #include "coresight-priv.h"
0013 #include "coresight-tmc.h"
0014 #include "coresight-etm-perf.h"
0015
0016 static int tmc_set_etf_buffer(struct coresight_device *csdev,
0017 struct perf_output_handle *handle);
0018
/*
 * Program the TMC for circular-buffer (ETB) operation and start capture.
 * Caller is expected to hold drvdata->spinlock; the coresight lock
 * register is opened/closed around the register writes.
 */
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set before programming */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Enable formatting, trigger insertion and flush-on-trigger */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
0037
0038 static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
0039 {
0040 int rc = coresight_claim_device(drvdata->csdev);
0041
0042 if (rc)
0043 return rc;
0044
0045 __tmc_etb_enable_hw(drvdata);
0046 return 0;
0047 }
0048
0049 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
0050 {
0051 char *bufp;
0052 u32 read_data, lost;
0053
0054
0055 lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
0056 bufp = drvdata->buf;
0057 drvdata->len = 0;
0058 while (1) {
0059 read_data = readl_relaxed(drvdata->base + TMC_RRD);
0060 if (read_data == 0xFFFFFFFF)
0061 break;
0062 memcpy(bufp, &read_data, 4);
0063 bufp += 4;
0064 drvdata->len += 4;
0065 }
0066
0067 if (lost)
0068 coresight_insert_barrier_packet(drvdata->buf);
0069 return;
0070 }
0071
/*
 * Flush and stop the TMC, harvest the trace data (sysFS mode only),
 * then disable capture.  The dump must happen before tmc_disable_hw()
 * while the RAM read interface is still accessible.
 */
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	/*
	 * When operated via sysFS the buffer is read via tmc_etb_dump_hw()
	 * here; in perf mode the data is drained by tmc_update_etf_buffer()
	 * instead, so no dump is done.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
0087
/* Disable the ETB and release the self-hosted claim on the device. */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}
0093
/*
 * Program the TMC for hardware-FIFO (ETF link) operation and start it.
 * The watermark is zeroed so data flows through without buffering delay.
 */
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set before programming */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
0109
0110 static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
0111 {
0112 int rc = coresight_claim_device(drvdata->csdev);
0113
0114 if (rc)
0115 return rc;
0116
0117 __tmc_etf_enable_hw(drvdata);
0118 return 0;
0119 }
0120
/*
 * Stop the ETF FIFO and release the device claim.  The _unlocked
 * disclaim variant is used because the coresight lock register is
 * already open (CS_UNLOCK) at that point.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}
0132
0133
0134
0135
0136
0137
0138 ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
0139 loff_t pos, size_t len, char **bufpp)
0140 {
0141 ssize_t actual = len;
0142
0143
0144 if (pos + actual > drvdata->len)
0145 actual = drvdata->len - pos;
0146 if (actual > 0)
0147 *bufpp = drvdata->buf + pos;
0148 return actual;
0149 }
0150
/*
 * Enable the ETB/ETF as a sysFS-driven sink.  Allocates the bounce
 * buffer on first use (dropping the spinlock around the allocation) and
 * refcounts subsequent enables so multiple sysFS users can share it.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched — just bump the refcount.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata->buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer or the sysFS
	 * interface is disabled (see tmc_read_unprepare_etb()).
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Enable failed — don't keep the freshly allocated buffer */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
0224
/*
 * Enable the ETB/ETF as a perf-driven sink.  The sink is tied to a
 * single owner pid per session; concurrent sessions from other
 * processes are refused with -EBUSY.
 */
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;

		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		/* Sink already in use by another session/process */
		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session — just take another reference.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
0281
0282 static int tmc_enable_etf_sink(struct coresight_device *csdev,
0283 u32 mode, void *data)
0284 {
0285 int ret;
0286
0287 switch (mode) {
0288 case CS_MODE_SYSFS:
0289 ret = tmc_enable_etf_sink_sysfs(csdev);
0290 break;
0291 case CS_MODE_PERF:
0292 ret = tmc_enable_etf_sink_perf(csdev, data);
0293 break;
0294
0295 default:
0296 ret = -EINVAL;
0297 break;
0298 }
0299
0300 if (ret)
0301 return ret;
0302
0303 dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
0304 return 0;
0305 }
0306
/*
 * Drop one reference on the sink; the last user actually disables the
 * hardware and dissociates the sink from the monitored process.
 * Refused with -EBUSY while the buffer is being read through sysFS.
 */
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Not the last user — leave the hardware running */
	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (should) have a session but the HW says otherwise */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);

	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}
0336
/*
 * Enable the ETF as a link (HW FIFO pass-through).  refcnt[0] counts
 * link users under the spinlock; only the first user programs the
 * hardware.
 */
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* First user turns the FIFO on */
	if (atomic_read(&csdev->refcnt[0]) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt[0]);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Log outside the spinlock */
	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}
0366
/*
 * Drop one link reference; the last user disables the ETF FIFO.
 * A read in progress (sysFS) blocks the disable.
 */
static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Last user turns the FIFO off */
	if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Log outside the spinlock */
	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
0390
0391 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
0392 struct perf_event *event, void **pages,
0393 int nr_pages, bool overwrite)
0394 {
0395 int node;
0396 struct cs_buffers *buf;
0397
0398 node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
0399
0400
0401 buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
0402 if (!buf)
0403 return NULL;
0404
0405 buf->pid = task_pid_nr(event->owner);
0406 buf->snapshot = overwrite;
0407 buf->nr_pages = nr_pages;
0408 buf->data_pages = pages;
0409
0410 return buf;
0411 }
0412
/* Release the cs_buffers descriptor created by tmc_alloc_etf_buffer(). */
static void tmc_free_etf_buffer(void *config)
{
	kfree(config);
}
0419
0420 static int tmc_set_etf_buffer(struct coresight_device *csdev,
0421 struct perf_output_handle *handle)
0422 {
0423 int ret = 0;
0424 unsigned long head;
0425 struct cs_buffers *buf = etm_perf_sink_config(handle);
0426
0427 if (!buf)
0428 return -EINVAL;
0429
0430
0431 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
0432
0433
0434 buf->cur = head / PAGE_SIZE;
0435
0436
0437 buf->offset = head % PAGE_SIZE;
0438
0439 local_set(&buf->data_size, 0);
0440
0441 return ret;
0442 }
0443
/*
 * Drain the TMC RAM into the perf AUX ring buffer pages and return the
 * number of bytes collected.  Must only run while the sink is in perf
 * mode with a single active user.
 */
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so the whole RAM (drvdata->size) is valid;
	 * otherwise only the span between the read and write pointers.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the
	 * latest traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Align the read size to the memory width so the hardware
		 * read pointer lands on a valid boundary.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* Copy the RAM contents, 4 bytes at a time, into the AUX pages */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		/* On overflow, lead with a barrier packet so decoders resync */
		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written.  User space will figure out how many
	 * bytes to get from the AUX buffer based on the head position.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * NOTE(review): CS_LOCK() presumably provides the barrier needed to
	 * make the copied data visible before the caller publishes the new
	 * aux head — confirm against the CS_LOCK definition.
	 */
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}
0566
/* Sink operations shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};
0574
/* Link (pass-through FIFO) operations — ETF only */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};
0579
/* An ETB can only act as a sink */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};
0583
/* An ETF can act either as a sink or as a link in the trace path */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};
0588
/*
 * Prepare the ETB/ETF buffer for reading through sysFS: stop the
 * hardware if it is running in sysFS mode and mark the device busy.
 * Returns 0 on success, -EBUSY if already reading, -EINVAL otherwise.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at probe time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata->buf is NULL there is no trace data to read */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
0636
/*
 * Undo tmc_read_prepare_etb(): restart the hardware if a sysFS session
 * is still active, otherwise detach and free the trace buffer.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at probe time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  Zero the buffer first so we don't end up with
		 * stale data from the run that was just read.
		 *
		 * Since the tracer is still enabled drvdata->buf can't
		 * be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to check 'buf' for NULL since kfree(NULL) is a no-op.
	 */
	kfree(buf);

	return 0;
}