0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #define DRVNAME "arm_trbe"
0015
0016 #define pr_fmt(fmt) DRVNAME ": " fmt
0017
0018 #include <asm/barrier.h>
0019 #include <asm/cpufeature.h>
0020
0021 #include "coresight-self-hosted-trace.h"
0022 #include "coresight-trbe.h"
0023
/* Convert a free-running perf AUX index into an offset within the ring buffer. */
#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))

/*
 * ETE trace "ignore" packet byte. Gaps created in the buffer (for alignment
 * or erratum workarounds) are filled with this value so that trace decoders
 * skip over them harmlessly.
 */
#define ETE_IGNORE_PACKET 0x70
0035
0036
0037
0038
0039
0040
0041
0042
0043
/* Minimum usable space (bytes) worth enabling the TRBE for. */
#define TRBE_TRACE_MIN_BUF_SIZE 64

/* How the IRQ/fault path should respond to a TRBE buffer management event. */
enum trbe_fault_action {
	TRBE_FAULT_ACT_WRAP,		/* Buffer filled; collect trace and restart */
	TRBE_FAULT_ACT_SPURIOUS,	/* Nothing actionable; simply re-enable */
	TRBE_FAULT_ACT_FATAL,		/* Unrecoverable; truncate the session */
};
0051
/*
 * Per-session TRBE buffer state. The perf AUX pages are vmap()'ed into a
 * virtually contiguous range starting at trbe_base; the hardware is then
 * programmed with a sub-range [trbe_hw_base, trbe_limit) and a current
 * write pointer trbe_write (see trbe_enable_hw()).
 */
struct trbe_buf {
	/* Start of the vmap()'ed kernel mapping of the AUX pages */
	unsigned long trbe_base;
	/* Base actually programmed into the hardware (may differ from
	 * trbe_base when erratum workarounds skip leading bytes) */
	unsigned long trbe_hw_base;
	unsigned long trbe_limit;	/* Programmed LIMIT pointer (exclusive) */
	unsigned long trbe_write;	/* Programmed WRITE pointer */
	int nr_pages;			/* Number of AUX pages mapped */
	void **pages;			/* The perf-supplied AUX page array */
	bool snapshot;			/* Perf snapshot mode session? */
	struct trbe_cpudata *cpudata;	/* Owning per-CPU TRBE state */
};
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
/*
 * Driver-local erratum indices. Each maps (via trbe_errata_cpucaps[]) to an
 * arm64 cpucap so that per-CPU applicability can be tested with
 * this_cpu_has_cap() and cached in trbe_cpudata::errata.
 */
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE 0
#define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE 1
#define TRBE_NEEDS_DRAIN_AFTER_DISABLE 2
#define TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE 3
#define TRBE_IS_BROKEN 4

static int trbe_errata_cpucaps[] = {
	[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
	[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
	[TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
	[TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE] = ARM64_WORKAROUND_2038923,
	[TRBE_IS_BROKEN] = ARM64_WORKAROUND_1902691,
	-1,		/* Sentinel, caught by the WARN in trbe_check_errata() */
};

/* Number of real entries, excluding the -1 sentinel above. */
#define TRBE_ERRATA_MAX (ARRAY_SIZE(trbe_errata_cpucaps) - 1)

/*
 * Bytes skipped at the start of the buffer for the OVERWRITE_FILL_MODE
 * workaround; the skipped region is padded with ETE ignore packets.
 */
#define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES 256
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
/* Per-CPU TRBE instance state, allocated per-cpu in arm_trbe_probe_coresight(). */
struct trbe_cpudata {
	bool trbe_flag;			/* TRBIDR flag-update capability (sysfs "flag") */
	u64 trbe_hw_align;		/* Hardware pointer alignment from TRBIDR */
	u64 trbe_align;			/* Effective alignment used by the driver
					 * (PAGE_SIZE when the overwrite erratum applies) */
	int cpu;			/* CPU this instance belongs to */
	enum cs_mode mode;		/* Current sink mode (perf or disabled) */
	struct trbe_buf *buf;		/* Active session buffer, if any */
	struct trbe_drvdata *drvdata;	/* Back-pointer to driver-wide data */
	DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);	/* Errata present on this CPU */
};
0136
/* Driver-wide state shared across all per-CPU TRBE instances. */
struct trbe_drvdata {
	struct trbe_cpudata __percpu *cpudata;		/* Per-CPU instance state */
	struct perf_output_handle * __percpu *handle;	/* Per-CPU active perf handle,
							 * also the percpu IRQ cookie */
	struct hlist_node hotplug_node;			/* CPU hotplug instance link */
	int irq;					/* The TRBE PPI */
	cpumask_t supported_cpus;			/* CPUs with a usable TRBE */
	enum cpuhp_state trbe_online;			/* Dynamic hotplug state id */
	struct platform_device *pdev;
};
0146
0147 static void trbe_check_errata(struct trbe_cpudata *cpudata)
0148 {
0149 int i;
0150
0151 for (i = 0; i < TRBE_ERRATA_MAX; i++) {
0152 int cap = trbe_errata_cpucaps[i];
0153
0154 if (WARN_ON_ONCE(cap < 0))
0155 return;
0156 if (this_cpu_has_cap(cap))
0157 set_bit(i, cpudata->errata);
0158 }
0159 }
0160
0161 static inline bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
0162 {
0163 return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
0164 }
0165
/* CPU may corrupt trace when the buffer wraps in fill mode
 * (ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE). */
static inline bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
}

/* CPU may write up to a page beyond the programmed LIMIT pointer
 * (ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE). */
static inline bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}

/* CPU needs an explicit drain after clearing TRBLIMITR.E
 * (ARM64_WORKAROUND_2064142); see set_trbe_disabled(). */
static inline bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
}

/* CPU needs an extra context synchronization event after setting TRBLIMITR.E
 * (ARM64_WORKAROUND_2038923); see set_trbe_enabled(). */
static inline bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
}

/* TRBE is unusable on this CPU (ARM64_WORKAROUND_1902691); probing bails out. */
static inline bool trbe_is_broken(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
}
0201
0202 static int trbe_alloc_node(struct perf_event *event)
0203 {
0204 if (event->cpu == -1)
0205 return NUMA_NO_NODE;
0206 return cpu_to_node(event->cpu);
0207 }
0208
/* Push any buffered trace out to memory: TSB CSYNC then a data barrier. */
static inline void trbe_drain_buffer(void)
{
	tsb_csync();
	dsb(nsh);
}

/* Set TRBLIMITR.E with the rest of @trblimitr already composed by the caller. */
static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
	trblimitr |= TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);

	/* Synchronize the register write before tracing can begin. */
	isb();

	/*
	 * Affected CPUs need a second context synchronization event after
	 * enabling (ARM64_WORKAROUND_2038923 via trbe_errata_cpucaps[]).
	 */
	if (trbe_needs_ctxt_sync_after_enable(cpudata))
		isb();
}

/* Clear TRBLIMITR.E, applying the post-disable drain erratum if needed. */
static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	trblimitr &= ~TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);

	/* ARM64_WORKAROUND_2064142: drain explicitly after disabling. */
	if (trbe_needs_drain_after_disable(cpudata))
		trbe_drain_buffer();
	isb();
}

/* Drain outstanding trace to memory, then disable the TRBE on this CPU. */
static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
{
	trbe_drain_buffer();
	set_trbe_disabled(cpudata);
}

/* Disable the TRBE and zero all its programming registers on this CPU. */
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
	trbe_drain_and_disable_local(cpudata);
	write_sysreg_s(0, SYS_TRBLIMITR_EL1);
	write_sysreg_s(0, SYS_TRBPTR_EL1);
	write_sysreg_s(0, SYS_TRBBASER_EL1);
	write_sysreg_s(0, SYS_TRBSR_EL1);
}
0261
/*
 * Tell userspace that the TRBE wrapped. PERF_AUX_FLAG_COLLISION is the flag
 * used here to mark the AUX record; a wrap may imply trace was overwritten
 * or padded, so consumers should treat the record accordingly.
 */
static void trbe_report_wrap_event(struct perf_output_handle *handle)
{
	perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
}

/*
 * Terminate the session on an unrecoverable condition: quiesce the hardware
 * first, then mark the AUX record truncated, close it with zero size, and
 * clear the per-CPU handle so the IRQ handler ignores late events.
 */
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	/* Hardware must be stopped before the handle is torn down. */
	trbe_drain_and_disable_local(buf->cpudata);
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341 static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
0342 {
0343 memset((void *)buf->trbe_base + offset, ETE_IGNORE_PACKET, len);
0344 }
0345
0346 static void trbe_pad_buf(struct perf_output_handle *handle, int len)
0347 {
0348 struct trbe_buf *buf = etm_perf_sink_config(handle);
0349 u64 head = PERF_IDX2OFF(handle->head, buf);
0350
0351 __trbe_pad_buf(buf, head, len);
0352 if (!buf->snapshot)
0353 perf_aux_output_skip(handle, len);
0354 }
0355
0356 static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
0357 {
0358 struct trbe_buf *buf = etm_perf_sink_config(handle);
0359
0360
0361
0362
0363
0364
0365 return buf->nr_pages * PAGE_SIZE;
0366 }
0367
0368 static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
0369 {
0370 u64 size = TRBE_TRACE_MIN_BUF_SIZE;
0371 struct trbe_buf *buf = etm_perf_sink_config(handle);
0372 struct trbe_cpudata *cpudata = buf->cpudata;
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382 if (trbe_may_write_out_of_range(cpudata))
0383 size += PAGE_SIZE;
0384 return size;
0385 }
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
/*
 * Compute the buffer offset at which the TRBE LIMIT pointer should be
 * placed for a non-snapshot session, padding the buffer as needed so that
 * the head meets the hardware alignment requirement. Returns 0 when no
 * usable space remains (the remaining space is padded away).
 */
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * The write pointer programmed into the hardware must honour
	 * cpudata->trbe_align; pad forward to the next aligned offset,
	 * but never past the space perf has granted us (handle->size).
	 */
	if (!IS_ALIGNED(head, cpudata->trbe_align)) {
		unsigned long delta = roundup(head, cpudata->trbe_align) - head;

		delta = min(delta, handle->size);
		trbe_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/* Alignment padding may have consumed everything perf gave us. */
	if (!handle->size)
		return 0;

	/* Wrapped offsets of the end of granted space and the wakeup point. */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * If the granted region does not wrap around the end of the ring
	 * buffer, the LIMIT pointer stops at the last whole page before
	 * tail; otherwise we can run to the end of the buffer (bufsize).
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * If the wakeup point falls within this run, stop no later than
	 * the page containing it so the wakeup fires promptly.
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	/* A limit beyond head means there is real space to trace into. */
	if (limit > head)
		return limit;

	/* No page-granular space left; pad the remainder and report none. */
	trbe_pad_buf(handle, handle->size);
	return 0;
}
0545
0546 static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
0547 {
0548 struct trbe_buf *buf = etm_perf_sink_config(handle);
0549 u64 limit = __trbe_normal_offset(handle);
0550 u64 head = PERF_IDX2OFF(handle->head, buf);
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560 while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
0561 trbe_pad_buf(handle, limit - head);
0562 limit = __trbe_normal_offset(handle);
0563 head = PERF_IDX2OFF(handle->head, buf);
0564 }
0565 return limit;
0566 }
0567
0568 static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
0569 {
0570 struct trbe_buf *buf = etm_perf_sink_config(handle);
0571 unsigned long offset;
0572
0573 if (buf->snapshot)
0574 offset = trbe_snapshot_offset(handle);
0575 else
0576 offset = trbe_normal_offset(handle);
0577 return buf->trbe_base + offset;
0578 }
0579
/* Clear all event/status bits in TRBSR_EL1; only legal while disabled. */
static void clr_trbe_status(void)
{
	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);

	WARN_ON(is_trbe_enabled());
	trbsr &= ~TRBSR_IRQ;
	trbsr &= ~TRBSR_TRG;
	trbsr &= ~TRBSR_WRAP;
	trbsr &= ~(TRBSR_EC_MASK << TRBSR_EC_SHIFT);
	trbsr &= ~(TRBSR_BSC_MASK << TRBSR_BSC_SHIFT);
	trbsr &= ~TRBSR_STOP;
	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
}

/*
 * Program TRBLIMITR_EL1 with the session's limit pointer, fill mode and
 * trigger mode, then enable the TRBE via set_trbe_enabled().
 */
static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
	unsigned long addr = buf->trbe_limit;

	/* The LIMIT pointer must be page aligned (and hardware aligned). */
	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_LIMIT_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	/* Start from a clean field state before composing the new value. */
	trblimitr &= ~TRBLIMITR_NVM;
	trblimitr &= ~(TRBLIMITR_FILL_MODE_MASK << TRBLIMITR_FILL_MODE_SHIFT);
	trblimitr &= ~(TRBLIMITR_TRIG_MODE_MASK << TRBLIMITR_TRIG_MODE_SHIFT);
	trblimitr &= ~(TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);

	/* Fill mode: stop collecting when the write pointer hits the limit. */
	trblimitr |= (TRBE_FILL_MODE_FILL & TRBLIMITR_FILL_MODE_MASK) << TRBLIMITR_FILL_MODE_SHIFT;

	/* Trigger events are not used by this driver; ignore them. */
	trblimitr |= (TRBE_TRIG_MODE_IGNORE & TRBLIMITR_TRIG_MODE_MASK) <<
		     TRBLIMITR_TRIG_MODE_SHIFT;
	trblimitr |= (addr & PAGE_MASK);
	set_trbe_enabled(buf->cpudata, trblimitr);
}

/*
 * Fully (re)program the TRBE for @buf: disable, clear status, set base and
 * write pointers, synchronize, then set the limit pointer and enable.
 * The isb() before enabling ensures the pointer writes are visible to the
 * trace unit before TRBLIMITR.E is set.
 */
static void trbe_enable_hw(struct trbe_buf *buf)
{
	WARN_ON(buf->trbe_hw_base < buf->trbe_base);
	WARN_ON(buf->trbe_write < buf->trbe_hw_base);
	WARN_ON(buf->trbe_write >= buf->trbe_limit);
	set_trbe_disabled(buf->cpudata);
	clr_trbe_status();
	set_trbe_base_pointer(buf->trbe_hw_base);
	set_trbe_write_pointer(buf->trbe_write);

	isb();
	set_trbe_limit_pointer_enabled(buf);
}
0645
/*
 * Decode TRBSR_EL1 (@trbsr) into the action the caller should take.
 * Triggers, external aborts and stage-1/2 aborts are fatal; a clean
 * "buffer filled" wrap is recoverable; anything else is spurious.
 */
static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
						 u64 trbsr)
{
	int ec = get_trbe_ec(trbsr);
	int bsc = get_trbe_bsc(trbsr);
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;

	/* Collection should already have stopped when this is called. */
	WARN_ON(is_trbe_running(trbsr));
	if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
		return TRBE_FAULT_ACT_FATAL;

	if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
		return TRBE_FAULT_ACT_FATAL;

	/*
	 * A genuine wrap leaves the write pointer back at base — except on
	 * CPUs with the overwrite-in-fill-mode erratum, where the pointer
	 * cannot be trusted, so accept the wrap on the status bits alone.
	 */
	if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
	    (trbe_may_overwrite_in_fill_mode(cpudata) ||
	     get_trbe_write_pointer() == get_trbe_base_pointer()))
		return TRBE_FAULT_ACT_WRAP;

	return TRBE_FAULT_ACT_SPURIOUS;
}

/*
 * Number of bytes of trace captured in the current run, measured from the
 * perf head to the hardware write pointer (or the limit pointer after a
 * wrap). Also pads out the erratum skip region at the start of the run.
 */
static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
					 struct trbe_buf *buf, bool wrap)
{
	u64 write;
	u64 start_off, end_off;
	u64 size;
	u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;

	/*
	 * After a wrap the write pointer has been reset by the hardware, so
	 * the amount collected is everything up to the programmed limit.
	 */
	if (wrap)
		write = get_trbe_limit_pointer();
	else
		write = get_trbe_write_pointer();

	end_off = write - buf->trbe_base;
	start_off = PERF_IDX2OFF(handle->head, buf);

	/* The write pointer must not be behind where this run started. */
	if (WARN_ON_ONCE(end_off < start_off))
		return 0;

	size = end_off - start_off;

	/*
	 * The skip region inserted for the overwrite-in-fill-mode erratum
	 * (see trbe_apply_work_around_before_enable()) carries no trace;
	 * overwrite it with ignore packets so decoders pass through it.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
	    !WARN_ON(size < overwrite_skip))
		__trbe_pad_buf(buf, start_off, overwrite_skip);

	return size;
}
0725
0726 static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
0727 struct perf_event *event, void **pages,
0728 int nr_pages, bool snapshot)
0729 {
0730 struct trbe_buf *buf;
0731 struct page **pglist;
0732 int i;
0733
0734
0735
0736
0737
0738
0739
0740 if (nr_pages < 2)
0741 return NULL;
0742
0743 buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
0744 if (!buf)
0745 return ERR_PTR(-ENOMEM);
0746
0747 pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
0748 if (!pglist) {
0749 kfree(buf);
0750 return ERR_PTR(-ENOMEM);
0751 }
0752
0753 for (i = 0; i < nr_pages; i++)
0754 pglist[i] = virt_to_page(pages[i]);
0755
0756 buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
0757 if (!buf->trbe_base) {
0758 kfree(pglist);
0759 kfree(buf);
0760 return ERR_PTR(-ENOMEM);
0761 }
0762 buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
0763 buf->trbe_write = buf->trbe_base;
0764 buf->snapshot = snapshot;
0765 buf->nr_pages = nr_pages;
0766 buf->pages = pages;
0767 kfree(pglist);
0768 return buf;
0769 }
0770
0771 static void arm_trbe_free_buffer(void *config)
0772 {
0773 struct trbe_buf *buf = config;
0774
0775 vunmap((void *)buf->trbe_base);
0776 kfree(buf);
0777 }
0778
/*
 * Perf "update buffer" callback: stop collection and report how many bytes
 * of trace were captured since the session (re)started. Runs with IRQs
 * disabled to serialize against the TRBE IRQ handler on this CPU.
 */
static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
					    struct perf_output_handle *handle,
					    void *config)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = config;
	enum trbe_fault_action act;
	unsigned long size, status;
	unsigned long flags;
	bool wrap = false;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return 0;

	/*
	 * Block the TRBE IRQ handler while the hardware state and the perf
	 * handle are inspected/torn down; otherwise a buffer management
	 * event racing with us could double-handle the same trace data.
	 */
	local_irq_save(flags);

	/*
	 * If the TRBE is already disabled, the IRQ handler has run and dealt
	 * with the event (and possibly closed the handle); nothing to claim.
	 */
	if (!is_trbe_enabled()) {
		size = 0;
		goto done;
	}

	/* Stop collection and push outstanding trace to memory. */
	trbe_drain_and_disable_local(cpudata);

	/* A pending, not-yet-taken IRQ shows up as TRBSR.IRQ being set. */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	if (is_trbe_irq(status)) {
		/*
		 * Handle the event here instead of letting the (now moot)
		 * IRQ fire later against a dead handle.
		 */
		clr_trbe_irq();
		isb();

		act = trbe_get_fault_act(handle, status);

		/* Anything other than a clean wrap yields no usable trace. */
		if (act != TRBE_FAULT_ACT_WRAP) {
			size = 0;
			goto done;
		}

		trbe_report_wrap_event(handle);
		wrap = true;
	}

	size = trbe_get_trace_size(handle, buf, wrap);

done:
	local_irq_restore(flags);

	/* In snapshot mode the driver advances the head itself. */
	if (buf->snapshot)
		handle->head += size;
	return size;
}
0865
0866
/*
 * Adjust the computed buffer geometry for CPU errata before programming the
 * hardware. Called from __arm_trbe_enable() after trbe_limit/trbe_write
 * have been computed. Returns 0, or -EINVAL if an invariant the workaround
 * relies on does not hold (the session is then truncated by the caller).
 */
static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
{
	/*
	 * TRBE_WORKAROUND_OVERWRITE_FILL_MODE: start the hardware base at
	 * the (page aligned — trbe_align is PAGE_SIZE on affected CPUs, see
	 * arm_trbe_probe_cpu()) write offset and skip the first
	 * TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES of the run; the
	 * skip region is later padded out in trbe_get_trace_size().
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
		if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_hw_base = buf->trbe_write;
		buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
	}

	/*
	 * TRBE_WORKAROUND_WRITE_OUT_OF_RANGE: the CPU may write past the
	 * programmed limit, so keep one spare page below the real limit as
	 * a guard. trbe_min_trace_buf_size() guarantees the extra page of
	 * space exists; the WARN double-checks that and the alignment.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
		s64 space = buf->trbe_limit - buf->trbe_write;

		if (WARN_ON(space <= PAGE_SIZE ||
			    !IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_limit -= PAGE_SIZE;
	}

	return 0;
}
0980
/*
 * Compute the buffer geometry for @handle, apply erratum adjustments, and
 * start the TRBE. On any failure the event is stopped and truncated.
 */
static int __arm_trbe_enable(struct trbe_buf *buf,
			     struct perf_output_handle *handle)
{
	int ret = 0;

	/* TRBE produces raw, unformatted (non-frame) trace data. */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	/* limit == base means compute_trbe_buffer_limit() found no space. */
	if (buf->trbe_limit == buf->trbe_base) {
		ret = -ENOSPC;
		goto err;
	}

	buf->trbe_hw_base = buf->trbe_base;

	ret = trbe_apply_work_around_before_enable(buf);
	if (ret)
		goto err;

	/* Publish the handle for the IRQ handler before enabling. */
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
	trbe_enable_hw(buf);
	return 0;
err:
	trbe_stop_and_truncate_event(handle);
	return ret;
}
1007
1008 static int arm_trbe_enable(struct coresight_device *csdev, u32 mode, void *data)
1009 {
1010 struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1011 struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
1012 struct perf_output_handle *handle = data;
1013 struct trbe_buf *buf = etm_perf_sink_config(handle);
1014
1015 WARN_ON(cpudata->cpu != smp_processor_id());
1016 WARN_ON(cpudata->drvdata != drvdata);
1017 if (mode != CS_MODE_PERF)
1018 return -EINVAL;
1019
1020 cpudata->buf = buf;
1021 cpudata->mode = mode;
1022 buf->cpudata = cpudata;
1023
1024 return __arm_trbe_enable(buf, handle);
1025 }
1026
1027 static int arm_trbe_disable(struct coresight_device *csdev)
1028 {
1029 struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1030 struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
1031 struct trbe_buf *buf = cpudata->buf;
1032
1033 WARN_ON(buf->cpudata != cpudata);
1034 WARN_ON(cpudata->cpu != smp_processor_id());
1035 WARN_ON(cpudata->drvdata != drvdata);
1036 if (cpudata->mode != CS_MODE_PERF)
1037 return -EINVAL;
1038
1039 trbe_drain_and_disable_local(cpudata);
1040 buf->cpudata = NULL;
1041 cpudata->buf = NULL;
1042 cpudata->mode = CS_MODE_DISABLED;
1043 return 0;
1044 }
1045
/*
 * A buffer management event fired without anything actionable: re-enable
 * the TRBE with the TRBLIMITR value it already had, resuming collection.
 */
static void trbe_handle_spurious(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	set_trbe_enabled(buf->cpudata, trblimitr);
}

/*
 * Handle a buffer-full wrap from the IRQ path: close the current AUX
 * record with the captured size, open a new one, and restart the TRBE.
 * Returns non-zero when the session could not be restarted.
 */
static int trbe_handle_overflow(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long size;
	struct etm_event_data *event_data;

	size = trbe_get_trace_size(handle, buf, true);
	/* Snapshot mode: the driver, not perf, advances the head. */
	if (buf->snapshot)
		handle->head += size;

	trbe_report_wrap_event(handle);
	perf_aux_output_end(handle, size);
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data) {
		/*
		 * No AUX space is available to restart into: quiesce the
		 * hardware and clear the per-CPU handle so subsequent
		 * events are ignored until the session is restarted.
		 */
		trbe_drain_and_disable_local(buf->cpudata);
		*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
		return -EINVAL;
	}

	return __arm_trbe_enable(buf, handle);
}
1087
1088 static bool is_perf_trbe(struct perf_output_handle *handle)
1089 {
1090 struct trbe_buf *buf = etm_perf_sink_config(handle);
1091 struct trbe_cpudata *cpudata = buf->cpudata;
1092 struct trbe_drvdata *drvdata = cpudata->drvdata;
1093 int cpu = smp_processor_id();
1094
1095 WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
1096 WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
1097
1098 if (cpudata->mode != CS_MODE_PERF)
1099 return false;
1100
1101 if (cpudata->cpu != cpu)
1102 return false;
1103
1104 if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1105 return false;
1106
1107 return true;
1108 }
1109
/*
 * Per-CPU TRBE IRQ (PPI) handler. Decodes the buffer management event in
 * TRBSR_EL1 and either restarts collection (wrap/spurious) or truncates
 * the session (fatal). @dev is the per-CPU perf handle pointer registered
 * in arm_trbe_probe_irq().
 */
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
	struct perf_output_handle **handle_ptr = dev;
	struct perf_output_handle *handle = *handle_ptr;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	enum trbe_fault_action act;
	u64 status;
	bool truncated = false;
	u64 trfcr;

	status = read_sysreg_s(SYS_TRBSR_EL1);

	/* Shared PPI line: bail out if our IRQ status bit is not set. */
	if (!is_trbe_irq(status))
		return IRQ_NONE;

	/* Prohibit tracing while the event is being handled. */
	trfcr = cpu_prohibit_trace();

	/* Stop the TRBE and acknowledge the event before acting on it. */
	trbe_drain_and_disable_local(buf->cpudata);
	clr_trbe_irq();
	isb();

	/* A cleared handle means the session was already torn down. */
	if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
		return IRQ_NONE;

	if (!is_perf_trbe(handle))
		return IRQ_NONE;

	act = trbe_get_fault_act(handle, status);
	switch (act) {
	case TRBE_FAULT_ACT_WRAP:
		/* A failed restart counts as truncation of the session. */
		truncated = !!trbe_handle_overflow(handle);
		break;
	case TRBE_FAULT_ACT_SPURIOUS:
		trbe_handle_spurious(handle);
		break;
	case TRBE_FAULT_ACT_FATAL:
		trbe_stop_and_truncate_event(handle);
		truncated = true;
		break;
	}

	/*
	 * On truncation, run the pending perf irq_work immediately so the
	 * consumer is woken; otherwise restore the saved TRFCR value to
	 * re-allow tracing now that collection has been restarted.
	 */
	if (truncated)
		irq_work_run();
	else
		write_trfcr(trfcr);

	return IRQ_HANDLED;
}
1173
/* Sink callbacks wired into the CoreSight core for each per-CPU TRBE. */
static const struct coresight_ops_sink arm_trbe_sink_ops = {
	.enable		= arm_trbe_enable,
	.disable	= arm_trbe_disable,
	.alloc_buffer	= arm_trbe_alloc_buffer,
	.free_buffer	= arm_trbe_free_buffer,
	.update_buffer	= arm_trbe_update_buffer,
};

static const struct coresight_ops arm_trbe_cs_ops = {
	.sink_ops	= &arm_trbe_sink_ops,
};
1185
1186 static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
1187 {
1188 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1189
1190 return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
1191 }
1192 static DEVICE_ATTR_RO(align);
1193
1194 static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
1195 {
1196 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1197
1198 return sprintf(buf, "%d\n", cpudata->trbe_flag);
1199 }
1200 static DEVICE_ATTR_RO(flag);
1201
/* Sysfs attributes exposed on each per-CPU TRBE coresight device. */
static struct attribute *arm_trbe_attrs[] = {
	&dev_attr_align.attr,
	&dev_attr_flag.attr,
	NULL,
};

static const struct attribute_group arm_trbe_group = {
	.attrs = arm_trbe_attrs,
};

static const struct attribute_group *arm_trbe_groups[] = {
	&arm_trbe_group,
	NULL,
};
1216
1217 static void arm_trbe_enable_cpu(void *info)
1218 {
1219 struct trbe_drvdata *drvdata = info;
1220 struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
1221
1222 trbe_reset_local(cpudata);
1223 enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
1224 }
1225
/*
 * Register a per-CPU coresight sink device ("trbe<N>") for @cpu. On any
 * failure the CPU is removed from supported_cpus so it is never used.
 */
static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
	struct coresight_desc desc = { 0 };
	struct device *dev;

	/* A sink must not already be registered for this CPU. */
	if (WARN_ON(trbe_csdev))
		return;

	/* The CPU must have been probed successfully (drvdata set). */
	if (WARN_ON(!cpudata->drvdata))
		return;

	dev = &cpudata->drvdata->pdev->dev;
	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
	if (!desc.name)
		goto cpu_clear;

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
	desc.ops = &arm_trbe_cs_ops;
	desc.pdata = dev_get_platdata(dev);
	desc.groups = arm_trbe_groups;
	desc.dev = dev;
	trbe_csdev = coresight_register(&desc);
	if (IS_ERR(trbe_csdev))
		goto cpu_clear;

	dev_set_drvdata(&trbe_csdev->dev, cpudata);
	coresight_set_percpu_sink(cpu, trbe_csdev);
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
1261
1262
1263
1264
/*
 * Probe the TRBE on the calling CPU: verify availability and ownership,
 * read its alignment from TRBIDR, cache applicable errata, and populate
 * this CPU's trbe_cpudata. On failure the CPU is removed from the
 * supported mask.
 */
static void arm_trbe_probe_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	int cpu = smp_processor_id();
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	u64 trbidr;

	if (WARN_ON(!cpudata))
		goto cpu_clear;

	if (!is_trbe_available()) {
		pr_err("TRBE is not implemented on cpu %d\n", cpu);
		goto cpu_clear;
	}

	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
	if (!is_trbe_programmable(trbidr)) {
		pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
		goto cpu_clear;
	}

	cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
	if (cpudata->trbe_hw_align > SZ_2K) {
		pr_err("Unsupported alignment on cpu %d\n", cpu);
		goto cpu_clear;
	}

	/* Cache per-CPU errata before they are consulted below. */
	trbe_check_errata(cpudata);

	if (trbe_is_broken(cpudata)) {
		pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
		goto cpu_clear;
	}

	/*
	 * On CPUs with the overwrite-in-fill-mode erratum the driver-side
	 * alignment is raised to PAGE_SIZE (the workaround in
	 * trbe_apply_work_around_before_enable() relies on a page aligned
	 * write offset); otherwise use the hardware's own alignment.
	 */
	if (trbe_may_overwrite_in_fill_mode(cpudata))
		cpudata->trbe_align = PAGE_SIZE;
	else
		cpudata->trbe_align = cpudata->trbe_hw_align;

	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
	cpudata->cpu = cpu;
	cpudata->drvdata = drvdata;
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
1326
1327 static void arm_trbe_remove_coresight_cpu(void *info)
1328 {
1329 int cpu = smp_processor_id();
1330 struct trbe_drvdata *drvdata = info;
1331 struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
1332 struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
1333
1334 disable_percpu_irq(drvdata->irq);
1335 trbe_reset_local(cpudata);
1336 if (trbe_csdev) {
1337 coresight_unregister(trbe_csdev);
1338 cpudata->drvdata = NULL;
1339 coresight_set_percpu_sink(cpu, NULL);
1340 }
1341 }
1342
/*
 * Allocate per-CPU state and, for every supported CPU, probe, register and
 * enable its TRBE on that CPU via cross-calls. The supported mask is
 * re-tested after each step because probe/register clear failing CPUs
 * from it (see arm_trbe_probe_cpu()/arm_trbe_register_coresight_cpu()).
 */
static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
{
	int cpu;

	drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
	if (!drvdata->cpudata)
		return -ENOMEM;

	for_each_cpu(cpu, &drvdata->supported_cpus) {
		/* Offline CPUs are skipped; hotplug onlining handles them later. */
		if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
			continue;
		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
			arm_trbe_register_coresight_cpu(drvdata, cpu);
		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
			smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
	}
	return 0;
}
1362
1363 static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
1364 {
1365 int cpu;
1366
1367 for_each_cpu(cpu, &drvdata->supported_cpus)
1368 smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
1369 free_percpu(drvdata->cpudata);
1370 return 0;
1371 }
1372
/*
 * Probe the TRBE of a CPU that came online after initial driver probe.
 * arm_trbe_probe_cpu() reads this-CPU registers, so preemption must be
 * disabled for the duration (we are already running on the new CPU from
 * the hotplug callback).
 */
static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
{
	preempt_disable();
	arm_trbe_probe_cpu(drvdata);
	preempt_enable();
}
1379
/*
 * CPU hotplug online callback. A CPU seen for the first time (no sink
 * registered yet) is probed and registered here; a previously known CPU
 * just has its TRBE reset and IRQ re-enabled.
 */
static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
		/* No sink yet: this CPU was offline during initial probing. */
		if (!coresight_get_percpu_sink(cpu)) {
			arm_trbe_probe_hotplugged_cpu(drvdata);
			/* Probe/register may clear the CPU from the mask. */
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_register_coresight_cpu(drvdata, cpu);
			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
				arm_trbe_enable_cpu(drvdata);
		} else {
			arm_trbe_enable_cpu(drvdata);
		}
	}
	return 0;
}
1402
1403 static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
1404 {
1405 struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
1406
1407 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
1408 struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
1409
1410 disable_percpu_irq(drvdata->irq);
1411 trbe_reset_local(cpudata);
1412 }
1413 return 0;
1414 }
1415
1416 static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
1417 {
1418 enum cpuhp_state trbe_online;
1419 int ret;
1420
1421 trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
1422 arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
1423 if (trbe_online < 0)
1424 return trbe_online;
1425
1426 ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
1427 if (ret) {
1428 cpuhp_remove_multi_state(trbe_online);
1429 return ret;
1430 }
1431 drvdata->trbe_online = trbe_online;
1432 return 0;
1433 }
1434
/* Remove the dynamic hotplug state registered by arm_trbe_probe_cpuhp(). */
static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
	cpuhp_remove_multi_state(drvdata->trbe_online);
}
1439
1440 static int arm_trbe_probe_irq(struct platform_device *pdev,
1441 struct trbe_drvdata *drvdata)
1442 {
1443 int ret;
1444
1445 drvdata->irq = platform_get_irq(pdev, 0);
1446 if (drvdata->irq < 0) {
1447 pr_err("IRQ not found for the platform device\n");
1448 return drvdata->irq;
1449 }
1450
1451 if (!irq_is_percpu(drvdata->irq)) {
1452 pr_err("IRQ is not a PPI\n");
1453 return -EINVAL;
1454 }
1455
1456 if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
1457 return -EINVAL;
1458
1459 drvdata->handle = alloc_percpu(struct perf_output_handle *);
1460 if (!drvdata->handle)
1461 return -ENOMEM;
1462
1463 ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
1464 if (ret) {
1465 free_percpu(drvdata->handle);
1466 return ret;
1467 }
1468 return 0;
1469 }
1470
/* Release the percpu IRQ and the per-CPU handle slots from arm_trbe_probe_irq(). */
static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
	free_percpu_irq(drvdata->irq, drvdata->handle);
	free_percpu(drvdata->handle);
}
1476
/*
 * Platform driver probe: set up the IRQ, the per-CPU coresight sinks and
 * the hotplug callbacks. Each step is unwound on failure of a later one.
 */
static int arm_trbe_device_probe(struct platform_device *pdev)
{
	struct coresight_platform_data *pdata;
	struct trbe_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int ret;

	/*
	 * With kpti (kernel unmapped at EL0) the buffer mappings the TRBE
	 * needs are not usable; refuse to probe.
	 */
	if (arm64_kernel_unmapped_at_el0()) {
		pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
		return -EOPNOTSUPP;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	dev_set_drvdata(dev, drvdata);
	dev->platform_data = pdata;
	drvdata->pdev = pdev;
	ret = arm_trbe_probe_irq(pdev, drvdata);
	if (ret)
		return ret;

	ret = arm_trbe_probe_coresight(drvdata);
	if (ret)
		goto probe_failed;

	ret = arm_trbe_probe_cpuhp(drvdata);
	if (ret)
		goto cpuhp_failed;

	return 0;
cpuhp_failed:
	arm_trbe_remove_coresight(drvdata);
probe_failed:
	arm_trbe_remove_irq(drvdata);
	return ret;
}
1520
/* Platform driver remove: unwind probe steps in reverse order. */
static int arm_trbe_device_remove(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);

	arm_trbe_remove_cpuhp(drvdata);
	arm_trbe_remove_coresight(drvdata);
	arm_trbe_remove_irq(drvdata);
	return 0;
}
1530
/* Devicetree match: the TRBE is described as "arm,trace-buffer-extension". */
static const struct of_device_id arm_trbe_of_match[] = {
	{ .compatible = "arm,trace-buffer-extension"},
	{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);

static struct platform_driver arm_trbe_driver = {
	.driver	= {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_trbe_of_match),
		/* Manual unbind would leave per-CPU state inconsistent. */
		.suppress_bind_attrs = true,
	},
	.probe	= arm_trbe_device_probe,
	.remove	= arm_trbe_device_remove,
};
1546
1547 static int __init arm_trbe_init(void)
1548 {
1549 int ret;
1550
1551 ret = platform_driver_register(&arm_trbe_driver);
1552 if (!ret)
1553 return 0;
1554
1555 pr_err("Error registering %s platform driver\n", DRVNAME);
1556 return ret;
1557 }
1558
/* Module exit: unregister the platform driver. */
static void __exit arm_trbe_exit(void)
{
	platform_driver_unregister(&arm_trbe_driver);
}
1563 module_init(arm_trbe_init);
1564 module_exit(arm_trbe_exit);
1565
1566 MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
1567 MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
1568 MODULE_LICENSE("GPL v2");