#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"

static struct pmu etm_pmu;
static bool etm_perf_up;
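
/*
 * Per-CPU tracing context: the perf AUX output handle for the buffer this
 * CPU is writing to, and the event data (trace paths and sink configuration)
 * of the session currently running on this CPU, if any.
 */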
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
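
/*
 * PMU format attributes: these publish the perf_event_attr config bits
 * understood by the ETM drivers to user space via the PMU's "format"
 * directory in sysfs.
 */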
PMU_FORMAT_ATTR(branch_broadcast, "config:" __stringify(ETM_OPT_BRANCH_BROADCAST));
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 traces CONTEXTIDR_EL1 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 traces CONTEXTIDR_EL2 */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* preset - pre-defined parameter values applied to the selected configuration */
PMU_FORMAT_ATTR(preset, "config:0-3");
/* sinkid - ID of the sink requested for the session */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* configid - ID of the selected system configuration */
PMU_FORMAT_ATTR(configid, "config2:32-63");
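
/*
 * The "contextid" option is an alias for whichever context ID register
 * carries the task PID on this system: CONTEXTIDR_EL2 when the kernel
 * runs at EL2 (VHE), CONTEXTIDR_EL1 otherwise.
 */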
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	&format_attr_preset.attr,
	&format_attr_configid.attr,
	&format_attr_branch_broadcast.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static struct attribute *etm_config_events_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_events_group = {
	.name = "events",
	.attrs = etm_config_events_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	&etm_pmu_events_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffer, if there is one */
	free_sink_buffer(event_data);

	/* Release any CoreSight configuration activated for this session */
	if (event_data->cfg_hash)
		cscfg_deactivate_config(event_data->cfg_hash);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	/*
	 * A per-cpu event is only interested in a single CPU; a per-thread
	 * event (cpu == -1) may be scheduled on any present CPU.
	 */
	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);
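
	/*
	 * Each CPU in the session gets its own pointer to the list of
	 * devices (the "path") between its ETM and the sink; allocate the
	 * per-CPU storage for those pointers up front.
	 */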
	event_data->path = alloc_percpu(struct list_head *);
	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}
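
/*
 * When no sink is specified by the user, the default sinks found for the
 * ETMs in the session must be interchangeable, since only one sink buffer
 * is allocated for the whole event. Two sinks qualify if they are of the
 * same subtype and driven by the same driver.
 */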
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;

	/*
	 * Sinks are compatible if they implement the same sink subtype and
	 * share the same set of sink operations (i.e. the same driver).
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id, cfg_hash;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the sink selected from user space, if any */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* Activate the requested CoreSight configuration, if any */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;
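
	/*
	 * Build a trace path from the ETM of each CPU in the mask to the
	 * sink. CPUs without an ETM, without a usable sink or without a
	 * connecting path are simply dropped from the mask; the session
	 * carries on with the CPUs that remain.
	 */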
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/* No ETM registered for this CPU - clear it from the mask */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}
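
		/*
		 * No sink was specified by the user: look up a default sink
		 * for this ETM. Only one sink buffer is allocated for the
		 * event, so every default sink picked along the way must be
		 * compatible with the previous one; otherwise drop this CPU
		 * from the session.
		 */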
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* The default sink must match the ones found so far */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it; it simply records the
		 * devices between this ETM and the sink so that the path can
		 * be enabled later, when the event is actually started.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* No sink found for any CPU - there is nowhere to put the trace */
	if (!sink)
		goto err;

	/* If the mask ended up empty, no CPU can trace for this event */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;
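
	/*
	 * Ask the sink driver to allocate the memory that will hold the
	 * trace data for this session, wired to the perf AUX pages.
	 */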
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}

static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/* There must not already be a session being tracked on this CPU */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;
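
	/*
	 * If this CPU was dropped from the session's mask at etm_setup_aux()
	 * time (no ETM, no sink or no path), pretend everything is fine and
	 * let the event run without tracing here; it may produce trace again
	 * once it migrates to a CPU that can reach a sink.
	 */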
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto out;

	path = etm_event_cpu_path(event_data, cpu);

	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

out:
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
	/* Save the event data for this CPU */
	ctxt->event_data = event_data;
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
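	/*
	 * The sink may already have ended the AUX transaction (e.g. when it
	 * failed to start and truncated the buffer); only flag and close the
	 * handle if it is still bound to the event.
	 */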
	if (READ_ONCE(handle->event)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		perf_aux_output_end(handle, 0);
	}
fail:
	event->hw.state = PERF_HES_STOPPED;
	return;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If the handle is still bound to an event, it must belong to the
	 * same session we started tracking on this CPU.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* This CPU is done with the session - clear the tracking */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* A running event must have valid event data */
	if (WARN_ON(!event_data))
		return;
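
	/*
	 * This CPU was not allowed to trace (it was dropped from the mask at
	 * etm_setup_aux() time), so nothing was enabled here; close the AUX
	 * transaction with a zero-sized record and stop the event.
	 */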
	if (handle->event && (mode & PERF_EF_UPDATE) &&
	    !cpumask_test_cpu(cpu, &event_data->mask)) {
		event->hw.state = PERF_HES_STOPPED;
		perf_aux_output_end(handle, 0);
		return;
	}

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * Only collect the trace data if the handle is still bound to an
	 * event and the perf core asked for an update (PERF_EF_UPDATE).
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
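
		/*
		 * The sink may have closed the AUX handle while the buffer
		 * was being updated. Only end the AUX transaction if the
		 * handle is still live; a defunct handle should never have
		 * produced a non-zero size (hence the WARN_ON below).
		 */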
		if (READ_ONCE(handle->event))
			perf_aux_output_end(handle, size);
		else
			WARN_ON(size);
	}

	/* Disabling the path makes its devices available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there is no more room for
		 * filters - the hardware has a fixed number of comparators.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size != 0 means this is an address range filter */
		if (filter->size) {
			/*
			 * Start/stop actions operate on single addresses and
			 * can't be combined with an address range.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * Range filtering and start/stop filtering are mutually
		 * exclusive - they can't be mixed within the same event.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

static struct dev_ext_attribute *
etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name)
{
	struct dev_ext_attribute *ea;
	unsigned long hash;
	int ret;
	struct device *pmu_dev = etm_pmu.dev;

	if (!etm_perf_up)
		return ERR_PTR(-EPROBE_DEFER);

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);
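
	/*
	 * The attribute's value is the hash of its name. For a sink entry
	 * this is the ID that user space passes in config2:0-31 and that is
	 * looked up via coresight_get_sink_by_id(); for a configuration it
	 * is the ID passed in config2:32-63 and activated through
	 * cscfg_activate_config().
	 */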
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

	ea->attr.attr.mode = 0444;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, group_name);

	return ret ? ERR_PTR(ret) : ea;
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	const char *name;
	struct device *dev = &csdev->dev;
	int err = 0;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	name = dev_name(dev);
	csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks");
	if (IS_ERR(csdev->ea)) {
		err = PTR_ERR(csdev->ea);
		csdev->ea = NULL;
	} else
		csdev->ea->attr.show = etm_perf_sink_name_show;

	return err;
}

static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name)
{
	struct device *pmu_dev = etm_pmu.dev;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, group_name);
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!csdev->ea)
		return;

	etm_perf_del_symlink_group(csdev->ea, "sinks");
	csdev->ea = NULL;
}

static ssize_t etm_perf_cscfg_event_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
{
	int err = 0;

	if (config_desc->event_ea != NULL)
		return 0;

	config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events");

	/* set the show function for the cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}

void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;

	etm_perf_del_symlink_group(config_desc->event_ea, "events");
	config_desc->event_ea = NULL;
}

int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}