0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/bitmap.h>
0012 #include <linux/bitops.h>
0013 #include <linux/bug.h>
0014 #include <linux/err.h>
0015 #include <linux/errno.h>
0016 #include <linux/interrupt.h>
0017
0018 #include <asm/cputype.h>
0019 #include <asm/local64.h>
0020
0021 #include "hisi_uncore_pmu.h"
0022
/*
 * Extract the 8-bit hardware event code from the event's config_base.
 * The macro argument is parenthesized so the macro expands safely when
 * passed a non-trivial expression.
 */
#define HISI_GET_EVENTID(ev) ((ev)->hw.config_base & 0xff)
/* All-ones mask covering an @nr bit wide hardware counter. */
#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
0025
0026
0027
0028
0029 ssize_t hisi_format_sysfs_show(struct device *dev,
0030 struct device_attribute *attr, char *buf)
0031 {
0032 struct dev_ext_attribute *eattr;
0033
0034 eattr = container_of(attr, struct dev_ext_attribute, attr);
0035
0036 return sysfs_emit(buf, "%s\n", (char *)eattr->var);
0037 }
0038 EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
0039
0040
0041
0042
0043 ssize_t hisi_event_sysfs_show(struct device *dev,
0044 struct device_attribute *attr, char *page)
0045 {
0046 struct dev_ext_attribute *eattr;
0047
0048 eattr = container_of(attr, struct dev_ext_attribute, attr);
0049
0050 return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
0051 }
0052 EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
0053
0054
0055
0056
0057 ssize_t hisi_cpumask_sysfs_show(struct device *dev,
0058 struct device_attribute *attr, char *buf)
0059 {
0060 struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
0061
0062 return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
0063 }
0064 EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
0065
0066 static bool hisi_validate_event_group(struct perf_event *event)
0067 {
0068 struct perf_event *sibling, *leader = event->group_leader;
0069 struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
0070
0071 int counters = 1;
0072
0073 if (!is_software_event(leader)) {
0074
0075
0076
0077
0078 if (leader->pmu != event->pmu)
0079 return false;
0080
0081
0082 if (leader != event)
0083 counters++;
0084 }
0085
0086 for_each_sibling_event(sibling, event->group_leader) {
0087 if (is_software_event(sibling))
0088 continue;
0089 if (sibling->pmu != event->pmu)
0090 return false;
0091
0092 counters++;
0093 }
0094
0095
0096 return counters <= hisi_pmu->num_counters;
0097 }
0098
0099 int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
0100 {
0101 struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
0102 unsigned long *used_mask = hisi_pmu->pmu_events.used_mask;
0103 u32 num_counters = hisi_pmu->num_counters;
0104 int idx;
0105
0106 idx = find_first_zero_bit(used_mask, num_counters);
0107 if (idx == num_counters)
0108 return -EAGAIN;
0109
0110 set_bit(idx, used_mask);
0111
0112 return idx;
0113 }
0114 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
0115
0116 ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
0117 struct device_attribute *attr,
0118 char *page)
0119 {
0120 struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
0121
0122 return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
0123 }
0124 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
0125
0126 static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
0127 {
0128 clear_bit(idx, hisi_pmu->pmu_events.used_mask);
0129 }
0130
/*
 * Counter-overflow interrupt handler. Reads the overflow status bitmap
 * from the hardware, and for every overflown counter: acknowledges the
 * interrupt, folds the counted value into the perf event, and re-arms
 * the counter for the next period.
 */
static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
{
	struct hisi_pmu *hisi_pmu = data;
	struct perf_event *event;
	unsigned long overflown;
	int idx;

	overflown = hisi_pmu->ops->get_int_status(hisi_pmu);
	/* No overflow bit set: this IRQ was not for us (possibly shared). */
	if (!overflown)
		return IRQ_NONE;

	/*
	 * Find the counter index by the overflow bit and handle it: ack the
	 * hardware first, then update the software state for that counter.
	 */
	for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) {
		/* Clear the interrupt status before touching the event. */
		hisi_pmu->ops->clear_int_status(hisi_pmu, idx);
		/* A counter may have overflown with no event bound to it. */
		event = hisi_pmu->pmu_events.hw_events[idx];
		if (!event)
			continue;

		hisi_uncore_pmu_event_update(event);
		hisi_uncore_pmu_set_event_period(event);
	}

	return IRQ_HANDLED;
}
0160
0161 int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
0162 struct platform_device *pdev)
0163 {
0164 int irq, ret;
0165
0166 irq = platform_get_irq(pdev, 0);
0167 if (irq < 0)
0168 return irq;
0169
0170 ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
0171 IRQF_NOBALANCING | IRQF_NO_THREAD,
0172 dev_name(&pdev->dev), hisi_pmu);
0173 if (ret < 0) {
0174 dev_err(&pdev->dev,
0175 "Fail to request IRQ: %d ret: %d.\n", irq, ret);
0176 return ret;
0177 }
0178
0179 hisi_pmu->irq = irq;
0180
0181 return 0;
0182 }
0183 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
0184
0185 int hisi_uncore_pmu_event_init(struct perf_event *event)
0186 {
0187 struct hw_perf_event *hwc = &event->hw;
0188 struct hisi_pmu *hisi_pmu;
0189
0190 if (event->attr.type != event->pmu->type)
0191 return -ENOENT;
0192
0193
0194
0195
0196
0197
0198 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
0199 return -EOPNOTSUPP;
0200
0201
0202
0203
0204
0205 if (event->cpu < 0)
0206 return -EINVAL;
0207
0208
0209
0210
0211
0212 if (!hisi_validate_event_group(event))
0213 return -EINVAL;
0214
0215 hisi_pmu = to_hisi_pmu(event->pmu);
0216 if (event->attr.config > hisi_pmu->check_event)
0217 return -EINVAL;
0218
0219 if (hisi_pmu->on_cpu == -1)
0220 return -EINVAL;
0221
0222
0223
0224
0225
0226 hwc->idx = -1;
0227 hwc->config_base = event->attr.config;
0228
0229
0230 event->cpu = hisi_pmu->on_cpu;
0231
0232 return 0;
0233 }
0234 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
0235
0236
0237
0238
0239
/*
 * Program and enable the hardware counter bound to @event: write the
 * event code, apply any optional filter, then enable the counter's
 * overflow interrupt and finally the counter itself.
 */
static void hisi_uncore_pmu_enable_event(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Select which hardware event this counter counts. */
	hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
				    HISI_GET_EVENTID(event));

	/* Filters are optional per-implementation. */
	if (hisi_pmu->ops->enable_filter)
		hisi_pmu->ops->enable_filter(event);

	/* Arm the overflow interrupt before starting the counter. */
	hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
	hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
}
0254
0255
0256
0257
/*
 * Disable the hardware counter bound to @event, in the reverse order of
 * hisi_uncore_pmu_enable_event(): stop the counter, mask its interrupt,
 * then remove any optional filter.
 */
static void hisi_uncore_pmu_disable_event(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
	hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);

	/* Filters are optional per-implementation. */
	if (hisi_pmu->ops->disable_filter)
		hisi_pmu->ops->disable_filter(event);
}
0269
/*
 * Re-arm the counter for a fresh period by starting it at the midpoint
 * of its range.
 */
void hisi_uncore_pmu_set_event_period(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Start the counter at half of its full range
	 * (2^(counter_bits - 1)), so the overflow interrupt fires after
	 * half a range of events. This presumably leaves headroom to
	 * service the interrupt before the counter wraps all the way
	 * around and the delta computed in hisi_uncore_pmu_event_update()
	 * becomes ambiguous -- NOTE(review): rationale inferred, confirm
	 * against the hardware manual.
	 */
	u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);

	/* Keep the software shadow in sync with what we write to hardware. */
	local64_set(&hwc->prev_count, val);

	/* Write start value to the hardware event counter */
	hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
0288 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
0289
/*
 * Fold the hardware counter's progress since the last read into
 * event->count. Lockless: a cmpxchg retry loop guards against a
 * concurrent update of prev_count (e.g. from the overflow ISR).
 */
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		/* Read the count from the counter register */
		new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc);
		prev_raw_count = local64_read(&hwc->prev_count);
		/* Retry if someone else moved prev_count under us. */
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				 new_raw_count) != prev_raw_count);

	/*
	 * Mask the delta to the counter width so a wrap of the hardware
	 * counter still yields the correct (modular) difference.
	 */
	delta = (new_raw_count - prev_raw_count) &
		HISI_MAX_PERIOD(hisi_pmu->counter_bits);
	local64_add(delta, &event->count);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
0310
/*
 * perf core ->start() callback: (re)start counting for @event. With
 * PERF_EF_RELOAD the previously saved counter value is restored instead
 * of the fresh mid-range period written by
 * hisi_uncore_pmu_set_event_period() just above.
 */
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/* Starting an already-running event is a perf-core API violation. */
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;
	hisi_uncore_pmu_set_event_period(event);

	if (flags & PERF_EF_RELOAD) {
		u64 prev_raw_count = local64_read(&hwc->prev_count);

		/* Resume from the saved value rather than a new period. */
		hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count);
	}

	hisi_uncore_pmu_enable_event(event);
	perf_event_update_userpage(event);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
0333
0334 void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
0335 {
0336 struct hw_perf_event *hwc = &event->hw;
0337
0338 hisi_uncore_pmu_disable_event(event);
0339 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
0340 hwc->state |= PERF_HES_STOPPED;
0341
0342 if (hwc->state & PERF_HES_UPTODATE)
0343 return;
0344
0345
0346 hisi_uncore_pmu_event_update(event);
0347 hwc->state |= PERF_HES_UPTODATE;
0348 }
0349 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
0350
0351 int hisi_uncore_pmu_add(struct perf_event *event, int flags)
0352 {
0353 struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
0354 struct hw_perf_event *hwc = &event->hw;
0355 int idx;
0356
0357 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
0358
0359
0360 idx = hisi_pmu->ops->get_event_idx(event);
0361 if (idx < 0)
0362 return idx;
0363
0364 event->hw.idx = idx;
0365 hisi_pmu->pmu_events.hw_events[idx] = event;
0366
0367 if (flags & PERF_EF_START)
0368 hisi_uncore_pmu_start(event, PERF_EF_RELOAD);
0369
0370 return 0;
0371 }
0372 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
0373
0374 void hisi_uncore_pmu_del(struct perf_event *event, int flags)
0375 {
0376 struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
0377 struct hw_perf_event *hwc = &event->hw;
0378
0379 hisi_uncore_pmu_stop(event, PERF_EF_UPDATE);
0380 hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx);
0381 perf_event_update_userpage(event);
0382 hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
0383 }
0384 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
0385
/* perf core ->read() callback: refresh event->count from hardware. */
void hisi_uncore_pmu_read(struct perf_event *event)
{
	hisi_uncore_pmu_event_update(event);
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
0392
0393 void hisi_uncore_pmu_enable(struct pmu *pmu)
0394 {
0395 struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
0396 bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask,
0397 hisi_pmu->num_counters);
0398
0399 if (!enabled)
0400 return;
0401
0402 hisi_pmu->ops->start_counters(hisi_pmu);
0403 }
0404 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
0405
0406 void hisi_uncore_pmu_disable(struct pmu *pmu)
0407 {
0408 struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
0409
0410 hisi_pmu->ops->stop_counters(hisi_pmu);
0411 }
0412 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
/*
 * Derive the super-CPU-cluster (SCCL) and CPU-cluster (CCL) IDs of the
 * current CPU from MPIDR_EL1 affinity fields. Either output pointer may
 * be NULL if the caller only needs one of the IDs.
 *
 * Which affinity level carries which ID depends on whether the CPU
 * implements multithreading (MPIDR MT bit) and on the CPU part:
 * - TSV110 with MT: both IDs are packed into Aff2 -- presumably SCCL in
 *   the upper bits and CCL in the low 3 bits (NOTE(review): bit split
 *   inferred from the shifts below; confirm against the SoC manual).
 * - other MT parts: SCCL = Aff3, CCL = Aff2.
 * - non-MT parts:   SCCL = Aff2, CCL = Aff1.
 */
static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
	u64 mpidr = read_cpuid_mpidr();
	int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
	int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
	int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	bool mt = mpidr & MPIDR_MT_BITMASK;
	int sccl, ccl;

	if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
		sccl = aff2 >> 3;
		ccl = aff2 & 0x7;
	} else if (mt) {
		sccl = aff3;
		ccl = aff2;
	} else {
		sccl = aff2;
		ccl = aff1;
	}

	if (scclp)
		*scclp = sccl;
	if (cclp)
		*cclp = ccl;
}
0453
0454
0455
0456
0457 static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
0458 {
0459 int sccl_id, ccl_id;
0460
0461
0462 if (hisi_pmu->sccl_id == -1)
0463 return true;
0464
0465 if (hisi_pmu->ccl_id == -1) {
0466
0467 hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
0468
0469 return sccl_id == hisi_pmu->sccl_id;
0470 }
0471
0472 hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
0473
0474 return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
0475 }
0476
0477 int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
0478 {
0479 struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
0480 node);
0481
0482 if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
0483 return 0;
0484
0485 cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
0486
0487
0488 if (hisi_pmu->on_cpu != -1)
0489 return 0;
0490
0491
0492 hisi_pmu->on_cpu = cpu;
0493
0494
0495 WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
0496
0497 return 0;
0498 }
0499 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
0500
/*
 * CPU hotplug offline callback: if the departing CPU was servicing this
 * PMU, migrate the perf context and the overflow IRQ to another online
 * CPU associated with the same SCCL/CCL, when one exists.
 */
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
						     node);
	cpumask_t pmu_online_cpus;
	unsigned int target;

	/* CPU was never associated with this PMU: nothing to migrate. */
	if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
		return 0;

	/* Only the elected servicing CPU requires migration work. */
	if (hisi_pmu->on_cpu != cpu)
		return 0;

	/* Give up ownership before picking a successor. */
	hisi_pmu->on_cpu = -1;

	/* Choose a new CPU to migrate ownership of the PMU to. */
	cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
		    cpu_online_mask);
	target = cpumask_any_but(&pmu_online_cpus, cpu);
	/* No eligible online CPU left: PMU stays unowned (on_cpu == -1). */
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
	/* Use this CPU in cpumask for perf and steer the IRQ to it. */
	hisi_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
0533
0534 void hisi_pmu_init(struct pmu *pmu, const char *name,
0535 const struct attribute_group **attr_groups, struct module *module)
0536 {
0537 pmu->name = name;
0538 pmu->module = module;
0539 pmu->task_ctx_nr = perf_invalid_context;
0540 pmu->event_init = hisi_uncore_pmu_event_init;
0541 pmu->pmu_enable = hisi_uncore_pmu_enable;
0542 pmu->pmu_disable = hisi_uncore_pmu_disable;
0543 pmu->add = hisi_uncore_pmu_add;
0544 pmu->del = hisi_uncore_pmu_del;
0545 pmu->start = hisi_uncore_pmu_start;
0546 pmu->stop = hisi_uncore_pmu_stop;
0547 pmu->read = hisi_uncore_pmu_read;
0548 pmu->attr_groups = attr_groups;
0549 }
0550 EXPORT_SYMBOL_GPL(hisi_pmu_init);
0551
0552 MODULE_LICENSE("GPL v2");