0001
0002 #include <inttypes.h>
0003 #include <math.h>
0004 #include <stdlib.h>
0005 #include <string.h>
0006 #include <linux/compiler.h>
0007
0008 #include "../util/callchain.h"
0009 #include "../util/debug.h"
0010 #include "../util/hist.h"
0011 #include "../util/sort.h"
0012 #include "../util/evsel.h"
0013 #include "../util/evlist.h"
0014 #include "../perf.h"
0015
0016
0017
/*
 * Invoke 'fn' to print into the hpp buffer, then advance hpp->buf and
 * hpp->size past what was written; evaluates to the printed length.
 */
#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})
0024
/*
 * Format one column value for 'he': either as a percentage of the hists'
 * total period (fmt_percent) or as a raw u64.  For event groups, also
 * prints one value per group member (paired entries), zero-filling
 * members that have no sample so columns stay aligned.
 */
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * Zero-fill group members in the middle which
				 * have no sample (no pair entry).
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * Zero-fill trailing group members which have
			 * no sample.
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size since hpp was advanced by each
	 * print; the caller expects the result at the original position.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}
0113
0114 int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
0115 struct hist_entry *he, hpp_field_fn get_field,
0116 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
0117 {
0118 int len = fmt->user_len ?: fmt->len;
0119
0120 if (symbol_conf.field_sep) {
0121 return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
0122 print_fn, fmt_percent);
0123 }
0124
0125 if (fmt_percent)
0126 len -= 2;
0127 else
0128 len -= 1;
0129
0130 return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
0131 }
0132
0133 int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
0134 struct hist_entry *he, hpp_field_fn get_field,
0135 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
0136 {
0137 if (!symbol_conf.cumulate_callchain) {
0138 int len = fmt->user_len ?: fmt->len;
0139 return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
0140 }
0141
0142 return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
0143 }
0144
0145 static int field_cmp(u64 field_a, u64 field_b)
0146 {
0147 if (field_a > field_b)
0148 return 1;
0149 if (field_a < field_b)
0150 return -1;
0151 return 0;
0152 }
0153
0154 static int hist_entry__new_pair(struct hist_entry *a, struct hist_entry *b,
0155 hpp_field_fn get_field, int nr_members,
0156 u64 **fields_a, u64 **fields_b)
0157 {
0158 u64 *fa = calloc(nr_members, sizeof(*fa)),
0159 *fb = calloc(nr_members, sizeof(*fb));
0160 struct hist_entry *pair;
0161
0162 if (!fa || !fb)
0163 goto out_free;
0164
0165 list_for_each_entry(pair, &a->pairs.head, pairs.node) {
0166 struct evsel *evsel = hists_to_evsel(pair->hists);
0167 fa[evsel__group_idx(evsel)] = get_field(pair);
0168 }
0169
0170 list_for_each_entry(pair, &b->pairs.head, pairs.node) {
0171 struct evsel *evsel = hists_to_evsel(pair->hists);
0172 fb[evsel__group_idx(evsel)] = get_field(pair);
0173 }
0174
0175 *fields_a = fa;
0176 *fields_b = fb;
0177 return 0;
0178 out_free:
0179 free(fa);
0180 free(fb);
0181 *fields_a = *fields_b = NULL;
0182 return -1;
0183 }
0184
/*
 * Sort two entries primarily on the group member selected by 'idx'
 * (--group-sort-idx).  Falls back to the plain leader comparison when
 * the event is not a group or idx is out of range; the remaining
 * members (in index order) act as tie breakers.
 */
static int __hpp__group_sort_idx(struct hist_entry *a, struct hist_entry *b,
				 hpp_field_fn get_field, int idx)
{
	struct evsel *evsel = hists_to_evsel(a->hists);
	u64 *fields_a, *fields_b;
	int cmp, nr_members, ret, i;

	/* leader comparison, kept as the fallback result */
	cmp = field_cmp(get_field(a), get_field(b));
	if (!evsel__is_group_event(evsel))
		return cmp;

	nr_members = evsel->core.nr_members;
	/* idx 0 is the leader, already covered by cmp above */
	if (idx < 1 || idx >= nr_members)
		return cmp;

	ret = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (ret) {
		ret = cmp;
		goto out;
	}

	/* primary key: the selected group member */
	ret = field_cmp(fields_a[idx], fields_b[idx]);
	if (ret)
		goto out;

	/* tie breakers: remaining members in index order */
	for (i = 1; i < nr_members; i++) {
		if (i != idx) {
			ret = field_cmp(fields_a[i], fields_b[i]);
			if (ret)
				goto out;
		}
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}
0224
/*
 * Default field sort: compare the leader values, then — for event
 * groups — break ties on the group members' values in index order.
 * Honours --group-sort-idx when set.
 */
static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	u64 *fields_a, *fields_b;

	if (symbol_conf.group_sort_idx && symbol_conf.event_group) {
		return __hpp__group_sort_idx(a, b, get_field,
					     symbol_conf.group_sort_idx);
	}

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	/* i doubles as the error flag; ret stays 0 (tie) if pairing fails */
	i = hist_entry__new_pair(a, b, get_field, nr_members, &fields_a, &fields_b);
	if (i)
		goto out;

	/* member 0 is the leader, already compared above */
	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	/* fields_a/fields_b are NULL on pairing failure; free(NULL) is fine */
	free(fields_a);
	free(fields_b);

	return ret;
}
0263
/*
 * Sort on accumulated (children) period.  Entries with equal period
 * from the same thread are further ordered by callchain depth so that
 * callers and callees line up; returns 0 when cumulation is off.
 */
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Compare the accumulated field first; only break ties
		 * via callchain depth below.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		/* depth tie-break only makes sense within one thread */
		if (a->thread != b->thread || !hist_entry__has_callchains(a) || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}
0286
0287 static int hpp__width_fn(struct perf_hpp_fmt *fmt,
0288 struct perf_hpp *hpp __maybe_unused,
0289 struct hists *hists)
0290 {
0291 int len = fmt->user_len ?: fmt->len;
0292 struct evsel *evsel = hists_to_evsel(hists);
0293
0294 if (symbol_conf.event_group)
0295 len = max(len, evsel->core.nr_members * fmt->len);
0296
0297 if (len < (int)strlen(fmt->name))
0298 len = strlen(fmt->name);
0299
0300 return len;
0301 }
0302
/* Print the column header name, right-aligned to the column width. */
static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}
0310
/*
 * Colored percent printer used via hpp__call_print_fn(): the variadic
 * arguments are always (int len, double percent), matching the calls
 * in __hpp__fmt().  The return value is clamped to size - 1 so the
 * caller can safely advance the buffer even on truncation.
 */
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}
0326
/*
 * Plain (uncolored) entry printer: forwards the variadic arguments to
 * vsnprintf and clamps the result to size - 1 on truncation, like
 * hpp_color_scnprintf() above.
 */
static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}
0339
/*
 * Generator macros: each expands to the field accessor plus the
 * color/entry/sort callbacks for one builtin column type.  Comments
 * must stay outside the macro bodies (line continuations).
 */

/* percent column: colored output of he->stat._field as % of total */
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

/* percent column: plain (stdio) output variant */
#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

/* sort callback delegating to __hpp__sort() on the same field */
#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

/* accumulated (children) percent column, reads he->stat_acc */
#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

/* accumulated percent column, plain output variant */
#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

/* accumulated sort callback delegating to __hpp__sort_acc() */
#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

/* raw u64 column (samples, period): no percent formatting */
#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

/* raw column sort callback */
#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


/* convenience wrappers bundling color + entry + sort generators */
#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)
0430
/* instantiate accessor/print/sort callbacks for each builtin column */
HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
0440
/*
 * Always compares equal: used as the ->cmp/->collapse callback of
 * builtin columns, whose ordering is handled by ->sort instead.
 */
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}
0447
/* A format is a builtin hpp entry iff it uses the common header callback. */
static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}
0452
0453 static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
0454 {
0455 if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
0456 return false;
0457
0458 return a->idx == b->idx;
0459 }
0460
/* initializer for a colored percent column */
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

/* initializer for the accumulated (children) percent column */
#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

/* initializer for a plain (uncolored) column */
#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name   = _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

/* builtin output columns, indexed by PERF_HPP__* (see the .idx fields) */
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

/* global list of active output fields and sort keys */
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};
0518
/* the generator macros are not needed beyond this point */
#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
0537
static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point the format must already be unhooked from both
	 * the output (->list) and sort (->sort_list) lists; freeing a
	 * still-linked format would leave dangling list pointers.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	/* dynamically allocated formats provide their own destructor */
	if (fmt->free)
		fmt->free(fmt);
}
0550
/*
 * Initialize the builtin format table and register the default output
 * columns according to the current symbol_conf settings.
 */
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/*
		 * sort_list may already be linked by earlier setup —
		 * presumably via sort-key registration; only initialize
		 * it when still untouched. TODO confirm against callers.
		 */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * A strict user-specified field order overrides all defaults,
	 * so skip registering any default columns.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}
0594
/* Append 'format' to the list's output fields. */
void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}
0600
/* Append 'format' to the list's sort keys (lowest priority). */
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}
0606
/* Prepend 'format' to the list's sort keys (highest priority). */
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}
0612
/* Unlink 'format' from the output field list and release it. */
static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
	fmt_free(format);
}
0618
/*
 * Undo cumulation setup: drop the "Children" (accumulated) column and
 * rename the overhead column back to "Overhead" (perf_hpp__init() had
 * renamed it to "Self").  No-op under a strict user field order.
 */
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}
0639
0640 static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
0641 {
0642 return a->equal && a->equal(a, b);
0643 }
0644
0645 void perf_hpp__setup_output_field(struct perf_hpp_list *list)
0646 {
0647 struct perf_hpp_fmt *fmt;
0648
0649
0650 perf_hpp_list__for_each_sort_list(list, fmt) {
0651 struct perf_hpp_fmt *pos;
0652
0653
0654 if (!fmt->entry && !fmt->color)
0655 continue;
0656
0657 perf_hpp_list__for_each_format(list, pos) {
0658 if (fmt_equal(fmt, pos))
0659 goto next;
0660 }
0661
0662 perf_hpp__column_register(fmt);
0663 next:
0664 continue;
0665 }
0666 }
0667
0668 void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
0669 {
0670 struct perf_hpp_fmt *fmt;
0671
0672
0673 perf_hpp_list__for_each_format(list, fmt) {
0674 struct perf_hpp_fmt *pos;
0675
0676 perf_hpp_list__for_each_sort_list(list, pos) {
0677 if (fmt_equal(fmt, pos))
0678 goto next;
0679 }
0680
0681 perf_hpp__register_sort_field(fmt);
0682 next:
0683 continue;
0684 }
0685 }
0686
0687
/*
 * Tear down both the output fields and the sort keys of 'list',
 * freeing each format.  A format linked on both lists is unlinked from
 * both in the first loop, so the second loop cannot free it twice.
 */
void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset the output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset the remaining sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}
0706
0707
0708
0709
0710 unsigned int hists__sort_list_width(struct hists *hists)
0711 {
0712 struct perf_hpp_fmt *fmt;
0713 int ret = 0;
0714 bool first = true;
0715 struct perf_hpp dummy_hpp;
0716
0717 hists__for_each_format(hists, fmt) {
0718 if (perf_hpp__should_skip(fmt, hists))
0719 continue;
0720
0721 if (first)
0722 first = false;
0723 else
0724 ret += 2;
0725
0726 ret += fmt->width(fmt, &dummy_hpp, hists);
0727 }
0728
0729 if (verbose > 0 && hists__has(hists, sym))
0730 ret += 3 + BITS_PER_LONG / 4;
0731
0732 return ret;
0733 }
0734
0735 unsigned int hists__overhead_width(struct hists *hists)
0736 {
0737 struct perf_hpp_fmt *fmt;
0738 int ret = 0;
0739 bool first = true;
0740 struct perf_hpp dummy_hpp;
0741
0742 hists__for_each_format(hists, fmt) {
0743 if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
0744 break;
0745
0746 if (first)
0747 first = false;
0748 else
0749 ret += 2;
0750
0751 ret += fmt->width(fmt, &dummy_hpp, hists);
0752 }
0753
0754 return ret;
0755 }
0756
/*
 * Reset a column's width to its builtin default.  Sort entries have
 * their own reset path; dynamic entries keep their current width.
 */
void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}
0789
/* Reset the width of every column, including per-level hierarchy formats. */
void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* also reset the formats attached to each hierarchy level */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}
0804
0805 void perf_hpp__set_user_width(const char *width_list_str)
0806 {
0807 struct perf_hpp_fmt *fmt;
0808 const char *ptr = width_list_str;
0809
0810 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
0811 char *p;
0812
0813 int len = strtol(ptr, &p, 10);
0814 fmt->user_len = len;
0815
0816 if (*p == ',')
0817 ptr = p + 1;
0818 else
0819 break;
0820 }
0821 }
0822
/*
 * Attach a copy of 'fmt' to the hists' per-level hierarchy format
 * list, creating the level node on first use.  A level stays marked
 * skipped only while every format on it should be skipped.
 * Returns 0 on success, -1 on allocation failure.
 */
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	/* reuse an existing node for this hierarchy level, if any */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	/* one non-skipped format is enough to keep the level visible */
	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}
0862
/*
 * For hierarchy (--hierarchy) reports, build per-hists copies of the
 * sort-list formats, grouped by level.  Dynamic entries not defined
 * for a given hists are skipped.  Returns 0 on success or a negative
 * error from add_hierarchy_fmt().
 */
int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}