// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                      struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                      struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                      struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
    return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
    hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
    if (len > hists__col_len(hists, col)) {
        hists__set_col_len(hists, col, len);
        return true;
    }
    return false;
}

void hists__reset_col_len(struct hists *hists)
{
    enum hist_column col;

    for (col = 0; col < HISTC_NR_COLS; ++col)
        hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
    const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

    if (hists__col_len(hists, dso) < unresolved_col_width &&
        !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
        !symbol_conf.dso_list)
        hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
    const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
    int symlen;
    u16 len;

    if (h->block_info)
        return;
    /*
     * +4 accounts for '[x] ' priv level info
     * +2 accounts for 0x prefix on raw addresses
     * +3 accounts for ' y ' symtab origin info
     */
    if (h->ms.sym) {
        symlen = h->ms.sym->namelen + 4;
        if (verbose > 0)
            symlen += BITS_PER_LONG / 4 + 2 + 3;
        hists__new_col_len(hists, HISTC_SYMBOL, symlen);
    } else {
        symlen = unresolved_col_width + 4 + 2;
        hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        hists__set_unres_dso_col_len(hists, HISTC_DSO);
    }

    len = thread__comm_len(h->thread);
    if (hists__new_col_len(hists, HISTC_COMM, len))
        hists__set_col_len(hists, HISTC_THREAD, len + 8);

    if (h->ms.map) {
        len = dso__name_len(h->ms.map->dso);
        hists__new_col_len(hists, HISTC_DSO, len);
    }

    if (h->parent)
        hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

    if (h->branch_info) {
        if (h->branch_info->from.ms.sym) {
            symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
            if (verbose > 0)
                symlen += BITS_PER_LONG / 4 + 2 + 3;
            hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

            symlen = dso__name_len(h->branch_info->from.ms.map->dso);
            hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
        } else {
            symlen = unresolved_col_width + 4 + 2;
            hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
            hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
            hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
        }

        if (h->branch_info->to.ms.sym) {
            symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
            if (verbose > 0)
                symlen += BITS_PER_LONG / 4 + 2 + 3;
            hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

            symlen = dso__name_len(h->branch_info->to.ms.map->dso);
            hists__new_col_len(hists, HISTC_DSO_TO, symlen);
        } else {
            symlen = unresolved_col_width + 4 + 2;
            hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
            hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
            hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
        }

        if (h->branch_info->srcline_from)
            hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                    strlen(h->branch_info->srcline_from));
        if (h->branch_info->srcline_to)
            hists__new_col_len(hists, HISTC_SRCLINE_TO,
                    strlen(h->branch_info->srcline_to));
    }

    if (h->mem_info) {
        if (h->mem_info->daddr.ms.sym) {
            symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
                   + unresolved_col_width + 2;
            hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                       symlen);
            hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                       symlen + 1);
        } else {
            symlen = unresolved_col_width + 4 + 2;
            hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                       symlen);
            hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                       symlen);
        }

        if (h->mem_info->iaddr.ms.sym) {
            symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
                   + unresolved_col_width + 2;
            hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                       symlen);
        } else {
            symlen = unresolved_col_width + 4 + 2;
            hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                       symlen);
        }

        if (h->mem_info->daddr.ms.map) {
            symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
            hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                       symlen);
        } else {
            symlen = unresolved_col_width + 4 + 2;
            hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                   unresolved_col_width + 4 + 2);

        hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
                   unresolved_col_width + 4 + 2);

    } else {
        symlen = unresolved_col_width + 4 + 2;
        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
    }

    hists__new_col_len(hists, HISTC_CGROUP, 6);
    hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
    hists__new_col_len(hists, HISTC_CPU, 3);
    hists__new_col_len(hists, HISTC_SOCKET, 6);
    hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
    hists__new_col_len(hists, HISTC_MEM_TLB, 22);
    hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
    hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
    hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
    hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
    hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
    hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
    hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
    hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
    hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);

    if (symbol_conf.nanosecs)
        hists__new_col_len(hists, HISTC_TIME, 16);
    else
        hists__new_col_len(hists, HISTC_TIME, 12);
    hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

    if (h->srcline) {
        len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
        hists__new_col_len(hists, HISTC_SRCLINE, len);
    }

    if (h->srcfile)
        hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

    if (h->transaction)
        hists__new_col_len(hists, HISTC_TRANSACTION,
                   hist_entry__transaction_len());

    if (h->trace_output)
        hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

    if (h->cgroup) {
        const char *cgrp_name = "unknown";
        struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
                           h->cgroup);
        if (cgrp != NULL)
            cgrp_name = cgrp->name;

        hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
    }
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
    struct rb_node *next = rb_first_cached(&hists->entries);
    struct hist_entry *n;
    int row = 0;

    hists__reset_col_len(hists);

    while (next && row++ < max_rows) {
        n = rb_entry(next, struct hist_entry, rb_node);
        if (!n->filtered)
            hists__calc_col_len(hists, n);
        next = rb_next(&n->rb_node);
    }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                    unsigned int cpumode, u64 period)
{
    switch (cpumode) {
    case PERF_RECORD_MISC_KERNEL:
        he_stat->period_sys += period;
        break;
    case PERF_RECORD_MISC_USER:
        he_stat->period_us += period;
        break;
    case PERF_RECORD_MISC_GUEST_KERNEL:
        he_stat->period_guest_sys += period;
        break;
    case PERF_RECORD_MISC_GUEST_USER:
        he_stat->period_guest_us += period;
        break;
    default:
        break;
    }
}

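/*
 * Quantize a sample timestamp: round it down to the start of its
 * symbol_conf.time_quantum bucket so that nearby samples share one
 * time slice (e.g., assuming nanosecond timestamps and a 100ms
 * quantum, 12345678900 maps to 12300000000).
 */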
static long hist_time(unsigned long htime)
{
    unsigned long time_quantum = symbol_conf.time_quantum;
    if (time_quantum)
        return (htime / time_quantum) * time_quantum;
    return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
    he_stat->period     += period;
    he_stat->nr_events  += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
    dest->period        += src->period;
    dest->period_sys    += src->period_sys;
    dest->period_us     += src->period_us;
    dest->period_guest_sys  += src->period_guest_sys;
    dest->period_guest_us   += src->period_guest_us;
    dest->nr_events     += src->nr_events;
}

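/*
 * Exponential decay for the live modes: each call scales the period
 * and event count by 7/8, so entries that stop receiving samples fade
 * out over successive intervals (e.g. a period of 800 decays to 700,
 * then 612, and so on under integer division).
 */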
static void he_stat__decay(struct he_stat *he_stat)
{
    he_stat->period = (he_stat->period * 7) / 8;
    he_stat->nr_events = (he_stat->nr_events * 7) / 8;
    /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

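/*
 * Decay one entry (and, in hierarchy mode, its children).  Returns
 * true once the period has decayed to zero, in which case the caller
 * is expected to remove the entry via hists__delete_entry().
 */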
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
    u64 prev_period = he->stat.period;
    u64 diff;

    if (prev_period == 0)
        return true;

    he_stat__decay(&he->stat);
    if (symbol_conf.cumulate_callchain)
        he_stat__decay(he->stat_acc);
    decay_callchain(he->callchain);

    diff = prev_period - he->stat.period;

    if (!he->depth) {
        hists->stats.total_period -= diff;
        if (!he->filtered)
            hists->stats.total_non_filtered_period -= diff;
    }

    if (!he->leaf) {
        struct hist_entry *child;
        struct rb_node *node = rb_first_cached(&he->hroot_out);
        while (node) {
            child = rb_entry(node, struct hist_entry, rb_node);
            node = rb_next(node);

            if (hists__decay_entry(hists, child))
                hists__delete_entry(hists, child);
        }
    }

    return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
    struct rb_root_cached *root_in;
    struct rb_root_cached *root_out;

    if (he->parent_he) {
        root_in  = &he->parent_he->hroot_in;
        root_out = &he->parent_he->hroot_out;
    } else {
        if (hists__has(hists, need_collapse))
            root_in = &hists->entries_collapsed;
        else
            root_in = hists->entries_in;
        root_out = &hists->entries;
    }

    rb_erase_cached(&he->rb_node_in, root_in);
    rb_erase_cached(&he->rb_node, root_out);

    --hists->nr_entries;
    if (!he->filtered)
        --hists->nr_non_filtered_entries;

    hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
    struct rb_node *next = rb_first_cached(&hists->entries);
    struct hist_entry *n;

    while (next) {
        n = rb_entry(next, struct hist_entry, rb_node);
        next = rb_next(&n->rb_node);
        if (((zap_user && n->level == '.') ||
             (zap_kernel && n->level != '.') ||
             hists__decay_entry(hists, n))) {
            hists__delete_entry(hists, n);
        }
    }
}

void hists__delete_entries(struct hists *hists)
{
    struct rb_node *next = rb_first_cached(&hists->entries);
    struct hist_entry *n;

    while (next) {
        n = rb_entry(next, struct hist_entry, rb_node);
        next = rb_next(&n->rb_node);

        hists__delete_entry(hists, n);
    }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
    struct rb_node *next = rb_first_cached(&hists->entries);
    struct hist_entry *n;
    int i = 0;

    while (next) {
        n = rb_entry(next, struct hist_entry, rb_node);
        if (i == idx)
            return n;

        next = rb_next(&n->rb_node);
        i++;
    }

    return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

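/*
 * Initialize a new entry from 'template'.  The struct is copied
 * wholesale, then everything the template merely borrows is made
 * owned: maps and the thread are reference counted, while
 * branch_info, raw_data, srcline and the res_samples array are
 * deep-copied or freshly allocated, with goto-style unwinding on
 * allocation failure.
 */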
static int hist_entry__init(struct hist_entry *he,
                struct hist_entry *template,
                bool sample_self,
                size_t callchain_size)
{
    *he = *template;
    he->callchain_size = callchain_size;

    if (symbol_conf.cumulate_callchain) {
        he->stat_acc = malloc(sizeof(he->stat));
        if (he->stat_acc == NULL)
            return -ENOMEM;
        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
        if (!sample_self)
            memset(&he->stat, 0, sizeof(he->stat));
    }

    map__get(he->ms.map);

    if (he->branch_info) {
        /*
         * This branch info is (part of) a buffer allocated by
         * sample__resolve_bstack() and will be freed after the
         * new entries are added, so we need to save a copy.
         */
        he->branch_info = malloc(sizeof(*he->branch_info));
        if (he->branch_info == NULL)
            goto err;

        memcpy(he->branch_info, template->branch_info,
               sizeof(*he->branch_info));

        map__get(he->branch_info->from.ms.map);
        map__get(he->branch_info->to.ms.map);
    }

    if (he->mem_info) {
        map__get(he->mem_info->iaddr.ms.map);
        map__get(he->mem_info->daddr.ms.map);
    }

    if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
        callchain_init(he->callchain);

    if (he->raw_data) {
        he->raw_data = memdup(he->raw_data, he->raw_size);
        if (he->raw_data == NULL)
            goto err_infos;
    }

    if (he->srcline) {
        he->srcline = strdup(he->srcline);
        if (he->srcline == NULL)
            goto err_rawdata;
    }

    if (symbol_conf.res_sample) {
        he->res_samples = calloc(sizeof(struct res_sample),
                    symbol_conf.res_sample);
        if (!he->res_samples)
            goto err_srcline;
    }

    INIT_LIST_HEAD(&he->pairs.node);
    thread__get(he->thread);
    he->hroot_in  = RB_ROOT_CACHED;
    he->hroot_out = RB_ROOT_CACHED;

    if (!symbol_conf.report_hierarchy)
        he->leaf = true;

    return 0;

err_srcline:
    zfree(&he->srcline);

err_rawdata:
    zfree(&he->raw_data);

err_infos:
    if (he->branch_info) {
        map__put(he->branch_info->from.ms.map);
        map__put(he->branch_info->to.ms.map);
        zfree(&he->branch_info);
    }
    if (he->mem_info) {
        map__put(he->mem_info->iaddr.ms.map);
        map__put(he->mem_info->daddr.ms.map);
    }
err:
    map__zput(he->ms.map);
    zfree(&he->stat_acc);
    return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
    return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
    free(ptr);
}

static struct hist_entry_ops default_ops = {
    .new    = hist_entry__zalloc,
    .free   = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                      bool sample_self)
{
    struct hist_entry_ops *ops = template->ops;
    size_t callchain_size = 0;
    struct hist_entry *he;
    int err = 0;

    if (!ops)
        ops = template->ops = &default_ops;

    if (symbol_conf.use_callchain)
        callchain_size = sizeof(struct callchain_root);

    he = ops->new(callchain_size);
    if (he) {
        err = hist_entry__init(he, template, sample_self, callchain_size);
        if (err) {
            ops->free(he);
            he = NULL;
        }
    }

    return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
    if (symbol_conf.exclude_other && parent == NULL)
        return 1 << HIST_FILTER__PARENT;
    return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
    if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
        return;

    he->hists->callchain_period += period;
    if (!he->filtered)
        he->hists->callchain_non_filtered_period += period;
}

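/*
 * The insert-or-accumulate step that turns a sample stream into a
 * histogram: look up 'entry' in the input rbtree and, on a match,
 * fold the new period into the existing node; otherwise allocate a
 * fresh entry from the template and link it in.
 */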
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                           struct hist_entry *entry,
                           struct addr_location *al,
                           bool sample_self)
{
    struct rb_node **p;
    struct rb_node *parent = NULL;
    struct hist_entry *he;
    int64_t cmp;
    u64 period = entry->stat.period;
    bool leftmost = true;

    p = &hists->entries_in->rb_root.rb_node;

    while (*p != NULL) {
        parent = *p;
        he = rb_entry(parent, struct hist_entry, rb_node_in);

        /*
         * Make sure that it receives arguments in the same order as
         * hist_entry__collapse() so that we can use an appropriate
         * function when searching an entry regardless of which sort
         * keys were used.
         */
        cmp = hist_entry__cmp(he, entry);

        if (!cmp) {
            if (sample_self) {
                he_stat__add_period(&he->stat, period);
                hist_entry__add_callchain_period(he, period);
            }
            if (symbol_conf.cumulate_callchain)
                he_stat__add_period(he->stat_acc, period);

            /*
             * This mem info was allocated from sample__resolve_mem
             * and will not be used anymore.
             */
            mem_info__zput(entry->mem_info);

            block_info__zput(entry->block_info);

            /* If the map of an existing hist_entry has
             * become out-of-date due to an exec() or
             * similar, update it.  Otherwise we will
             * mis-adjust symbol addresses when computing
             * the history counter to increment.
             */
            if (he->ms.map != entry->ms.map) {
                map__put(he->ms.map);
                he->ms.map = map__get(entry->ms.map);
            }
            goto out;
        }

        if (cmp < 0)
            p = &(*p)->rb_left;
        else {
            p = &(*p)->rb_right;
            leftmost = false;
        }
    }

    he = hist_entry__new(entry, sample_self);
    if (!he)
        return NULL;

    if (sample_self)
        hist_entry__add_callchain_period(he, period);
    hists->nr_entries++;

    rb_link_node(&he->rb_node_in, parent, p);
    rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
    if (sample_self)
        he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
    if (symbol_conf.cumulate_callchain)
        he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
    return he;
}

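/*
 * Pick a value uniformly distributed in [0, high).  A plain
 * "random() % high" over-selects small values whenever high does not
 * evenly divide the generator's range, so draws below thresh are
 * rejected and retried (rejection sampling).  E.g. for high == 10,
 * thresh == 6 (2^32 mod 10, assuming a generator spanning the full
 * unsigned range), so raw draws 0..5 are thrown away.
 */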
static unsigned random_max(unsigned high)
{
    unsigned thresh = -high % high;
    for (;;) {
        unsigned r = random();
        if (r >= thresh)
            return r % high;
    }
}

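/*
 * Record up to symbol_conf.res_sample representative samples (time,
 * cpu, tid) per entry.  The first res_sample samples fill the array;
 * after that each new sample overwrites a randomly chosen slot, so
 * (unlike classic reservoir sampling, which would replace with
 * decreasing probability) the kept set drifts towards recent samples.
 */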
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
    struct res_sample *r;
    int j;

    if (he->num_res < symbol_conf.res_sample) {
        j = he->num_res++;
    } else {
        j = random_max(symbol_conf.res_sample);
    }
    r = &he->res_samples[j];
    r->time = sample->time;
    r->cpu = sample->cpu;
    r->tid = sample->tid;
}

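/*
 * Build a template hist_entry on the stack from the resolved sample
 * (the addr_location plus any branch/mem/block info) and hand it to
 * hists__findnew_entry(), which either merges it into an existing
 * entry or copies it into a newly allocated one.
 */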
static struct hist_entry*
__hists__add_entry(struct hists *hists,
           struct addr_location *al,
           struct symbol *sym_parent,
           struct branch_info *bi,
           struct mem_info *mi,
           struct block_info *block_info,
           struct perf_sample *sample,
           bool sample_self,
           struct hist_entry_ops *ops)
{
    struct namespaces *ns = thread__namespaces(al->thread);
    struct hist_entry entry = {
        .thread = al->thread,
        .comm = thread__comm(al->thread),
        .cgroup_id = {
            .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
            .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
        },
        .cgroup = sample->cgroup,
        .ms = {
            .maps   = al->maps,
            .map    = al->map,
            .sym    = al->sym,
        },
        .srcline = (char *) al->srcline,
        .socket  = al->socket,
        .cpu     = al->cpu,
        .cpumode = al->cpumode,
        .ip  = al->addr,
        .level   = al->level,
        .code_page_size = sample->code_page_size,
        .stat = {
            .nr_events = 1,
            .period = sample->period,
        },
        .parent = sym_parent,
        .filtered = symbol__parent_filter(sym_parent) | al->filtered,
        .hists  = hists,
        .branch_info = bi,
        .mem_info = mi,
        .block_info = block_info,
        .transaction = sample->transaction,
        .raw_data = sample->raw_data,
        .raw_size = sample->raw_size,
        .ops = ops,
        .time = hist_time(sample->time),
        .weight = sample->weight,
        .ins_lat = sample->ins_lat,
        .p_stage_cyc = sample->p_stage_cyc,
    }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

    if (!hists->has_callchains && he && he->callchain_size != 0)
        hists->has_callchains = true;
    if (he && symbol_conf.res_sample)
        hists__res_sample(he, sample);
    return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                    struct addr_location *al,
                    struct symbol *sym_parent,
                    struct branch_info *bi,
                    struct mem_info *mi,
                    struct perf_sample *sample,
                    bool sample_self)
{
    return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                    struct hist_entry_ops *ops,
                    struct addr_location *al,
                    struct symbol *sym_parent,
                    struct branch_info *bi,
                    struct mem_info *mi,
                    struct perf_sample *sample,
                    bool sample_self)
{
    return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                      struct addr_location *al,
                      struct block_info *block_info)
{
    struct hist_entry entry = {
        .block_info = block_info,
        .hists = hists,
        .ms = {
            .maps = al->maps,
            .map = al->map,
            .sym = al->sym,
        },
    }, *he = hists__findnew_entry(hists, &entry, al, false);

    return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
            struct addr_location *al __maybe_unused)
{
    return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
            struct addr_location *al __maybe_unused)
{
    return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
    struct perf_sample *sample = iter->sample;
    struct mem_info *mi;

    mi = sample__resolve_mem(sample, al);
    if (mi == NULL)
        return -ENOMEM;

    iter->priv = mi;
    return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
    u64 cost;
    struct mem_info *mi = iter->priv;
    struct hists *hists = evsel__hists(iter->evsel);
    struct perf_sample *sample = iter->sample;
    struct hist_entry *he;

    if (mi == NULL)
        return -EINVAL;

    cost = sample->weight;
    if (!cost)
        cost = 1;

    /*
     * We must pass period=weight in order to get the correct
     * sorting from hists__collapse_resort(), which is based solely
     * on periods.  We want sorting to be done on nr_events * weight,
     * and this is achieved indirectly by passing period=weight here
     * and via the he_stat__add_period() function.
     */
    sample->period = cost;

    he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                  sample, true);
    if (!he)
        return -ENOMEM;

    iter->he = he;
    return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
              struct addr_location *al __maybe_unused)
{
    struct evsel *evsel = iter->evsel;
    struct hists *hists = evsel__hists(evsel);
    struct hist_entry *he = iter->he;
    int err = -EINVAL;

    if (he == NULL)
        goto out;

    hists__inc_nr_samples(hists, he->filtered);

    err = hist_entry__append_callchain(he, iter->sample);

out:
    /*
     * We don't need to free iter->priv (mem_info) here since the mem info
     * was either already freed in hists__findnew_entry() or passed to a
     * new hist entry by hist_entry__new().
     */
    iter->priv = NULL;

    iter->he = NULL;
    return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
    struct branch_info *bi;
    struct perf_sample *sample = iter->sample;

    bi = sample__resolve_bstack(sample, al);
    if (!bi)
        return -ENOMEM;

    iter->curr = 0;
    iter->total = sample->branch_stack->nr;

    iter->priv = bi;
    return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                 struct addr_location *al __maybe_unused)
{
    return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
    struct branch_info *bi = iter->priv;
    int i = iter->curr;

    if (bi == NULL)
        return 0;

    if (iter->curr >= iter->total)
        return 0;

    al->maps = bi[i].to.ms.maps;
    al->map = bi[i].to.ms.map;
    al->sym = bi[i].to.ms.sym;
    al->addr = bi[i].to.addr;
    return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
    struct branch_info *bi;
    struct evsel *evsel = iter->evsel;
    struct hists *hists = evsel__hists(evsel);
    struct perf_sample *sample = iter->sample;
    struct hist_entry *he = NULL;
    int i = iter->curr;
    int err = 0;

    bi = iter->priv;

    if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
        goto out;

    /*
     * The report shows the percentage of total branches captured
     * and not events sampled. Thus we use a pseudo period of 1.
     */
    sample->period = 1;
    sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

    he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                  sample, true);
    if (he == NULL)
        return -ENOMEM;

    hists__inc_nr_samples(hists, he->filtered);

out:
    iter->he = he;
    iter->curr++;
    return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
             struct addr_location *al __maybe_unused)
{
    zfree(&iter->priv);
    iter->he = NULL;

    return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
              struct addr_location *al __maybe_unused)
{
    return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
    struct evsel *evsel = iter->evsel;
    struct perf_sample *sample = iter->sample;
    struct hist_entry *he;

    he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                  sample, true);
    if (he == NULL)
        return -ENOMEM;

    iter->he = he;
    return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
             struct addr_location *al __maybe_unused)
{
    struct hist_entry *he = iter->he;
    struct evsel *evsel = iter->evsel;
    struct perf_sample *sample = iter->sample;

    if (he == NULL)
        return 0;

    iter->he = NULL;

    hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

    return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                  struct addr_location *al __maybe_unused)
{
    struct hist_entry **he_cache;

    callchain_cursor_commit(&callchain_cursor);

    /*
     * This is for detecting cycles or recursion so that they're
     * accumulated only once, preventing entries from showing more
     * than 100% overhead.
     */
    he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
    if (he_cache == NULL)
        return -ENOMEM;

    iter->priv = he_cache;
    iter->curr = 0;

    return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                 struct addr_location *al)
{
    struct evsel *evsel = iter->evsel;
    struct hists *hists = evsel__hists(evsel);
    struct perf_sample *sample = iter->sample;
    struct hist_entry **he_cache = iter->priv;
    struct hist_entry *he;
    int err = 0;

    he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                  sample, true);
    if (he == NULL)
        return -ENOMEM;

    iter->he = he;
    he_cache[iter->curr++] = he;

    hist_entry__append_callchain(he, sample);

    /*
     * We need to re-initialize the cursor since callchain_append()
     * advanced the cursor to the end.
     */
    callchain_cursor_commit(&callchain_cursor);

    hists__inc_nr_samples(hists, he->filtered);

    return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
               struct addr_location *al)
{
    struct callchain_cursor_node *node;

    node = callchain_cursor_current(&callchain_cursor);
    if (node == NULL)
        return 0;

    return fill_callchain_info(al, node, iter->hide_unresolved);
}

static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
               struct hist_entry *right)
{
    struct symbol *sym_l = left->ms.sym;
    struct symbol *sym_r = right->ms.sym;

    if (!sym_l && !sym_r)
        return left->ip != right->ip;

    return !!_sort__sym_cmp(sym_l, sym_r);
}


static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                   struct addr_location *al)
{
    struct evsel *evsel = iter->evsel;
    struct perf_sample *sample = iter->sample;
    struct hist_entry **he_cache = iter->priv;
    struct hist_entry *he;
    struct hist_entry he_tmp = {
        .hists = evsel__hists(evsel),
        .cpu = al->cpu,
        .thread = al->thread,
        .comm = thread__comm(al->thread),
        .ip = al->addr,
        .ms = {
            .maps = al->maps,
            .map = al->map,
            .sym = al->sym,
        },
        .srcline = (char *) al->srcline,
        .parent = iter->parent,
        .raw_data = sample->raw_data,
        .raw_size = sample->raw_size,
    };
    int i;
    struct callchain_cursor cursor;
    bool fast = hists__has(he_tmp.hists, sym);

    callchain_cursor_snapshot(&cursor, &callchain_cursor);

    callchain_cursor_advance(&callchain_cursor);

    /*
     * Check if there are duplicate entries in the callchain.
     * It's possible that it has cycles or recursive calls.
     */
    for (i = 0; i < iter->curr; i++) {
        /*
         * In most cases there are no duplicate entries in the
         * callchain, as the symbols usually differ.  Do a quick
         * check on the symbols first.
         */
        if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
            continue;

        if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
            /* to avoid calling the callback function */
            iter->he = NULL;
            return 0;
        }
    }

    he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                  sample, false);
    if (he == NULL)
        return -ENOMEM;

    iter->he = he;
    he_cache[iter->curr++] = he;

    if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
        callchain_append(he->callchain, &cursor, sample->period);
    return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                 struct addr_location *al __maybe_unused)
{
    zfree(&iter->priv);
    iter->he = NULL;

    return 0;
}

const struct hist_iter_ops hist_iter_mem = {
    .prepare_entry      = iter_prepare_mem_entry,
    .add_single_entry   = iter_add_single_mem_entry,
    .next_entry         = iter_next_nop_entry,
    .add_next_entry     = iter_add_next_nop_entry,
    .finish_entry       = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
    .prepare_entry      = iter_prepare_branch_entry,
    .add_single_entry   = iter_add_single_branch_entry,
    .next_entry         = iter_next_branch_entry,
    .add_next_entry     = iter_add_next_branch_entry,
    .finish_entry       = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
    .prepare_entry      = iter_prepare_normal_entry,
    .add_single_entry   = iter_add_single_normal_entry,
    .next_entry         = iter_next_nop_entry,
    .add_next_entry     = iter_add_next_nop_entry,
    .finish_entry       = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
    .prepare_entry      = iter_prepare_cumulative_entry,
    .add_single_entry   = iter_add_single_cumulative_entry,
    .next_entry         = iter_next_cumulative_entry,
    .add_next_entry     = iter_add_next_cumulative_entry,
    .finish_entry       = iter_finish_cumulative_entry,
};

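/*
 * Drive one sample through an iterator: resolve its callchain, then
 * call prepare_entry, add_single_entry, and, for each further
 * location reported by next_entry, add_next_entry, before
 * finish_entry cleans up.  add_entry_cb (when set) runs for every
 * entry created along the way.  A minimal usage sketch (hypothetical
 * caller; only fields used in this file are shown, max_stack is
 * caller-provided):
 *
 *    struct hist_entry_iter iter = {
 *        .evsel  = evsel,
 *        .sample = sample,
 *        .ops    = &hist_iter_normal,
 *    };
 *    err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */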
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
             int max_stack_depth, void *arg)
{
    int err, err2;
    struct map *alm = NULL;

    if (al)
        alm = map__get(al->map);

    err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                    iter->evsel, al, max_stack_depth);
    if (err) {
        map__put(alm);
        return err;
    }

    err = iter->ops->prepare_entry(iter, al);
    if (err)
        goto out;

    err = iter->ops->add_single_entry(iter, al);
    if (err)
        goto out;

    if (iter->he && iter->add_entry_cb) {
        err = iter->add_entry_cb(iter, al, true, arg);
        if (err)
            goto out;
    }

    while (iter->ops->next_entry(iter, al)) {
        err = iter->ops->add_next_entry(iter, al);
        if (err)
            break;

        if (iter->he && iter->add_entry_cb) {
            err = iter->add_entry_cb(iter, al, false, arg);
            if (err)
                goto out;
        }
    }

out:
    err2 = iter->ops->finish_entry(iter, al);
    if (!err)
        err = err2;

    map__put(alm);

    return err;
}

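/*
 * Three comparison stages take an entry from input to output:
 * hist_entry__cmp() orders the input tree, hist_entry__collapse()
 * decides which entries get merged while collapsing, and
 * hist_entry__sort() (further below) orders the output tree.  Each
 * walks the configured sort list and stops at the first key that
 * differs.
 */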
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
    struct hists *hists = left->hists;
    struct perf_hpp_fmt *fmt;
    int64_t cmp = 0;

    hists__for_each_sort_list(hists, fmt) {
        if (perf_hpp__is_dynamic_entry(fmt) &&
            !perf_hpp__defined_dynamic_entry(fmt, hists))
            continue;

        cmp = fmt->cmp(fmt, left, right);
        if (cmp)
            break;
    }

    return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
    struct hists *hists = left->hists;
    struct perf_hpp_fmt *fmt;
    int64_t cmp = 0;

    hists__for_each_sort_list(hists, fmt) {
        if (perf_hpp__is_dynamic_entry(fmt) &&
            !perf_hpp__defined_dynamic_entry(fmt, hists))
            continue;

        cmp = fmt->collapse(fmt, left, right);
        if (cmp)
            break;
    }

    return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
    struct hist_entry_ops *ops = he->ops;

    thread__zput(he->thread);
    map__zput(he->ms.map);

    if (he->branch_info) {
        map__zput(he->branch_info->from.ms.map);
        map__zput(he->branch_info->to.ms.map);
        free_srcline(he->branch_info->srcline_from);
        free_srcline(he->branch_info->srcline_to);
        zfree(&he->branch_info);
    }

    if (he->mem_info) {
        map__zput(he->mem_info->iaddr.ms.map);
        map__zput(he->mem_info->daddr.ms.map);
        mem_info__zput(he->mem_info);
    }

    if (he->block_info)
        block_info__zput(he->block_info);

    zfree(&he->res_samples);
    zfree(&he->stat_acc);
    free_srcline(he->srcline);
    if (he->srcfile && he->srcfile[0])
        zfree(&he->srcfile);
    free_callchain(he->callchain);
    zfree(&he->trace_output);
    zfree(&he->raw_data);
    ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces, because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                   struct perf_hpp_fmt *fmt, int printed)
{
    if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
        const int width = fmt->width(fmt, hpp, he->hists);
        if (printed < width) {
            advance_hpp(hpp, printed);
            printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
        }
    }

    return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
    return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                        enum hist_filter type,
                        fmt_chk_fn check)
{
    struct perf_hpp_fmt *fmt;
    bool type_match = false;
    struct hist_entry *parent = he->parent_he;

    switch (type) {
    case HIST_FILTER__THREAD:
        if (symbol_conf.comm_list == NULL &&
            symbol_conf.pid_list == NULL &&
            symbol_conf.tid_list == NULL)
            return;
        break;
    case HIST_FILTER__DSO:
        if (symbol_conf.dso_list == NULL)
            return;
        break;
    case HIST_FILTER__SYMBOL:
        if (symbol_conf.sym_list == NULL)
            return;
        break;
    case HIST_FILTER__PARENT:
    case HIST_FILTER__GUEST:
    case HIST_FILTER__HOST:
    case HIST_FILTER__SOCKET:
    case HIST_FILTER__C2C:
    default:
        return;
    }

    /* if it's filtered by its own fmt, it has to have filter bits */
    perf_hpp_list__for_each_format(he->hpp_list, fmt) {
        if (check(fmt)) {
            type_match = true;
            break;
        }
    }

    if (type_match) {
        /*
         * If the filter is for the current level entry, propagate
         * the filter marker to its parents.  The marker bit was
         * already set by default, so it only needs to be cleared
         * on non-filtered entries.
         */
        if (!(he->filtered & (1 << type))) {
            while (parent) {
                parent->filtered &= ~(1 << type);
                parent = parent->parent_he;
            }
        }
    } else {
        /*
         * If the current entry doesn't have matching formats, set
         * the filter marker for upper level entries.  It will be
         * cleared if any of its lower level entries is not filtered.
         *
         * A lower-level entry inherits its parent's filter bit, so
         * that lower level entries of a non-filtered entry won't
         * set the filter marker.
         */
        if (parent == NULL)
            he->filtered |= (1 << type);
        else
            he->filtered |= (parent->filtered & (1 << type));
    }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
    hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                        check_thread_entry);

    hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                        perf_hpp__is_dso_entry);

    hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                        perf_hpp__is_sym_entry);

    hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                         struct rb_root_cached *root,
                         struct hist_entry *he,
                         struct hist_entry *parent_he,
                         struct perf_hpp_list *hpp_list)
{
    struct rb_node **p = &root->rb_root.rb_node;
    struct rb_node *parent = NULL;
    struct hist_entry *iter, *new;
    struct perf_hpp_fmt *fmt;
    int64_t cmp;
    bool leftmost = true;

    while (*p != NULL) {
        parent = *p;
        iter = rb_entry(parent, struct hist_entry, rb_node_in);

        cmp = 0;
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
            cmp = fmt->collapse(fmt, iter, he);
            if (cmp)
                break;
        }

        if (!cmp) {
            he_stat__add_stat(&iter->stat, &he->stat);
            return iter;
        }

        if (cmp < 0)
            p = &parent->rb_left;
        else {
            p = &parent->rb_right;
            leftmost = false;
        }
    }

    new = hist_entry__new(he, true);
    if (new == NULL)
        return NULL;

    hists->nr_entries++;

    /* save related format list for output */
    new->hpp_list = hpp_list;
    new->parent_he = parent_he;

    hist_entry__apply_hierarchy_filters(new);

    /* some fields are now passed to 'new' */
    perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
        if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
            he->trace_output = NULL;
        else
            new->trace_output = NULL;

        if (perf_hpp__is_srcline_entry(fmt))
            he->srcline = NULL;
        else
            new->srcline = NULL;

        if (perf_hpp__is_srcfile_entry(fmt))
            he->srcfile = NULL;
        else
            new->srcfile = NULL;
    }

    rb_link_node(&new->rb_node_in, parent, p);
    rb_insert_color_cached(&new->rb_node_in, root, leftmost);
    return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
                     struct rb_root_cached *root,
                     struct hist_entry *he)
{
    struct perf_hpp_list_node *node;
    struct hist_entry *new_he = NULL;
    struct hist_entry *parent = NULL;
    int depth = 0;
    int ret = 0;

    list_for_each_entry(node, &hists->hpp_formats, list) {
        /* skip period (overhead) and elided columns */
        if (node->level == 0 || node->skip)
            continue;

        /* insert copy of 'he' for each fmt into the hierarchy */
        new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
        if (new_he == NULL) {
            ret = -1;
            break;
        }

        root = &new_he->hroot_in;
        new_he->depth = depth++;
        parent = new_he;
    }

    if (new_he) {
        new_he->leaf = true;

        if (hist_entry__has_callchains(new_he) &&
            symbol_conf.use_callchain) {
            callchain_cursor_reset(&callchain_cursor);
            if (callchain_merge(&callchain_cursor,
                        new_he->callchain,
                        he->callchain) < 0)
                ret = -1;
        }
    }

    /* 'he' is no longer used */
    hist_entry__delete(he);

    /* return 0 (or -1) since it already applied filters */
    return ret;
}

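/*
 * Insert 'he' into the collapsed tree (or into the hierarchy when
 * --hierarchy is in effect).  If an equal entry already exists, the
 * stats and callchains are merged and 'he' is freed; the return value
 * distinguishes "merged" (0) from "newly inserted" (1) so the caller
 * knows whether filters still need to be applied.
 */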
static int hists__collapse_insert_entry(struct hists *hists,
                    struct rb_root_cached *root,
                    struct hist_entry *he)
{
    struct rb_node **p = &root->rb_root.rb_node;
    struct rb_node *parent = NULL;
    struct hist_entry *iter;
    int64_t cmp;
    bool leftmost = true;

    if (symbol_conf.report_hierarchy)
        return hists__hierarchy_insert_entry(hists, root, he);

    while (*p != NULL) {
        parent = *p;
        iter = rb_entry(parent, struct hist_entry, rb_node_in);

        cmp = hist_entry__collapse(iter, he);

        if (!cmp) {
            int ret = 0;

            he_stat__add_stat(&iter->stat, &he->stat);
            if (symbol_conf.cumulate_callchain)
                he_stat__add_stat(iter->stat_acc, he->stat_acc);

            if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
                callchain_cursor_reset(&callchain_cursor);
                if (callchain_merge(&callchain_cursor,
                            iter->callchain,
                            he->callchain) < 0)
                    ret = -1;
            }
            hist_entry__delete(he);
            return ret;
        }

        if (cmp < 0)
            p = &(*p)->rb_left;
        else {
            p = &(*p)->rb_right;
            leftmost = false;
        }
    }
    hists->nr_entries++;

    rb_link_node(&he->rb_node_in, parent, p);
    rb_insert_color_cached(&he->rb_node_in, root, leftmost);
    return 1;
}

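/*
 * The input side is double buffered: hists->entries_in points into a
 * two-element array of roots, and rotating it under hists->lock lets
 * a collector thread keep adding entries to one root while resorting
 * drains the other (this is what keeps 'perf top' refreshes safe).
 */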
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
    struct rb_root_cached *root;

    pthread_mutex_lock(&hists->lock);

    root = hists->entries_in;
    if (++hists->entries_in > &hists->entries_in_array[1])
        hists->entries_in = &hists->entries_in_array[0];

    pthread_mutex_unlock(&hists->lock);

    return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
    hists__filter_entry_by_dso(hists, he);
    hists__filter_entry_by_thread(hists, he);
    hists__filter_entry_by_symbol(hists, he);
    hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
    struct rb_root_cached *root;
    struct rb_node *next;
    struct hist_entry *n;
    int ret;

    if (!hists__has(hists, need_collapse))
        return 0;

    hists->nr_entries = 0;

    root = hists__get_rotate_entries_in(hists);

    next = rb_first_cached(root);

    while (next) {
        if (session_done())
            break;
        n = rb_entry(next, struct hist_entry, rb_node_in);
        next = rb_next(&n->rb_node_in);

        rb_erase_cached(&n->rb_node_in, root);
        ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
        if (ret < 0)
            return -1;

        if (ret) {
            /*
             * If it wasn't combined with one of the entries already
             * collapsed, we need to apply the filters that may have
             * been set by, say, the hist_browser.
             */
            hists__apply_filters(hists, n);
        }
        if (prog)
            ui_progress__update(prog, 1);
    }
    return 0;
}

static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
    struct hists *hists = a->hists;
    struct perf_hpp_fmt *fmt;
    int64_t cmp = 0;

    hists__for_each_sort_list(hists, fmt) {
        if (perf_hpp__should_skip(fmt, a->hists))
            continue;

        cmp = fmt->sort(fmt, a, b);
        if (cmp)
            break;
    }

    return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
    hists->nr_non_filtered_entries = 0;
    hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
    hists->nr_entries = 0;
    hists->stats.total_period = 0;

    hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
    hists->nr_non_filtered_entries++;
    hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
    if (!h->filtered)
        hists__inc_filter_stats(hists, h);

    hists->nr_entries++;
    hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
    struct rb_node *node;
    struct hist_entry *he;

    node = rb_first_cached(&hists->entries);

    hists->stats.total_period = 0;
    hists->stats.total_non_filtered_period = 0;

1742     /*
1743      * Recalculate the total period using top-level entries only,
1744      * since lower-level entries only account for non-filtered
1745      * entries while upper-level entries hold the sum of both.
1746      */
1747     while (node) {
1748         he = rb_entry(node, struct hist_entry, rb_node);
1749         node = rb_next(node);
1750 
1751         hists->stats.total_period += he->stat.period;
1752         if (!he->filtered)
1753             hists->stats.total_non_filtered_period += he->stat.period;
1754     }
1755 }
1756 
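/*
 * Link @he into a per-level output tree in descending sort order
 * (entries that compare greater go to the left), then let dynamic sort
 * entries update their column widths.
 */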
1757 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1758                       struct hist_entry *he)
1759 {
1760     struct rb_node **p = &root->rb_root.rb_node;
1761     struct rb_node *parent = NULL;
1762     struct hist_entry *iter;
1763     struct perf_hpp_fmt *fmt;
1764     bool leftmost = true;
1765 
1766     while (*p != NULL) {
1767         parent = *p;
1768         iter = rb_entry(parent, struct hist_entry, rb_node);
1769 
1770         if (hist_entry__sort(he, iter) > 0)
1771             p = &parent->rb_left;
1772         else {
1773             p = &parent->rb_right;
1774             leftmost = false;
1775         }
1776     }
1777 
1778     rb_link_node(&he->rb_node, parent, p);
1779     rb_insert_color_cached(&he->rb_node, root, leftmost);
1780 
1781     /* update column width of dynamic entry */
1782     perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1783         if (perf_hpp__is_dynamic_entry(fmt))
1784             fmt->sort(fmt, he, NULL);
1785     }
1786 }
1787 
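/*
 * Recursively move the entries of one hierarchy level from hroot_in to
 * hroot_out, updating entry counts and column widths on the way and
 * resolving callchains at the leaves.
 */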
1788 static void hists__hierarchy_output_resort(struct hists *hists,
1789                        struct ui_progress *prog,
1790                        struct rb_root_cached *root_in,
1791                        struct rb_root_cached *root_out,
1792                        u64 min_callchain_hits,
1793                        bool use_callchain)
1794 {
1795     struct rb_node *node;
1796     struct hist_entry *he;
1797 
1798     *root_out = RB_ROOT_CACHED;
1799     node = rb_first_cached(root_in);
1800 
1801     while (node) {
1802         he = rb_entry(node, struct hist_entry, rb_node_in);
1803         node = rb_next(node);
1804 
1805         hierarchy_insert_output_entry(root_out, he);
1806 
1807         if (prog)
1808             ui_progress__update(prog, 1);
1809 
1810         hists->nr_entries++;
1811         if (!he->filtered) {
1812             hists->nr_non_filtered_entries++;
1813             hists__calc_col_len(hists, he);
1814         }
1815 
1816         if (!he->leaf) {
1817             hists__hierarchy_output_resort(hists, prog,
1818                                &he->hroot_in,
1819                                &he->hroot_out,
1820                                min_callchain_hits,
1821                                use_callchain);
1822             continue;
1823         }
1824 
1825         if (!use_callchain)
1826             continue;
1827 
1828         if (callchain_param.mode == CHAIN_GRAPH_REL) {
1829             u64 total = he->stat.period;
1830 
1831             if (symbol_conf.cumulate_callchain)
1832                 total = he->stat_acc->period;
1833 
1834             min_callchain_hits = total * (callchain_param.min_percent / 100);
1835         }
1836 
1837         callchain_param.sort(&he->sorted_chain, he->callchain,
1838                      min_callchain_hits, &callchain_param);
1839     }
1840 }
1841 
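/*
 * Flat (non-hierarchy) output insertion: resolve the callchain first if
 * requested, recomputing the threshold per entry in CHAIN_GRAPH_REL
 * mode, then link the entry in descending sort order.
 */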
1842 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1843                      struct hist_entry *he,
1844                      u64 min_callchain_hits,
1845                      bool use_callchain)
1846 {
1847     struct rb_node **p = &entries->rb_root.rb_node;
1848     struct rb_node *parent = NULL;
1849     struct hist_entry *iter;
1850     struct perf_hpp_fmt *fmt;
1851     bool leftmost = true;
1852 
1853     if (use_callchain) {
1854         if (callchain_param.mode == CHAIN_GRAPH_REL) {
1855             u64 total = he->stat.period;
1856 
1857             if (symbol_conf.cumulate_callchain)
1858                 total = he->stat_acc->period;
1859 
1860             min_callchain_hits = total * (callchain_param.min_percent / 100);
1861         }
1862         callchain_param.sort(&he->sorted_chain, he->callchain,
1863                       min_callchain_hits, &callchain_param);
1864     }
1865 
1866     while (*p != NULL) {
1867         parent = *p;
1868         iter = rb_entry(parent, struct hist_entry, rb_node);
1869 
1870         if (hist_entry__sort(he, iter) > 0)
1871             p = &(*p)->rb_left;
1872         else {
1873             p = &(*p)->rb_right;
1874             leftmost = false;
1875         }
1876     }
1877 
1878     rb_link_node(&he->rb_node, parent, p);
1879     rb_insert_color_cached(&he->rb_node, entries, leftmost);
1880 
1881     perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1882         if (perf_hpp__is_dynamic_entry(fmt) &&
1883             perf_hpp__defined_dynamic_entry(fmt, he->hists))
1884             fmt->sort(fmt, he, NULL);  /* update column width */
1885     }
1886 }
1887 
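/*
 * Rebuild hists->entries in output order from the collapsed (or input)
 * tree.  The optional callback may reject individual entries, which
 * callers can use e.g. to skip entries before display.
 */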
1888 static void output_resort(struct hists *hists, struct ui_progress *prog,
1889               bool use_callchain, hists__resort_cb_t cb,
1890               void *cb_arg)
1891 {
1892     struct rb_root_cached *root;
1893     struct rb_node *next;
1894     struct hist_entry *n;
1895     u64 callchain_total;
1896     u64 min_callchain_hits;
1897 
1898     callchain_total = hists->callchain_period;
1899     if (symbol_conf.filter_relative)
1900         callchain_total = hists->callchain_non_filtered_period;
1901 
1902     min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1903 
1904     hists__reset_stats(hists);
1905     hists__reset_col_len(hists);
1906 
1907     if (symbol_conf.report_hierarchy) {
1908         hists__hierarchy_output_resort(hists, prog,
1909                            &hists->entries_collapsed,
1910                            &hists->entries,
1911                            min_callchain_hits,
1912                            use_callchain);
1913         hierarchy_recalc_total_periods(hists);
1914         return;
1915     }
1916 
1917     if (hists__has(hists, need_collapse))
1918         root = &hists->entries_collapsed;
1919     else
1920         root = hists->entries_in;
1921 
1922     next = rb_first_cached(root);
1923     hists->entries = RB_ROOT_CACHED;
1924 
1925     while (next) {
1926         n = rb_entry(next, struct hist_entry, rb_node_in);
1927         next = rb_next(&n->rb_node_in);
1928 
1929         if (cb && cb(n, cb_arg))
1930             continue;
1931 
1932         __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1933         hists__inc_stats(hists, n);
1934 
1935         if (!n->filtered)
1936             hists__calc_col_len(hists, n);
1937 
1938         if (prog)
1939             ui_progress__update(prog, 1);
1940     }
1941 }
1942 
1943 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1944                  hists__resort_cb_t cb, void *cb_arg)
1945 {
1946     bool use_callchain;
1947 
1948     if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1949         use_callchain = evsel__has_callchain(evsel);
1950     else
1951         use_callchain = symbol_conf.use_callchain;
1952 
1953     use_callchain |= symbol_conf.show_branchflag_count;
1954 
1955     output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1956 }
1957 
1958 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1959 {
1960     return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1961 }
1962 
1963 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1964 {
1965     output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1966 }
1967 
1968 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1969                  hists__resort_cb_t cb)
1970 {
1971     output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1972 }
1973 
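/*
 * Hierarchy browsing helper: decide whether a tree walk may descend
 * into an entry's children, honoring the fold state unless the caller
 * forces a direction.
 */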
1974 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1975 {
1976     if (he->leaf || hmd == HMD_FORCE_SIBLING)
1977         return false;
1978 
1979     if (he->unfolded || hmd == HMD_FORCE_CHILD)
1980         return true;
1981 
1982     return false;
1983 }
1984 
1985 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1986 {
1987     struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1988 
1989     while (can_goto_child(he, HMD_NORMAL)) {
1990         node = rb_last(&he->hroot_out.rb_root);
1991         he = rb_entry(node, struct hist_entry, rb_node);
1992     }
1993     return node;
1994 }
1995 
1996 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1997 {
1998     struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1999 
2000     if (can_goto_child(he, hmd))
2001         node = rb_first_cached(&he->hroot_out);
2002     else
2003         node = rb_next(node);
2004 
2005     while (node == NULL) {
2006         he = he->parent_he;
2007         if (he == NULL)
2008             break;
2009 
2010         node = rb_next(&he->rb_node);
2011     }
2012     return node;
2013 }
2014 
2015 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2016 {
2017     struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2018 
2019     node = rb_prev(node);
2020     if (node)
2021         return rb_hierarchy_last(node);
2022 
2023     he = he->parent_he;
2024     if (he == NULL)
2025         return NULL;
2026 
2027     return &he->rb_node;
2028 }
2029 
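/*
 * Check whether @he has at least one unfiltered child whose percentage
 * is at or above @limit, i.e. whether expanding it would show anything.
 */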
2030 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2031 {
2032     struct rb_node *node;
2033     struct hist_entry *child;
2034     float percent;
2035 
2036     if (he->leaf)
2037         return false;
2038 
2039     node = rb_first_cached(&he->hroot_out);
2040     child = rb_entry(node, struct hist_entry, rb_node);
2041 
2042     while (node && child->filtered) {
2043         node = rb_next(node);
2044         child = rb_entry(node, struct hist_entry, rb_node);
2045     }
2046 
2047     if (node)
2048         percent = hist_entry__get_percent_limit(child);
2049     else
2050         percent = 0;
2051 
2052     return node && percent >= limit;
2053 }
2054 
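/*
 * Clear @filter for @h.  In hierarchy mode the entry's period is also
 * added back to all parents and the filter bit cleared on them.  Once
 * the entry is completely unfiltered, it is folded and re-added to the
 * non-filtered stats and the column width calculation.
 */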
2055 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2056                        enum hist_filter filter)
2057 {
2058     h->filtered &= ~(1 << filter);
2059 
2060     if (symbol_conf.report_hierarchy) {
2061         struct hist_entry *parent = h->parent_he;
2062 
2063         while (parent) {
2064             he_stat__add_stat(&parent->stat, &h->stat);
2065 
2066             parent->filtered &= ~(1 << filter);
2067 
2068             if (parent->filtered)
2069                 goto next;
2070 
2071             /* force fold unfiltered entry for simplicity */
2072             parent->unfolded = false;
2073             parent->has_no_entry = false;
2074             parent->row_offset = 0;
2075             parent->nr_rows = 0;
2076 next:
2077             parent = parent->parent_he;
2078         }
2079     }
2080 
2081     if (h->filtered)
2082         return;
2083 
2084     /* force fold unfiltered entry for simplicity */
2085     h->unfolded = false;
2086     h->has_no_entry = false;
2087     h->row_offset = 0;
2088     h->nr_rows = 0;
2089 
2090     hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2091 
2092     hists__inc_filter_stats(hists, h);
2093     hists__calc_col_len(hists, h);
2094 }
2095 
2097 static bool hists__filter_entry_by_dso(struct hists *hists,
2098                        struct hist_entry *he)
2099 {
2100     if (hists->dso_filter != NULL &&
2101         (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2102         he->filtered |= (1 << HIST_FILTER__DSO);
2103         return true;
2104     }
2105 
2106     return false;
2107 }
2108 
2109 static bool hists__filter_entry_by_thread(struct hists *hists,
2110                       struct hist_entry *he)
2111 {
2112     if (hists->thread_filter != NULL &&
2113         he->thread != hists->thread_filter) {
2114         he->filtered |= (1 << HIST_FILTER__THREAD);
2115         return true;
2116     }
2117 
2118     return false;
2119 }
2120 
2121 static bool hists__filter_entry_by_symbol(struct hists *hists,
2122                       struct hist_entry *he)
2123 {
2124     if (hists->symbol_filter_str != NULL &&
2125         (!he->ms.sym || strstr(he->ms.sym->name,
2126                    hists->symbol_filter_str) == NULL)) {
2127         he->filtered |= (1 << HIST_FILTER__SYMBOL);
2128         return true;
2129     }
2130 
2131     return false;
2132 }
2133 
2134 static bool hists__filter_entry_by_socket(struct hists *hists,
2135                       struct hist_entry *he)
2136 {
2137     if ((hists->socket_filter > -1) &&
2138         (he->socket != hists->socket_filter)) {
2139         he->filtered |= (1 << HIST_FILTER__SOCKET);
2140         return true;
2141     }
2142 
2143     return false;
2144 }
2145 
2146 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2147 
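/*
 * Re-run one filter predicate over the flat output tree, resetting the
 * non-filtered stats first and unfiltering every entry the predicate
 * does not reject.
 */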
2148 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2149 {
2150     struct rb_node *nd;
2151 
2152     hists->stats.nr_non_filtered_samples = 0;
2153 
2154     hists__reset_filter_stats(hists);
2155     hists__reset_col_len(hists);
2156 
2157     for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2158         struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2159 
2160         if (filter(hists, h))
2161             continue;
2162 
2163         hists__remove_entry_filter(hists, h, type);
2164     }
2165 }
2166 
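/*
 * Re-insert @he, and recursively its children, into @root according to
 * the current sort order; needed because filtering can change periods
 * and therefore the ordering.
 */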
2167 static void resort_filtered_entry(struct rb_root_cached *root,
2168                   struct hist_entry *he)
2169 {
2170     struct rb_node **p = &root->rb_root.rb_node;
2171     struct rb_node *parent = NULL;
2172     struct hist_entry *iter;
2173     struct rb_root_cached new_root = RB_ROOT_CACHED;
2174     struct rb_node *nd;
2175     bool leftmost = true;
2176 
2177     while (*p != NULL) {
2178         parent = *p;
2179         iter = rb_entry(parent, struct hist_entry, rb_node);
2180 
2181         if (hist_entry__sort(he, iter) > 0)
2182             p = &(*p)->rb_left;
2183         else {
2184             p = &(*p)->rb_right;
2185             leftmost = false;
2186         }
2187     }
2188 
2189     rb_link_node(&he->rb_node, parent, p);
2190     rb_insert_color_cached(&he->rb_node, root, leftmost);
2191 
2192     if (he->leaf || he->filtered)
2193         return;
2194 
2195     nd = rb_first_cached(&he->hroot_out);
2196     while (nd) {
2197         struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2198 
2199         nd = rb_next(nd);
2200         rb_erase_cached(&h->rb_node, &he->hroot_out);
2201 
2202         resort_filtered_entry(&new_root, h);
2203     }
2204 
2205     he->hroot_out = new_root;
2206 }
2207 
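/*
 * Apply a filter to the hierarchy output tree.  @arg is the
 * filter-specific argument (thread, dso, symbol string or socket id)
 * passed through to hist_entry__filter().
 */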
2208 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2209 {
2210     struct rb_node *nd;
2211     struct rb_root_cached new_root = RB_ROOT_CACHED;
2212 
2213     hists->stats.nr_non_filtered_samples = 0;
2214 
2215     hists__reset_filter_stats(hists);
2216     hists__reset_col_len(hists);
2217 
2218     nd = rb_first_cached(&hists->entries);
2219     while (nd) {
2220         struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2221         int ret;
2222 
2223         ret = hist_entry__filter(h, type, arg);
2224 
2225         /*
2226          * case 1. non-matching type
2227          * zero out the period, set filter marker and move to child
2228          */
2229         if (ret < 0) {
2230             memset(&h->stat, 0, sizeof(h->stat));
2231             h->filtered |= (1 << type);
2232 
2233             nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2234         }
2235         /*
2236          * case 2. matched type (filter out)
2237          * set filter marker and move to next
2238          */
2239         else if (ret == 1) {
2240             h->filtered |= (1 << type);
2241 
2242             nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2243         }
2244         /*
2245          * case 3. ok (not filtered)
2246          * add period to hists and parents, erase the filter marker
2247          * and move to next sibling
2248          */
2249         else {
2250             hists__remove_entry_filter(hists, h, type);
2251 
2252             nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2253         }
2254     }
2255 
2256     hierarchy_recalc_total_periods(hists);
2257 
2258     /*
2259      * Resort the output after applying a new filter, since a filter in
2260      * a lower hierarchy can change periods in an upper hierarchy.
2261      */
2262     nd = rb_first_cached(&hists->entries);
2263     while (nd) {
2264         struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2265 
2266         nd = rb_next(nd);
2267         rb_erase_cached(&h->rb_node, &hists->entries);
2268 
2269         resort_filtered_entry(&new_root, h);
2270     }
2271 
2272     hists->entries = new_root;
2273 }
2274 
2275 void hists__filter_by_thread(struct hists *hists)
2276 {
2277     if (symbol_conf.report_hierarchy)
2278         hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2279                     hists->thread_filter);
2280     else
2281         hists__filter_by_type(hists, HIST_FILTER__THREAD,
2282                       hists__filter_entry_by_thread);
2283 }
2284 
2285 void hists__filter_by_dso(struct hists *hists)
2286 {
2287     if (symbol_conf.report_hierarchy)
2288         hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2289                     hists->dso_filter);
2290     else
2291         hists__filter_by_type(hists, HIST_FILTER__DSO,
2292                       hists__filter_entry_by_dso);
2293 }
2294 
2295 void hists__filter_by_symbol(struct hists *hists)
2296 {
2297     if (symbol_conf.report_hierarchy)
2298         hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2299                     hists->symbol_filter_str);
2300     else
2301         hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2302                       hists__filter_entry_by_symbol);
2303 }
2304 
2305 void hists__filter_by_socket(struct hists *hists)
2306 {
2307     if (symbol_conf.report_hierarchy)
2308         hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2309                     &hists->socket_filter);
2310     else
2311         hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2312                       hists__filter_entry_by_socket);
2313 }
2314 
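/* nr_events[0] doubles as the running total across all record types. */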
2315 void events_stats__inc(struct events_stats *stats, u32 type)
2316 {
2317     ++stats->nr_events[0];
2318     ++stats->nr_events[type];
2319 }
2320 
2321 static void hists_stats__inc(struct hists_stats *stats)
2322 {
2323     ++stats->nr_samples;
2324 }
2325 
2326 void hists__inc_nr_events(struct hists *hists)
2327 {
2328     hists_stats__inc(&hists->stats);
2329 }
2330 
2331 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2332 {
2333     hists_stats__inc(&hists->stats);
2334     if (!filtered)
2335         hists->stats.nr_non_filtered_samples++;
2336 }
2337 
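/*
 * Find or create a zero-period entry in @hists matching @pair; used by
 * hists__link() below so that every entry in the other hists gets a
 * counterpart in the leader.
 */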
2338 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2339                          struct hist_entry *pair)
2340 {
2341     struct rb_root_cached *root;
2342     struct rb_node **p;
2343     struct rb_node *parent = NULL;
2344     struct hist_entry *he;
2345     int64_t cmp;
2346     bool leftmost = true;
2347 
2348     if (hists__has(hists, need_collapse))
2349         root = &hists->entries_collapsed;
2350     else
2351         root = hists->entries_in;
2352 
2353     p = &root->rb_root.rb_node;
2354 
2355     while (*p != NULL) {
2356         parent = *p;
2357         he = rb_entry(parent, struct hist_entry, rb_node_in);
2358 
2359         cmp = hist_entry__collapse(he, pair);
2360 
2361         if (!cmp)
2362             goto out;
2363 
2364         if (cmp < 0)
2365             p = &(*p)->rb_left;
2366         else {
2367             p = &(*p)->rb_right;
2368             leftmost = false;
2369         }
2370     }
2371 
2372     he = hist_entry__new(pair, true);
2373     if (he) {
2374         memset(&he->stat, 0, sizeof(he->stat));
2375         he->hists = hists;
2376         if (symbol_conf.cumulate_callchain)
2377             memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2378         rb_link_node(&he->rb_node_in, parent, p);
2379         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2380         hists__inc_stats(hists, he);
2381         he->dummy = true;
2382     }
2383 out:
2384     return he;
2385 }
2386 
2387 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2388                             struct rb_root_cached *root,
2389                             struct hist_entry *pair)
2390 {
2391     struct rb_node **p;
2392     struct rb_node *parent = NULL;
2393     struct hist_entry *he;
2394     struct perf_hpp_fmt *fmt;
2395     bool leftmost = true;
2396 
2397     p = &root->rb_root.rb_node;
2398     while (*p != NULL) {
2399         int64_t cmp = 0;
2400 
2401         parent = *p;
2402         he = rb_entry(parent, struct hist_entry, rb_node_in);
2403 
2404         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2405             cmp = fmt->collapse(fmt, he, pair);
2406             if (cmp)
2407                 break;
2408         }
2409         if (!cmp)
2410             goto out;
2411 
2412         if (cmp < 0)
2413             p = &parent->rb_left;
2414         else {
2415             p = &parent->rb_right;
2416             leftmost = false;
2417         }
2418     }
2419 
2420     he = hist_entry__new(pair, true);
2421     if (he) {
2422         rb_link_node(&he->rb_node_in, parent, p);
2423         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2424 
2425         he->dummy = true;
2426         he->hists = hists;
2427         memset(&he->stat, 0, sizeof(he->stat));
2428         hists__inc_stats(hists, he);
2429     }
2430 out:
2431     return he;
2432 }
2433 
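/* Binary search for an entry with the same collapse key as @he. */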
2434 static struct hist_entry *hists__find_entry(struct hists *hists,
2435                         struct hist_entry *he)
2436 {
2437     struct rb_node *n;
2438 
2439     if (hists__has(hists, need_collapse))
2440         n = hists->entries_collapsed.rb_root.rb_node;
2441     else
2442         n = hists->entries_in->rb_root.rb_node;
2443 
2444     while (n) {
2445         struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2446         int64_t cmp = hist_entry__collapse(iter, he);
2447 
2448         if (cmp < 0)
2449             n = n->rb_left;
2450         else if (cmp > 0)
2451             n = n->rb_right;
2452         else
2453             return iter;
2454     }
2455 
2456     return NULL;
2457 }
2458 
2459 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2460                               struct hist_entry *he)
2461 {
2462     struct rb_node *n = root->rb_root.rb_node;
2463 
2464     while (n) {
2465         struct hist_entry *iter;
2466         struct perf_hpp_fmt *fmt;
2467         int64_t cmp = 0;
2468 
2469         iter = rb_entry(n, struct hist_entry, rb_node_in);
2470         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2471             cmp = fmt->collapse(fmt, iter, he);
2472             if (cmp)
2473                 break;
2474         }
2475 
2476         if (cmp < 0)
2477             n = n->rb_left;
2478         else if (cmp > 0)
2479             n = n->rb_right;
2480         else
2481             return iter;
2482     }
2483 
2484     return NULL;
2485 }
2486 
2487 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2488                    struct rb_root_cached *other_root)
2489 {
2490     struct rb_node *nd;
2491     struct hist_entry *pos, *pair;
2492 
2493     for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2494         pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2495         pair = hists__find_hierarchy_entry(other_root, pos);
2496 
2497         if (pair) {
2498             hist_entry__add_pair(pair, pos);
2499             hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2500         }
2501     }
2502 }
2503 
2504 /*
2505  * Look for pairs to link to the leader buckets (hist_entries):
2506  */
2507 void hists__match(struct hists *leader, struct hists *other)
2508 {
2509     struct rb_root_cached *root;
2510     struct rb_node *nd;
2511     struct hist_entry *pos, *pair;
2512 
2513     if (symbol_conf.report_hierarchy) {
2514         /* hierarchy report always collapses entries */
2515         return hists__match_hierarchy(&leader->entries_collapsed,
2516                           &other->entries_collapsed);
2517     }
2518 
2519     if (hists__has(leader, need_collapse))
2520         root = &leader->entries_collapsed;
2521     else
2522         root = leader->entries_in;
2523 
2524     for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2525         pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2526         pair = hists__find_entry(other, pos);
2527 
2528         if (pair)
2529             hist_entry__add_pair(pair, pos);
2530     }
2531 }
2532 
2533 static int hists__link_hierarchy(struct hists *leader_hists,
2534                  struct hist_entry *parent,
2535                  struct rb_root_cached *leader_root,
2536                  struct rb_root_cached *other_root)
2537 {
2538     struct rb_node *nd;
2539     struct hist_entry *pos, *leader;
2540 
2541     for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2542         pos = rb_entry(nd, struct hist_entry, rb_node_in);
2543 
2544         if (hist_entry__has_pairs(pos)) {
2545             bool found = false;
2546 
2547             list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2548                 if (leader->hists == leader_hists) {
2549                     found = true;
2550                     break;
2551                 }
2552             }
2553             if (!found)
2554                 return -1;
2555         } else {
2556             leader = add_dummy_hierarchy_entry(leader_hists,
2557                                leader_root, pos);
2558             if (leader == NULL)
2559                 return -1;
2560 
2561             /* parent must point at the leader-side entry, not the one in pos */
2562             leader->parent_he = parent;
2563 
2564             hist_entry__add_pair(pos, leader);
2565         }
2566 
2567         if (!pos->leaf) {
2568             if (hists__link_hierarchy(leader_hists, leader,
2569                           &leader->hroot_in,
2570                           &pos->hroot_in) < 0)
2571                 return -1;
2572         }
2573     }
2574     return 0;
2575 }
2576 
2577 /*
2578  * Look for entries in the other hists that are not present in the leader, if
2579  * we find them, just add a dummy entry on the leader hists, with period=0,
2580  * nr_events=0, to serve as the list header.
2581  */
2582 int hists__link(struct hists *leader, struct hists *other)
2583 {
2584     struct rb_root_cached *root;
2585     struct rb_node *nd;
2586     struct hist_entry *pos, *pair;
2587 
2588     if (symbol_conf.report_hierarchy) {
2589         /* hierarchy report always collapses entries */
2590         return hists__link_hierarchy(leader, NULL,
2591                          &leader->entries_collapsed,
2592                          &other->entries_collapsed);
2593     }
2594 
2595     if (hists__has(other, need_collapse))
2596         root = &other->entries_collapsed;
2597     else
2598         root = other->entries_in;
2599 
2600     for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2601         pos = rb_entry(nd, struct hist_entry, rb_node_in);
2602 
2603         if (!hist_entry__has_pairs(pos)) {
2604             pair = hists__add_dummy_entry(leader, pos);
2605             if (pair == NULL)
2606                 return -1;
2607             hist_entry__add_pair(pos, pair);
2608         }
2609     }
2610 
2611     return 0;
2612 }
2613 
2614 int hists__unlink(struct hists *hists)
2615 {
2616     struct rb_root_cached *root;
2617     struct rb_node *nd;
2618     struct hist_entry *pos;
2619 
2620     if (hists__has(hists, need_collapse))
2621         root = &hists->entries_collapsed;
2622     else
2623         root = hists->entries_in;
2624 
2625     for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2626         pos = rb_entry(nd, struct hist_entry, rb_node_in);
2627         list_del_init(&pos->pairs.node);
2628     }
2629 
2630     return 0;
2631 }
2632 
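/*
 * Attribute the per-branch cycle counts of a sampled branch stack to
 * the corresponding addresses for annotation, walking the stack in
 * program order (perf stores it newest-first).
 */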
2633 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2634               struct perf_sample *sample, bool nonany_branch_mode,
2635               u64 *total_cycles)
2636 {
2637     struct branch_info *bi;
2638     struct branch_entry *entries = perf_sample__branch_entries(sample);
2639 
2640     /* If we have branch cycles, always annotate them. */
2641     if (bs && bs->nr && entries[0].flags.cycles) {
2642         int i;
2643 
2644         bi = sample__resolve_bstack(sample, al);
2645         if (bi) {
2646             struct addr_map_symbol *prev = NULL;
2647 
2648             /*
2649              * Ignore errors, we still want to process the
2650              * other entries.
2651              *
2652              * For non-standard branch modes always
2653              * force no IPC (prev == NULL).
2654              *
2655              * Note that perf stores branches reversed from
2656              * program order!
2657              */
2658             for (i = bs->nr - 1; i >= 0; i--) {
2659                 addr_map_symbol__account_cycles(&bi[i].from,
2660                     nonany_branch_mode ? NULL : prev,
2661                     bi[i].flags.cycles);
2662                 prev = &bi[i].to;
2663 
2664                 if (total_cycles)
2665                     *total_cycles += bi[i].flags.cycles;
2666             }
2667             free(bi);
2668         }
2669     }
2670 }
2671 
2672 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
2673                  bool skip_empty)
2674 {
2675     struct evsel *pos;
2676     size_t ret = 0;
2677 
2678     evlist__for_each_entry(evlist, pos) {
2679         struct hists *hists = evsel__hists(pos);
2680 
2681         if (skip_empty && !hists->stats.nr_samples)
2682             continue;
2683 
2684         ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2685         ret += fprintf(fp, "%16s events: %10u\n",
2686                    "SAMPLE", hists->stats.nr_samples);
2687     }
2688 
2689     return ret;
2690 }
2691 
2693 u64 hists__total_period(struct hists *hists)
2694 {
2695     return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2696         hists->stats.total_period;
2697 }
2698 
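/*
 * Format the headline shown above the histogram (e.g. by the report and
 * top browsers): sample and event counts plus any active UID, thread,
 * DSO or socket filters.
 */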
2699 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2700 {
2701     char unit;
2702     int printed;
2703     const struct dso *dso = hists->dso_filter;
2704     struct thread *thread = hists->thread_filter;
2705     int socket_id = hists->socket_filter;
2706     unsigned long nr_samples = hists->stats.nr_samples;
2707     u64 nr_events = hists->stats.total_period;
2708     struct evsel *evsel = hists_to_evsel(hists);
2709     const char *ev_name = evsel__name(evsel);
2710     char buf[512], sample_freq_str[64] = "";
2711     size_t buflen = sizeof(buf);
2712     char ref[30] = " show reference callgraph, ";
2713     bool enable_ref = false;
2714 
2715     if (symbol_conf.filter_relative) {
2716         nr_samples = hists->stats.nr_non_filtered_samples;
2717         nr_events = hists->stats.total_non_filtered_period;
2718     }
2719 
2720     if (evsel__is_group_event(evsel)) {
2721         struct evsel *pos;
2722 
2723         evsel__group_desc(evsel, buf, buflen);
2724         ev_name = buf;
2725 
2726         for_each_group_member(pos, evsel) {
2727             struct hists *pos_hists = evsel__hists(pos);
2728 
2729             if (symbol_conf.filter_relative) {
2730                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2731                 nr_events += pos_hists->stats.total_non_filtered_period;
2732             } else {
2733                 nr_samples += pos_hists->stats.nr_samples;
2734                 nr_events += pos_hists->stats.total_period;
2735             }
2736         }
2737     }
2738 
2739     if (symbol_conf.show_ref_callgraph &&
2740         strstr(ev_name, "call-graph=no"))
2741         enable_ref = true;
2742 
2743     if (show_freq)
2744         scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2745 
2746     nr_samples = convert_unit(nr_samples, &unit);
2747     printed = scnprintf(bf, size,
2748                "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2749                nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2750                ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2751 
2753     if (hists->uid_filter_str)
2754         printed += scnprintf(bf + printed, size - printed,
2755                     ", UID: %s", hists->uid_filter_str);
2756     if (thread) {
2757         if (hists__has(hists, thread)) {
2758             printed += scnprintf(bf + printed, size - printed,
2759                     ", Thread: %s(%d)",
2760                      (thread->comm_set ? thread__comm_str(thread) : ""),
2761                     thread->tid);
2762         } else {
2763             printed += scnprintf(bf + printed, size - printed,
2764                     ", Thread: %s",
2765                      (thread->comm_set ? thread__comm_str(thread) : ""));
2766         }
2767     }
2768     if (dso)
2769         printed += scnprintf(bf + printed, size - printed,
2770                     ", DSO: %s", dso->short_name);
2771     if (socket_id > -1)
2772         printed += scnprintf(bf + printed, size - printed,
2773                     ", Processor Socket: %d", socket_id);
2774 
2775     return printed;
2776 }
2777 
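/*
 * Option callback for --percentage {relative|absolute}; also backs the
 * hist.percentage config key handled below.
 */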
2778 int parse_filter_percentage(const struct option *opt __maybe_unused,
2779                 const char *arg, int unset __maybe_unused)
2780 {
2781     if (!strcmp(arg, "relative"))
2782         symbol_conf.filter_relative = true;
2783     else if (!strcmp(arg, "absolute"))
2784         symbol_conf.filter_relative = false;
2785     else {
2786         pr_debug("Invalid percentage: %s\n", arg);
2787         return -1;
2788     }
2789 
2790     return 0;
2791 }
2792 
2793 int perf_hist_config(const char *var, const char *value)
2794 {
2795     if (!strcmp(var, "hist.percentage"))
2796         return parse_filter_percentage(NULL, value, 0);
2797 
2798     return 0;
2799 }
2800 
2801 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2802 {
2803     memset(hists, 0, sizeof(*hists));
2804     hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2805     hists->entries_in = &hists->entries_in_array[0];
2806     hists->entries_collapsed = RB_ROOT_CACHED;
2807     hists->entries = RB_ROOT_CACHED;
2808     pthread_mutex_init(&hists->lock, NULL);
2809     hists->socket_filter = -1;
2810     hists->hpp_list = hpp_list;
2811     INIT_LIST_HEAD(&hists->hpp_formats);
2812     return 0;
2813 }
2814 
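/*
 * Free any entries still sitting in the input or collapsed trees, i.e.
 * entries that never made it into the output tree.
 */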
2815 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2816 {
2817     struct rb_node *node;
2818     struct hist_entry *he;
2819 
2820     while (!RB_EMPTY_ROOT(&root->rb_root)) {
2821         node = rb_first_cached(root);
2822         rb_erase_cached(node, root);
2823 
2824         he = rb_entry(node, struct hist_entry, rb_node_in);
2825         hist_entry__delete(he);
2826     }
2827 }
2828 
2829 static void hists__delete_all_entries(struct hists *hists)
2830 {
2831     hists__delete_entries(hists);
2832     hists__delete_remaining_entries(&hists->entries_in_array[0]);
2833     hists__delete_remaining_entries(&hists->entries_in_array[1]);
2834     hists__delete_remaining_entries(&hists->entries_collapsed);
2835 }
2836 
2837 static void hists_evsel__exit(struct evsel *evsel)
2838 {
2839     struct hists *hists = evsel__hists(evsel);
2840     struct perf_hpp_fmt *fmt, *pos;
2841     struct perf_hpp_list_node *node, *tmp;
2842 
2843     hists__delete_all_entries(hists);
2844 
2845     list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2846         perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2847             list_del_init(&fmt->list);
2848             free(fmt);
2849         }
2850         list_del_init(&node->list);
2851         free(node);
2852     }
2853 }
2854 
2855 static int hists_evsel__init(struct evsel *evsel)
2856 {
2857     struct hists *hists = evsel__hists(evsel);
2858 
2859     __hists__init(hists, &perf_hpp_list);
2860     return 0;
2861 }
2862 
2868 int hists__init(void)
2869 {
2870     int err = evsel__object_config(sizeof(struct hists_evsel),
2871                        hists_evsel__init, hists_evsel__exit);
2872     if (err)
2873         fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2874 
2875     return err;
2876 }
2877 
2878 void perf_hpp_list__init(struct perf_hpp_list *list)
2879 {
2880     INIT_LIST_HEAD(&list->fields);
2881     INIT_LIST_HEAD(&list->sorts);
2882 }