Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <stddef.h>
0003 #include <stdlib.h>
0004 #include <string.h>
0005 #include <errno.h>
0006 #include <sys/types.h>
0007 #include <sys/stat.h>
0008 #include <unistd.h>
0009 #include <api/fs/fs.h>
0010 #include <linux/kernel.h>
0011 #include "map_symbol.h"
0012 #include "mem-events.h"
0013 #include "debug.h"
0014 #include "symbol.h"
0015 #include "pmu.h"
0016 #include "pmu-hybrid.h"
0017 
/* Default load-latency threshold (cycles) substituted into the ldlat-loads event string. */
unsigned int perf_mem_events__loads_ldlat = 30;
0019 
#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

/*
 * Default memory-event table (generic "cpu" PMU).  .name is a printf-style
 * template: mem-loads takes the ldlat threshold (%u), and on hybrid systems
 * .sysfs_name takes the PMU name (%s).  Arch code may override the __weak
 * accessors below to supply its own table.
 */
static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
	E("ldlat-loads",	"cpu/mem-loads,ldlat=%u/P",	"cpu/events/mem-loads"),
	E("ldlat-stores",	"cpu/mem-stores/P",		"cpu/events/mem-stores"),
	E(NULL,			NULL,				NULL),
};
#undef E
0028 
/* Cached, fully-expanded mem-loads event string and its one-shot init flag. */
static char mem_loads_name[100];
static bool mem_loads_name__init;
0031 
0032 struct perf_mem_event * __weak perf_mem_events__ptr(int i)
0033 {
0034     if (i >= PERF_MEM_EVENTS__MAX)
0035         return NULL;
0036 
0037     return &perf_mem_events[i];
0038 }
0039 
0040 char * __weak perf_mem_events__name(int i, char *pmu_name  __maybe_unused)
0041 {
0042     struct perf_mem_event *e = perf_mem_events__ptr(i);
0043 
0044     if (!e)
0045         return NULL;
0046 
0047     if (i == PERF_MEM_EVENTS__LOAD) {
0048         if (!mem_loads_name__init) {
0049             mem_loads_name__init = true;
0050             scnprintf(mem_loads_name, sizeof(mem_loads_name),
0051                   e->name, perf_mem_events__loads_ldlat);
0052         }
0053         return mem_loads_name;
0054     }
0055 
0056     return (char *)e->name;
0057 }
0058 
0059 __weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
0060 {
0061     return false;
0062 }
0063 
0064 int perf_mem_events__parse(const char *str)
0065 {
0066     char *tok, *saveptr = NULL;
0067     bool found = false;
0068     char *buf;
0069     int j;
0070 
0071     /* We need buffer that we know we can write to. */
0072     buf = malloc(strlen(str) + 1);
0073     if (!buf)
0074         return -ENOMEM;
0075 
0076     strcpy(buf, str);
0077 
0078     tok = strtok_r((char *)buf, ",", &saveptr);
0079 
0080     while (tok) {
0081         for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
0082             struct perf_mem_event *e = perf_mem_events__ptr(j);
0083 
0084             if (!e->tag)
0085                 continue;
0086 
0087             if (strstr(e->tag, tok))
0088                 e->record = found = true;
0089         }
0090 
0091         tok = strtok_r(NULL, ",", &saveptr);
0092     }
0093 
0094     free(buf);
0095 
0096     if (found)
0097         return 0;
0098 
0099     pr_err("failed: event '%s' not found, use '-e list' to get list of available events\n", str);
0100     return -1;
0101 }
0102 
0103 static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
0104 {
0105     char path[PATH_MAX];
0106     struct stat st;
0107 
0108     scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
0109     return !stat(path, &st);
0110 }
0111 
/*
 * Probe sysfs for every known memory event and mark each as supported when
 * its PMU event node exists.  Returns 0 if at least one event is usable,
 * -ENOENT when sysfs is unmounted or no event is supported.
 */
int perf_mem_events__init(void)
{
	const char *mnt = sysfs__mount();
	bool found = false;
	int j;

	if (!mnt)
		return -ENOENT;

	for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		struct perf_mem_event *e = perf_mem_events__ptr(j);
		struct perf_pmu *pmu;
		char sysfs_name[100];

		/*
		 * If the event entry isn't valid, skip initialization
		 * and "e->supported" will keep false.
		 */
		if (!e->tag)
			continue;

		if (!perf_pmu__has_hybrid()) {
			/* Non-hybrid: expand the sysfs template with the "cpu" PMU. */
			scnprintf(sysfs_name, sizeof(sysfs_name),
				  e->sysfs_name, "cpu");
			e->supported = perf_mem_event__supported(mnt, sysfs_name);
		} else {
			/* Hybrid: supported if ANY hybrid PMU provides the event. */
			perf_pmu__for_each_hybrid_pmu(pmu) {
				scnprintf(sysfs_name, sizeof(sysfs_name),
					  e->sysfs_name, pmu->name);
				e->supported |= perf_mem_event__supported(mnt, sysfs_name);
			}
		}

		if (e->supported)
			found = true;
	}

	return found ? 0 : -ENOENT;
}
0151 
0152 void perf_mem_events__list(void)
0153 {
0154     int j;
0155 
0156     for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
0157         struct perf_mem_event *e = perf_mem_events__ptr(j);
0158 
0159         fprintf(stderr, "%-13s%-*s%s\n",
0160             e->tag ?: "",
0161             verbose > 0 ? 25 : 0,
0162             verbose > 0 ? perf_mem_events__name(j, NULL) : "",
0163             e->supported ? ": available" : "");
0164     }
0165 }
0166 
/*
 * For an unsupported event on a hybrid system, report each hybrid PMU
 * on which the event's sysfs node is missing.
 */
static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
						    int idx)
{
	const char *mnt = sysfs__mount();
	char sysfs_name[100];
	struct perf_pmu *pmu;

	perf_pmu__for_each_hybrid_pmu(pmu) {
		/* e->sysfs_name is a template taking the PMU name (%s). */
		scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
			  pmu->name);
		if (!perf_mem_event__supported(mnt, sysfs_name)) {
			pr_err("failed: event '%s' not supported\n",
			       perf_mem_events__name(idx, pmu->name));
		}
	}
}
0183 
/*
 * Append "-e <event>" argument pairs for every event marked ->record to
 * rec_argv.  On hybrid systems each hybrid PMU gets its own event string;
 * those are strdup'd and also stored in rec_tmp so the caller can free
 * them later.  *argv_nr and *tmp_nr are updated in place.
 * Returns 0 on success, -1 when an event is unsupported or on OOM.
 */
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
				 char **rec_tmp, int *tmp_nr)
{
	int i = *argv_nr, k = 0;
	struct perf_mem_event *e;
	struct perf_pmu *pmu;
	char *s;

	for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
		e = perf_mem_events__ptr(j);
		if (!e->record)
			continue;

		if (!perf_pmu__has_hybrid()) {
			if (!e->supported) {
				pr_err("failed: event '%s' not supported\n",
				       perf_mem_events__name(j, NULL));
				return -1;
			}

			rec_argv[i++] = "-e";
			rec_argv[i++] = perf_mem_events__name(j, NULL);
		} else {
			if (!e->supported) {
				perf_mem_events__print_unsupport_hybrid(e, j);
				return -1;
			}

			perf_pmu__for_each_hybrid_pmu(pmu) {
				rec_argv[i++] = "-e";
				s = perf_mem_events__name(j, pmu->name);
				if (s) {
					s = strdup(s);
					/*
					 * NOTE(review): on strdup failure the
					 * strings already placed in rec_tmp are
					 * not reported back (*tmp_nr is left
					 * untouched) — possible leak; confirm
					 * against callers' cleanup paths.
					 */
					if (!s)
						return -1;

					rec_argv[i++] = s;
					rec_tmp[k++] = s;
				}
			}
		}
	}

	*argv_nr = i;
	*tmp_nr = k;
	return 0;
}
0231 
/* TLB access names, indexed by PERF_MEM_TLB_* bit position (NA is bit 0). */
static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
0241 
0242 int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0243 {
0244     size_t l = 0, i;
0245     u64 m = PERF_MEM_TLB_NA;
0246     u64 hit, miss;
0247 
0248     sz -= 1; /* -1 for null termination */
0249     out[0] = '\0';
0250 
0251     if (mem_info)
0252         m = mem_info->data_src.mem_dtlb;
0253 
0254     hit = m & PERF_MEM_TLB_HIT;
0255     miss = m & PERF_MEM_TLB_MISS;
0256 
0257     /* already taken care of */
0258     m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);
0259 
0260     for (i = 0; m && i < ARRAY_SIZE(tlb_access); i++, m >>= 1) {
0261         if (!(m & 0x1))
0262             continue;
0263         if (l) {
0264             strcat(out, " or ");
0265             l += 4;
0266         }
0267         l += scnprintf(out + l, sz - l, tlb_access[i]);
0268     }
0269     if (*out == '\0')
0270         l += scnprintf(out, sz - l, "N/A");
0271     if (hit)
0272         l += scnprintf(out + l, sz - l, " hit");
0273     if (miss)
0274         l += scnprintf(out + l, sz - l, " miss");
0275 
0276     return l;
0277 }
0278 
/* Memory-level names, indexed by PERF_MEM_LVL_* bit position (NA is bit 0). */
static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
0295 
/*
 * Names for PERF_MEM_LVLNUM_* values; entries not listed here are NULL and
 * are printed as "L<n>" by perf_mem__lvl_scnprintf().
 */
static const char * const mem_lvlnum[] = {
	[PERF_MEM_LVLNUM_ANY_CACHE] = "Any cache",
	[PERF_MEM_LVLNUM_LFB] = "LFB",
	[PERF_MEM_LVLNUM_RAM] = "RAM",
	[PERF_MEM_LVLNUM_PMEM] = "PMEM",
	[PERF_MEM_LVLNUM_NA] = "N/A",
};
0303 
/* Hop-distance names, indexed by the mem_hops data-source field. */
static const char * const mem_hops[] = {
	"N/A",
	/*
	 * While printing, 'Remote' will be added to represent
	 * 'Remote core, same node' accesses as remote field need
	 * to be set with mem_hops field.
	 */
	"core, same node",
	"node, same socket",
	"socket, same board",
	"board",
};
0316 
0317 static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0318 {
0319     u64 op = PERF_MEM_LOCK_NA;
0320     int l;
0321 
0322     if (mem_info)
0323         op = mem_info->data_src.mem_op;
0324 
0325     if (op & PERF_MEM_OP_NA)
0326         l = scnprintf(out, sz, "N/A");
0327     else if (op & PERF_MEM_OP_LOAD)
0328         l = scnprintf(out, sz, "LOAD");
0329     else if (op & PERF_MEM_OP_STORE)
0330         l = scnprintf(out, sz, "STORE");
0331     else if (op & PERF_MEM_OP_PFETCH)
0332         l = scnprintf(out, sz, "PFETCH");
0333     else if (op & PERF_MEM_OP_EXEC)
0334         l = scnprintf(out, sz, "EXEC");
0335     else
0336         l = scnprintf(out, sz, "No");
0337 
0338     return l;
0339 }
0340 
0341 int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0342 {
0343     size_t i, l = 0;
0344     u64 m =  PERF_MEM_LVL_NA;
0345     u64 hit, miss;
0346     int printed = 0;
0347 
0348     if (mem_info)
0349         m  = mem_info->data_src.mem_lvl;
0350 
0351     sz -= 1; /* -1 for null termination */
0352     out[0] = '\0';
0353 
0354     hit = m & PERF_MEM_LVL_HIT;
0355     miss = m & PERF_MEM_LVL_MISS;
0356 
0357     /* already taken care of */
0358     m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);
0359 
0360     if (mem_info && mem_info->data_src.mem_remote) {
0361         strcat(out, "Remote ");
0362         l += 7;
0363     }
0364 
0365     /*
0366      * Incase mem_hops field is set, we can skip printing data source via
0367      * PERF_MEM_LVL namespace.
0368      */
0369     if (mem_info && mem_info->data_src.mem_hops) {
0370         l += scnprintf(out + l, sz - l, "%s ", mem_hops[mem_info->data_src.mem_hops]);
0371     } else {
0372         for (i = 0; m && i < ARRAY_SIZE(mem_lvl); i++, m >>= 1) {
0373             if (!(m & 0x1))
0374                 continue;
0375             if (printed++) {
0376                 strcat(out, " or ");
0377                 l += 4;
0378             }
0379             l += scnprintf(out + l, sz - l, mem_lvl[i]);
0380         }
0381     }
0382 
0383     if (mem_info && mem_info->data_src.mem_lvl_num) {
0384         int lvl = mem_info->data_src.mem_lvl_num;
0385         if (printed++) {
0386             strcat(out, " or ");
0387             l += 4;
0388         }
0389         if (mem_lvlnum[lvl])
0390             l += scnprintf(out + l, sz - l, mem_lvlnum[lvl]);
0391         else
0392             l += scnprintf(out + l, sz - l, "L%d", lvl);
0393     }
0394 
0395     if (l == 0)
0396         l += scnprintf(out + l, sz - l, "N/A");
0397     if (hit)
0398         l += scnprintf(out + l, sz - l, " hit");
0399     if (miss)
0400         l += scnprintf(out + l, sz - l, " miss");
0401 
0402     return l;
0403 }
0404 
/* Snoop result names, indexed by PERF_MEM_SNOOP_* bit position. */
static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Hit",
	"Miss",
	"HitM",
};

/* Extended snoop result names, indexed by PERF_MEM_SNOOPX_* bit position. */
static const char * const snoopx_access[] = {
	"Fwd",
	"Peer",
};
0417 
0418 int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0419 {
0420     size_t i, l = 0;
0421     u64 m = PERF_MEM_SNOOP_NA;
0422 
0423     sz -= 1; /* -1 for null termination */
0424     out[0] = '\0';
0425 
0426     if (mem_info)
0427         m = mem_info->data_src.mem_snoop;
0428 
0429     for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
0430         if (!(m & 0x1))
0431             continue;
0432         if (l) {
0433             strcat(out, " or ");
0434             l += 4;
0435         }
0436         l += scnprintf(out + l, sz - l, snoop_access[i]);
0437     }
0438 
0439     m = 0;
0440     if (mem_info)
0441         m = mem_info->data_src.mem_snoopx;
0442 
0443     for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
0444         if (!(m & 0x1))
0445             continue;
0446 
0447         if (l) {
0448             strcat(out, " or ");
0449             l += 4;
0450         }
0451         l += scnprintf(out + l, sz - l, snoopx_access[i]);
0452     }
0453 
0454     if (*out == '\0')
0455         l += scnprintf(out, sz - l, "N/A");
0456 
0457     return l;
0458 }
0459 
0460 int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0461 {
0462     u64 mask = PERF_MEM_LOCK_NA;
0463     int l;
0464 
0465     if (mem_info)
0466         mask = mem_info->data_src.mem_lock;
0467 
0468     if (mask & PERF_MEM_LOCK_NA)
0469         l = scnprintf(out, sz, "N/A");
0470     else if (mask & PERF_MEM_LOCK_LOCKED)
0471         l = scnprintf(out, sz, "Yes");
0472     else
0473         l = scnprintf(out, sz, "No");
0474 
0475     return l;
0476 }
0477 
0478 int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
0479 {
0480     size_t l = 0;
0481     u64 mask = PERF_MEM_BLK_NA;
0482 
0483     sz -= 1; /* -1 for null termination */
0484     out[0] = '\0';
0485 
0486     if (mem_info)
0487         mask = mem_info->data_src.mem_blk;
0488 
0489     if (!mask || (mask & PERF_MEM_BLK_NA)) {
0490         l += scnprintf(out + l, sz - l, " N/A");
0491         return l;
0492     }
0493     if (mask & PERF_MEM_BLK_DATA)
0494         l += scnprintf(out + l, sz - l, " Data");
0495     if (mask & PERF_MEM_BLK_ADDR)
0496         l += scnprintf(out + l, sz - l, " Addr");
0497 
0498     return l;
0499 }
0500 
/*
 * Render every data-source field of @mem_info into @out (size @sz) as
 * "|OP ...|LVL ...|SNP ...|TLB ...|LCK ...|BLK ...".
 * Returns the total number of characters written.
 */
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
	int i = 0;

	i += scnprintf(out, sz, "|OP ");
	i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LVL ");
	/* fixed: was "sz" — remaining space must shrink as the buffer fills */
	i += perf_mem__lvl_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|SNP ");
	i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|TLB ");
	i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|LCK ");
	i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
	i += scnprintf(out + i, sz - i, "|BLK ");
	i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);

	return i;
}
0520 
0521 int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
0522 {
0523     union perf_mem_data_src *data_src = &mi->data_src;
0524     u64 daddr  = mi->daddr.addr;
0525     u64 op     = data_src->mem_op;
0526     u64 lvl    = data_src->mem_lvl;
0527     u64 snoop  = data_src->mem_snoop;
0528     u64 snoopx = data_src->mem_snoopx;
0529     u64 lock   = data_src->mem_lock;
0530     u64 blk    = data_src->mem_blk;
0531     /*
0532      * Skylake might report unknown remote level via this
0533      * bit, consider it when evaluating remote HITMs.
0534      *
0535      * Incase of power, remote field can also be used to denote cache
0536      * accesses from the another core of same node. Hence, setting
0537      * mrem only when HOPS is zero along with set remote field.
0538      */
0539     bool mrem  = (data_src->mem_remote && !data_src->mem_hops);
0540     int err = 0;
0541 
0542 #define HITM_INC(__f)       \
0543 do {                \
0544     stats->__f++;       \
0545     stats->tot_hitm++;  \
0546 } while (0)
0547 
0548 #define PEER_INC(__f)       \
0549 do {                \
0550     stats->__f++;       \
0551     stats->tot_peer++;  \
0552 } while (0)
0553 
0554 #define P(a, b) PERF_MEM_##a##_##b
0555 
0556     stats->nr_entries++;
0557 
0558     if (lock & P(LOCK, LOCKED)) stats->locks++;
0559 
0560     if (blk & P(BLK, DATA)) stats->blk_data++;
0561     if (blk & P(BLK, ADDR)) stats->blk_addr++;
0562 
0563     if (op & P(OP, LOAD)) {
0564         /* load */
0565         stats->load++;
0566 
0567         if (!daddr) {
0568             stats->ld_noadrs++;
0569             return -1;
0570         }
0571 
0572         if (lvl & P(LVL, HIT)) {
0573             if (lvl & P(LVL, UNC)) stats->ld_uncache++;
0574             if (lvl & P(LVL, IO))  stats->ld_io++;
0575             if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
0576             if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
0577             if (lvl & P(LVL, L2)) {
0578                 stats->ld_l2hit++;
0579 
0580                 if (snoopx & P(SNOOPX, PEER))
0581                     PEER_INC(lcl_peer);
0582             }
0583             if (lvl & P(LVL, L3 )) {
0584                 if (snoop & P(SNOOP, HITM))
0585                     HITM_INC(lcl_hitm);
0586                 else
0587                     stats->ld_llchit++;
0588 
0589                 if (snoopx & P(SNOOPX, PEER))
0590                     PEER_INC(lcl_peer);
0591             }
0592 
0593             if (lvl & P(LVL, LOC_RAM)) {
0594                 stats->lcl_dram++;
0595                 if (snoop & P(SNOOP, HIT))
0596                     stats->ld_shared++;
0597                 else
0598                     stats->ld_excl++;
0599             }
0600 
0601             if ((lvl & P(LVL, REM_RAM1)) ||
0602                 (lvl & P(LVL, REM_RAM2)) ||
0603                  mrem) {
0604                 stats->rmt_dram++;
0605                 if (snoop & P(SNOOP, HIT))
0606                     stats->ld_shared++;
0607                 else
0608                     stats->ld_excl++;
0609             }
0610         }
0611 
0612         if ((lvl & P(LVL, REM_CCE1)) ||
0613             (lvl & P(LVL, REM_CCE2)) ||
0614              mrem) {
0615             if (snoop & P(SNOOP, HIT)) {
0616                 stats->rmt_hit++;
0617             } else if (snoop & P(SNOOP, HITM)) {
0618                 HITM_INC(rmt_hitm);
0619             } else if (snoopx & P(SNOOPX, PEER)) {
0620                 stats->rmt_hit++;
0621                 PEER_INC(rmt_peer);
0622             }
0623         }
0624 
0625         if ((lvl & P(LVL, MISS)))
0626             stats->ld_miss++;
0627 
0628     } else if (op & P(OP, STORE)) {
0629         /* store */
0630         stats->store++;
0631 
0632         if (!daddr) {
0633             stats->st_noadrs++;
0634             return -1;
0635         }
0636 
0637         if (lvl & P(LVL, HIT)) {
0638             if (lvl & P(LVL, UNC)) stats->st_uncache++;
0639             if (lvl & P(LVL, L1 )) stats->st_l1hit++;
0640         }
0641         if (lvl & P(LVL, MISS))
0642             if (lvl & P(LVL, L1)) stats->st_l1miss++;
0643         if (lvl & P(LVL, NA))
0644             stats->st_na++;
0645     } else {
0646         /* unparsable data_src? */
0647         stats->noparse++;
0648         return -1;
0649     }
0650 
0651     if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
0652         stats->nomap++;
0653         return -1;
0654     }
0655 
0656 #undef P
0657 #undef HITM_INC
0658     return err;
0659 }
0660 
0661 void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
0662 {
0663     stats->nr_entries   += add->nr_entries;
0664 
0665     stats->locks        += add->locks;
0666     stats->store        += add->store;
0667     stats->st_uncache   += add->st_uncache;
0668     stats->st_noadrs    += add->st_noadrs;
0669     stats->st_l1hit     += add->st_l1hit;
0670     stats->st_l1miss    += add->st_l1miss;
0671     stats->st_na        += add->st_na;
0672     stats->load     += add->load;
0673     stats->ld_excl      += add->ld_excl;
0674     stats->ld_shared    += add->ld_shared;
0675     stats->ld_uncache   += add->ld_uncache;
0676     stats->ld_io        += add->ld_io;
0677     stats->ld_miss      += add->ld_miss;
0678     stats->ld_noadrs    += add->ld_noadrs;
0679     stats->ld_fbhit     += add->ld_fbhit;
0680     stats->ld_l1hit     += add->ld_l1hit;
0681     stats->ld_l2hit     += add->ld_l2hit;
0682     stats->ld_llchit    += add->ld_llchit;
0683     stats->lcl_hitm     += add->lcl_hitm;
0684     stats->rmt_hitm     += add->rmt_hitm;
0685     stats->tot_hitm     += add->tot_hitm;
0686     stats->lcl_peer     += add->lcl_peer;
0687     stats->rmt_peer     += add->rmt_peer;
0688     stats->tot_peer     += add->tot_peer;
0689     stats->rmt_hit      += add->rmt_hit;
0690     stats->lcl_dram     += add->lcl_dram;
0691     stats->rmt_dram     += add->rmt_dram;
0692     stats->blk_data     += add->blk_data;
0693     stats->blk_addr     += add->blk_addr;
0694     stats->nomap        += add->nomap;
0695     stats->noparse      += add->noparse;
0696 }