"""Convert directories of JSON events to C code."""
import argparse
import csv
import json
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections


# Parsed command line arguments (set in main()).
_args = None

# Names of the event tables generated from "/sys" directories.
_sys_event_tables = []

# Map from a lowercased event name to its architecture standard JsonEvent.
_arch_std_events = {}

# True when an events table prefix has been written and its closing suffix
# is still pending.
_close_table = False

# Events accumulated for the currently open events table.
_pending_events = []

# The big C string of all event strings (initialized below).
_bcs = None

# JsonEvent attributes written out for each event, in the order the
# generated decompress() function reads them back.
_json_event_attributes = [
    'name', 'pmu', 'topic', 'desc', 'metric_name', 'metric_group',
    'event',
    'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit',
    'metric_constraint', 'metric_expr', 'long_desc'
]


def removesuffix(s: str, suffix: str) -> str:
    """Remove the suffix from a string

    The removesuffix function is added to str in Python 3.9. We aim for 3.6
    compatibility and so provide our own function here.
    """
    return s[0:-len(suffix)] if s.endswith(suffix) else s


def file_name_to_table_name(parents: Sequence[str], dirname: str) -> str:
    """Generate a C table name from directory names."""
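    # Illustrative example with hypothetical directory names: parents
    # ['arm64', 'arm'] and dirname 'cortex-a53' yield
    # 'pme_arm64_arm_cortex_a53'.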
    tblname = 'pme'
    for p in parents:
        tblname += '_' + p
    tblname += '_' + dirname
    return tblname.replace('-', '_')


def c_len(s: str) -> int:
    r"""Return the length of s as a C string.

    This doesn't handle all escape characters properly. It first assumes
    all \ are for escaping, it then adjusts as it will have over counted
    \\. The code uses \000 rather than \0 as a terminator as an adjacent
    number would be folded into a string of \0 (ie. "\0" + "5" doesn't
    equal a terminator followed by the number 5 but the escape of
    \05). The code adjusts for \000 but not properly for all octal, hex
    or unicode values.
    """
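    # Worked example (illustrative): for s = 'event=0x3c\\000' the utf-8
    # encoding is 14 bytes, containing one single backslash and one
    # occurrence of the four-character sequence \000, so the result is
    # 14 - 1 - 2 = 11 -- the C string "event=0x3c\000" occupies 11 bytes.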
    try:
        utf = s.encode(encoding='utf-8', errors='strict')
    except UnicodeError:
        print(f'broken string {s}')
        raise
    return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)


class BigCString:
    """A class to hold many strings concatenated together.

    Generating a large number of stand-alone C strings creates a large
    number of relocations in position independent code. The BigCString
    is a helper for this case. It builds a single string which within it
    are all the other C strings (to avoid memory issues the string
    itself is held as a list of strings). The offsets within the big
    string are recorded and when stored to disk these don't need
    relocation. To reduce the size of the string further, identical
    strings are merged. If a longer string ends with the same value as a
    shorter string, these entries are also merged.
    """
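
    # Illustrative example (hypothetical strings): if both 'load\000' and
    # 'mem_load\000' are added, the suffix folding in compute() keeps only
    # 'mem_load\000' in the big string; 'load\000' is given an offset that
    # points into the tail of 'mem_load\000' instead of its own copy.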
    strings: Set[str]
    big_string: Sequence[str]
    offsets: Dict[str, int]

    def __init__(self):
        self.strings = set()

    def add(self, s: str) -> None:
        """Called to add to the big string."""
        self.strings.add(s)

    def compute(self) -> None:
        """Called once all strings are added to compute the string and offsets."""

        # Map each string to a longer string that ends with it (if one
        # exists) so that only the longer copy needs to be emitted.
        folded_strings = {}

        # Sorting the reversed strings places each string next to any longer
        # string that ends with it, so suffix matches can be found by
        # scanning forward from each position.
        sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
        for pos, s in enumerate(sorted_reversed_strings):
            best_pos = pos
            for check_pos in range(pos + 1, len(sorted_reversed_strings)):
                if sorted_reversed_strings[check_pos].startswith(s):
                    best_pos = check_pos
                else:
                    break
            if pos != best_pos:
                folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

        # Invert folded_strings: for each kept string, the set of strings
        # folded into it (used only for the generated comments).
        fold_into_strings = collections.defaultdict(set)
        for key, val in folded_strings.items():
            if key != val:
                fold_into_strings[val].add(key)

        # Compute the offsets of the strings that aren't folded and build up
        # the big string itself.
        big_string_offset = 0
        self.big_string = []
        self.offsets = {}

        # Emit all strings that aren't folded in a sorted manner.
        for s in sorted(self.strings):
            if s not in folded_strings:
                self.offsets[s] = big_string_offset
                self.big_string.append(f'/* offset={big_string_offset} */ "')
                self.big_string.append(s)
                self.big_string.append('"')
                if s in fold_into_strings:
                    self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
                self.big_string.append('\n')
                big_string_offset += c_len(s)
                continue

        # A folded string forms the tail of the string it was folded into,
        # so its offset is that string's offset plus the difference in C
        # lengths.
        for s in folded_strings.keys():
            assert s not in self.offsets
            folded_s = folded_strings[s]
            self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)


_bcs = BigCString()


class JsonEvent:
    """Representation of an event loaded from a json file dictionary."""

    def __init__(self, jd: dict):
        """Constructor passed the dictionary of parsed json values."""

        def llx(x: int) -> str:
            """Convert an int to a string similar to a printf modifier of %#llx."""
            return '0' if x == 0 else hex(x)

        def fixdesc(s: str) -> str:
            """Fix formatting issue for the desc string."""
            if s is None:
                return None
            return removesuffix(removesuffix(removesuffix(s, '. '),
                                             '. '), '.').replace('\n', '\\n').replace(
                                                 '\"', '\\"').replace('\r', '\\r')

        def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
            """Returns the aggr_mode_class enum value associated with the JSON string."""
            if not aggr_mode:
                return None
            aggr_mode_to_enum = {
                'PerChip': '1',
                'PerCore': '2',
            }
            return aggr_mode_to_enum[aggr_mode]

        def lookup_msr(num: str) -> Optional[str]:
            """Converts the msr number, or first in a list to the appropriate event field."""
            if not num:
                return None
            msrmap = {
                0x3F6: 'ldlat=',
                0x1A6: 'offcore_rsp=',
                0x1A7: 'offcore_rsp=',
                0x3F7: 'frontend=',
            }
            return msrmap[int(num.split(',', 1)[0], 0)]

        def real_event(name: str, event: str) -> Optional[str]:
            """Convert well known event names to an event string otherwise use the event argument."""
            fixed = {
                'inst_retired.any': 'event=0xc0,period=2000003',
                'inst_retired.any_p': 'event=0xc0,period=2000003',
                'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
                'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
                'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
                'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
            }
            if not name:
                return None
            if name.lower() in fixed:
                return fixed[name.lower()]
            return event

        def unit_to_pmu(unit: str) -> Optional[str]:
            """Convert a JSON Unit to Linux PMU name."""
            if not unit:
                return None

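            # Units listed below map to a specific PMU name; anything not
            # listed falls through to 'uncore_<unit lowercased>', e.g.
            # (illustrative) a Unit of 'IMC' becomes 'uncore_imc'.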
            table = {
                'CBO': 'uncore_cbox',
                'QPI LL': 'uncore_qpi',
                'SBO': 'uncore_sbox',
                'iMPH-U': 'uncore_arb',
                'CPU-M-CF': 'cpum_cf',
                'CPU-M-SF': 'cpum_sf',
                'PAI-CRYPTO': 'pai_crypto',
                'UPI LL': 'uncore_upi',
                'hisi_sicl,cpa': 'hisi_sicl,cpa',
                'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
                'hisi_sccl,hha': 'hisi_sccl,hha',
                'hisi_sccl,l3c': 'hisi_sccl,l3c',
                'imx8_ddr': 'imx8_ddr',
                'L3PMC': 'amd_l3',
                'DFPMC': 'amd_df',
                'cpu_core': 'cpu_core',
                'cpu_atom': 'cpu_atom',
            }
            return table[unit] if unit in table else f'uncore_{unit.lower()}'

        eventcode = 0
        if 'EventCode' in jd:
            eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
        if 'ExtSel' in jd:
            eventcode |= int(jd['ExtSel']) << 8
        configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
        self.name = jd['EventName'].lower() if 'EventName' in jd else None
        self.topic = ''
        self.compat = jd.get('Compat')
        self.desc = fixdesc(jd.get('BriefDescription'))
        self.long_desc = fixdesc(jd.get('PublicDescription'))
        precise = jd.get('PEBS')
        msr = lookup_msr(jd.get('MSRIndex'))
        msrval = jd.get('MSRValue')
        extra_desc = ''
        if 'Data_LA' in jd:
            extra_desc += ' Supports address when precise'
            if 'Errata' in jd:
                extra_desc += '.'
        if 'Errata' in jd:
            extra_desc += ' Spec update: ' + jd['Errata']
        self.pmu = unit_to_pmu(jd.get('Unit'))
        filter = jd.get('Filter')
        self.unit = jd.get('ScaleUnit')
        self.perpkg = jd.get('PerPkg')
        self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
        self.deprecated = jd.get('Deprecated')
        self.metric_name = jd.get('MetricName')
        self.metric_group = jd.get('MetricGroup')
        self.metric_constraint = jd.get('MetricConstraint')
        self.metric_expr = jd.get('MetricExpr')
        if self.metric_expr:
            self.metric_expr = self.metric_expr.replace('\\', '\\\\')
        arch_std = jd.get('ArchStdEvent')
        if precise and self.desc and '(Precise Event)' not in self.desc:
            extra_desc += (' (Must be precise)' if precise == '2'
                           else ' (Precise event)')
        event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
        event_fields = [
            ('AnyThread', 'any='),
            ('PortMask', 'ch_mask='),
            ('CounterMask', 'cmask='),
            ('EdgeDetect', 'edge='),
            ('FCMask', 'fc_mask='),
            ('Invert', 'inv='),
            ('SampleAfterValue', 'period='),
            ('UMask', 'umask='),
        ]
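        # For example, a hypothetical jd of {'EventCode': '0xc4',
        # 'SampleAfterValue': '100003', 'UMask': '0x20'} encodes as
        # 'event=0xc4,period=100003,umask=0x20'; fields whose value is '0'
        # are omitted.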
        for key, value in event_fields:
            if key in jd and jd[key] != '0':
                event += ',' + value + jd[key]
        if filter:
            event += f',{filter}'
        if msr:
            event += f',{msr}{msrval}'
        if self.desc and extra_desc:
            self.desc += extra_desc
        if self.long_desc and extra_desc:
            self.long_desc += extra_desc
        if self.pmu:
            if self.desc and not self.desc.endswith('. '):
                self.desc += '. '
            self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
        if arch_std and arch_std.lower() in _arch_std_events:
            event = _arch_std_events[arch_std.lower()].event
            # Copy from the architecture standard event any attributes that
            # are not already set on this event.
            for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
                if hasattr(self, attr) and not getattr(self, attr):
                    setattr(self, attr, value)

        self.event = real_event(self.name, event)

    def __repr__(self) -> str:
        """String representation primarily for debugging."""
        s = '{\n'
        for attr, value in self.__dict__.items():
            if value:
                s += f'\t{attr} = {value},\n'
        return s + '}'

    def build_c_string(self) -> str:
        """Join the event attributes into one '\\000'-separated string."""
        s = ''
        for attr in _json_event_attributes:
            x = getattr(self, attr)
            s += f'{x}\\000' if x else '\\000'
        return s

    def to_c_string(self) -> str:
        """Representation of the event as a C struct initializer."""
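        # Illustrative output, assuming a hypothetical offset of 532 into
        # the big string:
        #   { 532 }, /* instructions\000cpu\000... */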

        s = self.build_c_string()
        return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'


def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
    """Read json events from the specified file."""
    try:
        result = json.load(open(path), object_hook=JsonEvent)
    except BaseException:
        print(f'Exception processing {path}')
        raise
    for event in result:
        event.topic = topic
    return result


def preprocess_arch_std_files(archpath: str) -> None:
    """Read in all architecture standard events."""
    global _arch_std_events
    for item in os.scandir(archpath):
        if item.is_file() and item.name.endswith('.json'):
            for event in read_json_events(item.path, topic=''):
                if event.name:
                    _arch_std_events[event.name.lower()] = event


def print_events_table_prefix(tblname: str) -> None:
    """Called when a new events table is started."""
    global _close_table
    if _close_table:
        raise IOError('Printing table prefix but last table has no suffix')
    _args.output_file.write(f'static const struct compact_pmu_event {tblname}[] = {{\n')
    _close_table = True


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
    """Add contents of file to _pending_events table."""
    if not _close_table:
        raise IOError('Table entries missing prefix')
    for e in read_json_events(item.path, topic):
        _pending_events.append(e)


def print_events_table_suffix() -> None:
    """Optionally close events table."""

    def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
        def fix_none(s: Optional[str]) -> str:
            if s is None:
                return ''
            return s

        return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
                fix_none(j.metric_name))

    global _close_table
    if not _close_table:
        return

    global _pending_events
    for event in sorted(_pending_events, key=event_cmp_key):
        _args.output_file.write(event.to_c_string())
    _pending_events = []

    _args.output_file.write('};\n\n')
    _close_table = False


def get_topic(topic: str) -> str:
    """Convert a file name to a topic, e.g. 'frontend.json' becomes 'frontend'."""
    if topic.endswith('metrics.json'):
        return 'metrics'
    return removesuffix(topic, '.json').replace('-', ' ')


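# The JSON files are walked twice: preprocess_one_file() collects every
# event's string into the BigCString during the first pass, and
# process_one_file() emits the per-directory tables that reference the
# computed offsets during the second pass.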
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
    """Collect the event strings from a JSON file during the first pass."""
    if item.is_dir():
        return

    # Ignore files in the base directory or nested too deeply.
    level = len(parents)
    if level == 0 or level > 4:
        return

    # Ignore non-JSON files.
    if not item.is_file() or not item.name.endswith('.json'):
        return

    topic = get_topic(item.name)
    for event in read_json_events(item.path, topic):
        _bcs.add(event.build_c_string())


def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
    """Process a JSON file during the main walk."""
    global _sys_event_tables

    def is_leaf_dir(path: str) -> bool:
        for entry in os.scandir(path):
            if entry.is_dir():
                return False
        return True

    # A directory with no subdirectories starts a new events table; close
    # any table that is still open first.
    if item.is_dir() and is_leaf_dir(item.path):
        print_events_table_suffix()

        tblname = file_name_to_table_name(parents, item.name)
        if item.name == 'sys':
            _sys_event_tables.append(tblname)
        print_events_table_prefix(tblname)
        return

    # Ignore files in the base directory or nested too deeply.
    level = len(parents)
    if level == 0 or level > 4:
        return

    # Ignore non-JSON files.
    if not item.is_file() or not item.name.endswith('.json'):
        return

    add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
    """Read the mapfile and generate the struct from cpuid string to event table."""
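    # Each data row of mapfile.csv is expected to hold the cpuid in column 0
    # and the events directory in column 2; e.g. a hypothetical row
    # 'myvendor.*,v1,myvendor/mychip,core' produces an entry with
    # .cpuid = "myvendor.*" pointing at the pme_myvendor_mychip table.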
    _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
    for arch in archs:
        if arch == 'test':
            _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.table = {
\t.entries = pme_test_soc_cpu,
\t.length = ARRAY_SIZE(pme_test_soc_cpu),
\t}
},
""")
        else:
            with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
                table = csv.reader(csvfile)
                first = True
                for row in table:
                    # Skip the header row, comment lines and empty rows.
                    if not first and len(row) > 0 and not row[0].startswith('#'):
                        tblname = file_name_to_table_name([], row[2].replace('/', '_'))
                        cpuid = row[0].replace('\\', '\\\\')
                        _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.table = {{
\t\t.entries = {tblname},
\t\t.length = ARRAY_SIZE({tblname})
\t}}
}},
""")
                    first = False

    _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.table = { 0, 0 },
}
};
""")


def print_system_mapping_table() -> None:
    """C struct mapping table array for tables from /sys directories."""
    _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
    for tblname in _sys_event_tables:
        _args.output_file.write(f"""\t{{
\t\t.table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
    _args.output_file.write("""\t{
\t\t.table = { 0, 0 }
\t},
};

static void decompress(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
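    # Each attribute is read back in _json_event_attributes order; each
    # iteration below emits, e.g. for 'name' (illustrative):
    #   pe->name = (*p == '\0' ? NULL : p);
    #   while (*p++);
    # where the while advances p past the '\000' terminating that field
    # (and is skipped after the final attribute).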
    for attr in _json_event_attributes:
        _args.output_file.write(f"""
\tpe->{attr} = (*p == '\\0' ? NULL : p);
""")
        if attr == _json_event_attributes[-1]:
            continue
        _args.output_file.write('\twhile (*p++);')
    _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress(table->entries[i].offset, &pe);
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which use a cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")


def main() -> None:
    global _args

    def dir_path(path: str) -> str:
        """Validate path is a directory for argparse."""
        if os.path.isdir(path):
            return path
        raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

    def ftw(path: str, parents: Sequence[str],
            action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
        """Replicate the directory/file walking behavior of C's file tree walk."""
        for item in os.scandir(path):
            action(parents, item)
            if item.is_dir():
                ftw(item.path, parents + [item.name], action)

    ap = argparse.ArgumentParser()
    ap.add_argument('arch', help='Architecture name like x86')
    ap.add_argument(
        'starting_dir',
        type=dir_path,
        help='Root of tree containing architecture directories containing json files'
    )
    ap.add_argument(
        'output_file', type=argparse.FileType('w', encoding='utf-8'),
        nargs='?', default=sys.stdout)
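    # Illustrative invocation (script name and paths are hypothetical):
    #   python3 this_script.py arm64 pmu-events/arch out.c
    # With no output_file argument the generated C is written to stdout.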
    _args = ap.parse_args()

    _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
        int offset;
};

""")
    archs = []
    for item in os.scandir(_args.starting_dir):
        if not item.is_dir():
            continue
        if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
            archs.append(item.name)

    if len(archs) < 2:
        raise IOError(f'Missing architecture directory \'{_args.arch}\'')

    archs.sort()
    for arch in archs:
        arch_path = f'{_args.starting_dir}/{arch}'
        preprocess_arch_std_files(arch_path)
        ftw(arch_path, [], preprocess_one_file)

    _bcs.compute()
    _args.output_file.write('static const char *const big_c_string =\n')
    for s in _bcs.big_string:
        _args.output_file.write(s)
    _args.output_file.write(';\n\n')
    for arch in archs:
        arch_path = f'{_args.starting_dir}/{arch}'
        ftw(arch_path, [], process_one_file)
        print_events_table_suffix()

    print_mapping_table(archs)
    print_system_mapping_table()


if __name__ == '__main__':
    main()