2 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3 """Convert directories of JSON events to C code."""
6 from functools
import lru_cache
11 from typing
import (Callable
, Dict
, Optional
, Sequence
, Set
, Tuple
)
14 # Global command line arguments.
16 # List of regular event tables.
18 # List of event tables generated from "/sys" directories.
19 _sys_event_tables
= []
20 # List of regular metric tables.
22 # List of metric tables generated from "/sys" directories.
23 _sys_metric_tables
= []
24 # Mapping between sys event table names and sys metric table names.
25 _sys_event_table_to_metric_table_mapping
= {}
26 # Map from an event name to an architecture standard
27 # JsonEvent. Architecture standard events are in json files in the top
28 # f'{_args.starting_dir}/{_args.arch}' directory.
30 # Events to write out when the table is closed
32 # Name of events table to be written out
33 _pending_events_tblname
= None
34 # Metrics to write out when the table is closed
36 # Name of metrics table to be written out
37 _pending_metrics_tblname
= None
38 # Global BigCString shared by all structures.
40 # Map from the name of a metric group to a description of the group.
42 # Order specific JsonEvent attributes will be visited.
43 _json_event_attributes
= [
44 # cmp_sevent related attributes.
45 'name', 'topic', 'desc',
46 # Seems useful, put it early.
48 # Short things in alphabetical order.
49 'compat', 'deprecated', 'perpkg', 'unit',
50 # Longer things (the last won't be iterated over during decompress).
54 # Attributes that are in pmu_metric rather than pmu_event.
55 _json_metric_attributes
= [
56 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
57 'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
58 'default_metricgroup_name', 'aggr_mode', 'event_grouping'
60 # Attributes that are bools or enum int values, encoded as '0', '1',...
61 _json_enum_attributes
= ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
def removesuffix(s: str, suffix: str) -> str:
  """Remove suffix from a string, mirroring str.removesuffix.

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  # Guard against an empty suffix: len('') == 0 would make the slice
  # s[0:-0] == s[0:0] == '' and wrongly return an empty string, whereas
  # str.removesuffix returns the string unchanged.
  return s[0:-len(suffix)] if suffix and s.endswith(suffix) else s
72 def file_name_to_table_name(prefix
: str, parents
: Sequence
[str],
74 """Generate a C table name from directory names."""
78 tblname
+= '_' + dirname
79 return tblname
.replace('-', '_')
82 def c_len(s
: str) -> int:
83 """Return the length of s a C string
85 This doesn't handle all escape characters properly. It first assumes
86 all \\ are for escaping, it then adjusts as it will have over counted
87 \\. The code uses \000 rather than \0 as a terminator as an adjacent
88 number would be folded into a string of \0 (ie. "\0" + "5" doesn't
89 equal a terminator followed by the number 5 but the escape of
90 \05). The code adjusts for \000 but not properly for all octal, hex
94 utf
= s
.encode(encoding
='utf-8',errors
='strict')
96 print(f
'broken string {s}')
98 return len(utf
) - utf
.count(b
'\\') + utf
.count(b
'\\\\') - (utf
.count(b
'\\000') * 2)
101 """A class to hold many strings concatenated together.
103 Generating a large number of stand-alone C strings creates a large
104 number of relocations in position independent code. The BigCString
105 is a helper for this case. It builds a single string which within it
106 are all the other C strings (to avoid memory issues the string
107 itself is held as a list of strings). The offsets within the big
108 string are recorded and when stored to disk these don't need
109 relocation. To reduce the size of the string further, identical
110 strings are merged. If a longer string ends-with the same value as a
111 shorter string, these entries are also merged.
114 big_string
: Sequence
[str]
115 offsets
: Dict
[str, int]
117 insert_point
: Dict
[str, int]
122 self
.insert_number
= 0;
123 self
.insert_point
= {}
126 def add(self
, s
: str, metric
: bool) -> None:
127 """Called to add to the big string."""
128 if s
not in self
.strings
:
130 self
.insert_point
[s
] = self
.insert_number
131 self
.insert_number
+= 1
135 def compute(self
) -> None:
136 """Called once all strings are added to compute the string and offsets."""
139 # Determine if two strings can be folded, ie. let 1 string use the
140 # end of another. First reverse all strings and sort them.
141 sorted_reversed_strings
= sorted([x
[::-1] for x
in self
.strings
])
143 # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
144 # for each string to see if there is a better candidate to fold it
145 # into, in the example rather than using 'yz' we can use'xyz' at
146 # an offset of 1. We record which string can be folded into which
147 # in folded_strings, we don't need to record the offset as it is
148 # trivially computed from the string lengths.
149 for pos
,s
in enumerate(sorted_reversed_strings
):
151 for check_pos
in range(pos
+ 1, len(sorted_reversed_strings
)):
152 if sorted_reversed_strings
[check_pos
].startswith(s
):
157 folded_strings
[s
[::-1]] = sorted_reversed_strings
[best_pos
][::-1]
159 # Compute reverse mappings for debugging.
160 fold_into_strings
= collections
.defaultdict(set)
161 for key
, val
in folded_strings
.items():
163 fold_into_strings
[val
].add(key
)
165 # big_string_offset is the current location within the C string
166 # being appended to - comments, etc. don't count. big_string is
167 # the string contents represented as a list. Strings are immutable
168 # in Python and so appending to one causes memory issues, while
170 big_string_offset
= 0
174 def string_cmp_key(s
: str) -> Tuple
[bool, int, str]:
175 return (s
in self
.metrics
, self
.insert_point
[s
], s
)
177 # Emit all strings that aren't folded in a sorted manner.
178 for s
in sorted(self
.strings
, key
=string_cmp_key
):
179 if s
not in folded_strings
:
180 self
.offsets
[s
] = big_string_offset
181 self
.big_string
.append(f
'/* offset={big_string_offset} */ "')
182 self
.big_string
.append(s
)
183 self
.big_string
.append('"')
184 if s
in fold_into_strings
:
185 self
.big_string
.append(' /* also: ' + ', '.join(fold_into_strings
[s
]) + ' */')
186 self
.big_string
.append('\n')
187 big_string_offset
+= c_len(s
)
190 # Compute the offsets of the folded strings.
191 for s
in folded_strings
.keys():
192 assert s
not in self
.offsets
193 folded_s
= folded_strings
[s
]
194 self
.offsets
[s
] = self
.offsets
[folded_s
] + c_len(folded_s
) - c_len(s
)
199 """Representation of an event loaded from a json file dictionary."""
201 def __init__(self
, jd
: dict):
202 """Constructor passed the dictionary of parsed json values."""
204 def llx(x
: int) -> str:
205 """Convert an int to a string similar to a printf modifier of %#llx."""
206 return str(x
) if x
>= 0 and x
< 10 else hex(x
)
208 def fixdesc(s
: str) -> str:
209 """Fix formatting issue for the desc string."""
212 return removesuffix(removesuffix(removesuffix(s
, '. '),
213 '. '), '.').replace('\n', '\\n').replace(
214 '\"', '\\"').replace('\r', '\\r')
216 def convert_aggr_mode(aggr_mode
: str) -> Optional
[str]:
217 """Returns the aggr_mode_class enum value associated with the JSON string."""
220 aggr_mode_to_enum
= {
224 return aggr_mode_to_enum
[aggr_mode
]
226 def convert_metric_constraint(metric_constraint
: str) -> Optional
[str]:
227 """Returns the metric_event_groups enum value associated with the JSON string."""
228 if not metric_constraint
:
230 metric_constraint_to_enum
= {
231 'NO_GROUP_EVENTS': '1',
232 'NO_GROUP_EVENTS_NMI': '2',
233 'NO_NMI_WATCHDOG': '2',
234 'NO_GROUP_EVENTS_SMT': '3',
236 return metric_constraint_to_enum
[metric_constraint
]
238 def lookup_msr(num
: str) -> Optional
[str]:
239 """Converts the msr number, or first in a list to the appropriate event field."""
244 0x1A6: 'offcore_rsp=',
245 0x1A7: 'offcore_rsp=',
248 return msrmap
[int(num
.split(',', 1)[0], 0)]
250 def real_event(name
: str, event
: str) -> Optional
[str]:
251 """Convert well known event names to an event string otherwise use the event argument."""
253 'inst_retired.any': 'event=0xc0,period=2000003',
254 'inst_retired.any_p': 'event=0xc0,period=2000003',
255 'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
256 'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
257 'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
258 'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
262 if name
.lower() in fixed
:
263 return fixed
[name
.lower()]
266 def unit_to_pmu(unit
: str) -> Optional
[str]:
267 """Convert a JSON Unit to Linux PMU name."""
269 return 'default_core'
270 # Comment brought over from jevents.c:
271 # it's not realistic to keep adding these, we need something more scalable ...
273 'CBO': 'uncore_cbox',
274 'QPI LL': 'uncore_qpi',
275 'SBO': 'uncore_sbox',
276 'iMPH-U': 'uncore_arb',
277 'CPU-M-CF': 'cpum_cf',
278 'CPU-M-SF': 'cpum_sf',
279 'PAI-CRYPTO' : 'pai_crypto',
280 'PAI-EXT' : 'pai_ext',
281 'UPI LL': 'uncore_upi',
282 'hisi_sicl,cpa': 'hisi_sicl,cpa',
283 'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
284 'hisi_sccl,hha': 'hisi_sccl,hha',
285 'hisi_sccl,l3c': 'hisi_sccl,l3c',
286 'imx8_ddr': 'imx8_ddr',
287 'imx9_ddr': 'imx9_ddr',
291 'cpu_core': 'cpu_core',
292 'cpu_atom': 'cpu_atom',
293 'ali_drw': 'ali_drw',
294 'arm_cmn': 'arm_cmn',
297 return table
[unit
] if unit
in table
else f
'uncore_{unit.lower()}'
299 def is_zero(val
: str) -> bool:
301 if val
.startswith('0x'):
302 return int(val
, 16) == 0
308 def canonicalize_value(val
: str) -> str:
310 if val
.startswith('0x'):
311 return llx(int(val
, 16))
317 if 'EventCode' in jd
:
318 eventcode
= int(jd
['EventCode'].split(',', 1)[0], 0)
320 eventcode |
= int(jd
['ExtSel']) << 8
321 configcode
= int(jd
['ConfigCode'], 0) if 'ConfigCode' in jd
else None
322 eventidcode
= int(jd
['EventidCode'], 0) if 'EventidCode' in jd
else None
323 self
.name
= jd
['EventName'].lower() if 'EventName' in jd
else None
325 self
.compat
= jd
.get('Compat')
326 self
.desc
= fixdesc(jd
.get('BriefDescription'))
327 self
.long_desc
= fixdesc(jd
.get('PublicDescription'))
328 precise
= jd
.get('PEBS')
329 msr
= lookup_msr(jd
.get('MSRIndex'))
330 msrval
= jd
.get('MSRValue')
333 extra_desc
+= ' Supports address when precise'
337 extra_desc
+= ' Spec update: ' + jd
['Errata']
338 self
.pmu
= unit_to_pmu(jd
.get('Unit'))
339 filter = jd
.get('Filter')
340 self
.unit
= jd
.get('ScaleUnit')
341 self
.perpkg
= jd
.get('PerPkg')
342 self
.aggr_mode
= convert_aggr_mode(jd
.get('AggregationMode'))
343 self
.deprecated
= jd
.get('Deprecated')
344 self
.metric_name
= jd
.get('MetricName')
345 self
.metric_group
= jd
.get('MetricGroup')
346 self
.metricgroup_no_group
= jd
.get('MetricgroupNoGroup')
347 self
.default_metricgroup_name
= jd
.get('DefaultMetricgroupName')
348 self
.event_grouping
= convert_metric_constraint(jd
.get('MetricConstraint'))
349 self
.metric_expr
= None
350 if 'MetricExpr' in jd
:
351 self
.metric_expr
= metric
.ParsePerfJson(jd
['MetricExpr']).Simplify()
352 # Note, the metric formula for the threshold isn't parsed as the &
353 # and > have incorrect precedence.
354 self
.metric_threshold
= jd
.get('MetricThreshold')
356 arch_std
= jd
.get('ArchStdEvent')
357 if precise
and self
.desc
and '(Precise Event)' not in self
.desc
:
358 extra_desc
+= ' (Must be precise)' if precise
== '2' else (' (Precise '
361 if configcode
is not None:
362 event
= f
'config={llx(configcode)}'
363 elif eventidcode
is not None:
364 event
= f
'eventid={llx(eventidcode)}'
366 event
= f
'event={llx(eventcode)}'
368 ('AnyThread', 'any='),
369 ('PortMask', 'ch_mask='),
370 ('CounterMask', 'cmask='),
371 ('EdgeDetect', 'edge='),
372 ('FCMask', 'fc_mask='),
374 ('SampleAfterValue', 'period='),
376 ('NodeType', 'type='),
377 ('RdWrMask', 'rdwrmask='),
378 ('EnAllCores', 'enallcores='),
379 ('EnAllSlices', 'enallslices='),
380 ('SliceId', 'sliceid='),
381 ('ThreadMask', 'threadmask='),
383 for key
, value
in event_fields
:
384 if key
in jd
and not is_zero(jd
[key
]):
385 event
+= f
',{value}{canonicalize_value(jd[key])}'
387 event
+= f
',{filter}'
389 event
+= f
',{msr}{msrval}'
390 if self
.desc
and extra_desc
:
391 self
.desc
+= extra_desc
392 if self
.long_desc
and extra_desc
:
393 self
.long_desc
+= extra_desc
395 if arch_std
.lower() in _arch_std_events
:
396 event
= _arch_std_events
[arch_std
.lower()].event
397 # Copy from the architecture standard event to self for undefined fields.
398 for attr
, value
in _arch_std_events
[arch_std
.lower()].__dict
__.items():
399 if hasattr(self
, attr
) and not getattr(self
, attr
):
400 setattr(self
, attr
, value
)
402 raise argparse
.ArgumentTypeError('Cannot find arch std event:', arch_std
)
404 self
.event
= real_event(self
.name
, event
)
406 def __repr__(self
) -> str:
407 """String representation primarily for debugging."""
409 for attr
, value
in self
.__dict
__.items():
411 s
+= f
'\t{attr} = {value},\n'
414 def build_c_string(self
, metric
: bool) -> str:
416 for attr
in _json_metric_attributes
if metric
else _json_event_attributes
:
417 x
= getattr(self
, attr
)
418 if metric
and x
and attr
== 'metric_expr':
419 # Convert parsed metric expressions into a string. Slashes
420 # must be doubled in the file.
421 x
= x
.ToPerfJson().replace('\\', '\\\\')
422 if metric
and x
and attr
== 'metric_threshold':
423 x
= x
.replace('\\', '\\\\')
424 if attr
in _json_enum_attributes
:
427 s
+= f
'{x}\\000' if x
else '\\000'
def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer.

    Emits '{ <offset> }, /* <packed string> */' where <offset> indexes
    into the shared big C string built by _bcs.
    """
    # Rebuild the exact packed string so the offset lookup in _bcs finds
    # the entry registered during preprocessing.
    packed = self.build_c_string(metric)
    offset = _bcs.offsets[packed]
    return f'{{ {offset} }}, /* {packed} */\n'
437 @lru_cache(maxsize
=None)
438 def read_json_events(path
: str, topic
: str) -> Sequence
[JsonEvent
]:
439 """Read json events from the specified file."""
441 events
= json
.load(open(path
), object_hook
=JsonEvent
)
442 except BaseException
as err
:
443 print(f
"Exception processing {path}")
445 metrics
: list[Tuple
[str, str, metric
.Expression
]] = []
448 if event
.metric_name
and '-' not in event
.metric_name
:
449 metrics
.append((event
.pmu
, event
.metric_name
, event
.metric_expr
))
450 updates
= metric
.RewriteMetricsInTermsOfOthers(metrics
)
453 if event
.metric_name
in updates
:
454 # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
455 # f'to\n"{updates[event.metric_name]}"')
456 event
.metric_expr
= updates
[event
.metric_name
]
460 def preprocess_arch_std_files(archpath
: str) -> None:
461 """Read in all architecture standard events."""
462 global _arch_std_events
463 for item
in os
.scandir(archpath
):
464 if item
.is_file() and item
.name
.endswith('.json'):
465 for event
in read_json_events(item
.path
, topic
=''):
467 _arch_std_events
[event
.name
.lower()] = event
468 if event
.metric_name
:
469 _arch_std_events
[event
.metric_name
.lower()] = event
472 def add_events_table_entries(item
: os
.DirEntry
, topic
: str) -> None:
473 """Add contents of file to _pending_events table."""
474 for e
in read_json_events(item
.path
, topic
):
476 _pending_events
.append(e
)
478 _pending_metrics
.append(e
)
481 def print_pending_events() -> None:
482 """Optionally close events table."""
484 def event_cmp_key(j
: JsonEvent
) -> Tuple
[str, str, bool, str, str]:
485 def fix_none(s
: Optional
[str]) -> str:
490 return (fix_none(j
.pmu
).replace(',','_'), fix_none(j
.name
), j
.desc
is not None, fix_none(j
.topic
),
491 fix_none(j
.metric_name
))
493 global _pending_events
494 if not _pending_events
:
497 global _pending_events_tblname
498 if _pending_events_tblname
.endswith('_sys'):
499 global _sys_event_tables
500 _sys_event_tables
.append(_pending_events_tblname
)
503 _event_tables
.append(_pending_events_tblname
)
509 for event
in sorted(_pending_events
, key
=event_cmp_key
):
510 if last_pmu
and last_pmu
== event
.pmu
:
511 assert event
.name
!= last_name
, f
"Duplicate event: {last_pmu}/{last_name}/ in {_pending_events_tblname}"
512 if event
.pmu
!= last_pmu
:
514 _args
.output_file
.write('};\n')
515 pmu_name
= event
.pmu
.replace(',', '_')
516 _args
.output_file
.write(
517 f
'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
520 pmus
.add((event
.pmu
, pmu_name
))
522 _args
.output_file
.write(event
.to_c_string(metric
=False))
523 last_name
= event
.name
526 _args
.output_file
.write(f
"""
529 const struct pmu_table_entry {_pending_events_tblname}[] = {{
531 for (pmu
, tbl_pmu
) in sorted(pmus
):
532 pmu_name
= f
"{pmu}\\000"
533 _args
.output_file
.write(f
"""{{
534 .entries = {_pending_events_tblname}_{tbl_pmu},
535 .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
536 .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
539 _args
.output_file
.write('};\n\n')
541 def print_pending_metrics() -> None:
542 """Optionally close metrics table."""
544 def metric_cmp_key(j
: JsonEvent
) -> Tuple
[bool, str, str]:
545 def fix_none(s
: Optional
[str]) -> str:
550 return (j
.desc
is not None, fix_none(j
.pmu
), fix_none(j
.metric_name
))
552 global _pending_metrics
553 if not _pending_metrics
:
556 global _pending_metrics_tblname
557 if _pending_metrics_tblname
.endswith('_sys'):
558 global _sys_metric_tables
559 _sys_metric_tables
.append(_pending_metrics_tblname
)
562 _metric_tables
.append(_pending_metrics_tblname
)
567 for metric
in sorted(_pending_metrics
, key
=metric_cmp_key
):
568 if metric
.pmu
!= last_pmu
:
570 _args
.output_file
.write('};\n')
571 pmu_name
= metric
.pmu
.replace(',', '_')
572 _args
.output_file
.write(
573 f
'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
575 last_pmu
= metric
.pmu
576 pmus
.add((metric
.pmu
, pmu_name
))
578 _args
.output_file
.write(metric
.to_c_string(metric
=True))
579 _pending_metrics
= []
581 _args
.output_file
.write(f
"""
584 const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
586 for (pmu
, tbl_pmu
) in sorted(pmus
):
587 pmu_name
= f
"{pmu}\\000"
588 _args
.output_file
.write(f
"""{{
589 .entries = {_pending_metrics_tblname}_{tbl_pmu},
590 .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
591 .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
594 _args
.output_file
.write('};\n\n')
596 def get_topic(topic
: str) -> str:
597 if topic
.endswith('metrics.json'):
599 return removesuffix(topic
, '.json').replace('-', ' ')
601 def preprocess_one_file(parents
: Sequence
[str], item
: os
.DirEntry
) -> None:
606 # base dir or too deep
608 if level
== 0 or level
> 4:
611 # Ignore other directories. If the file name does not have a .json
612 # extension, ignore it. It could be a readme.txt for instance.
613 if not item
.is_file() or not item
.name
.endswith('.json'):
616 if item
.name
== 'metricgroups.json':
617 metricgroup_descriptions
= json
.load(open(item
.path
))
618 for mgroup
in metricgroup_descriptions
:
619 assert len(mgroup
) > 1, parents
620 description
= f
"{metricgroup_descriptions[mgroup]}\\000"
621 mgroup
= f
"{mgroup}\\000"
622 _bcs
.add(mgroup
, metric
=True)
623 _bcs
.add(description
, metric
=True)
624 _metricgroups
[mgroup
] = description
627 topic
= get_topic(item
.name
)
628 for event
in read_json_events(item
.path
, topic
):
629 pmu_name
= f
"{event.pmu}\\000"
631 _bcs
.add(pmu_name
, metric
=False)
632 _bcs
.add(event
.build_c_string(metric
=False), metric
=False)
633 if event
.metric_name
:
634 _bcs
.add(pmu_name
, metric
=True)
635 _bcs
.add(event
.build_c_string(metric
=True), metric
=True)
637 def process_one_file(parents
: Sequence
[str], item
: os
.DirEntry
) -> None:
638 """Process a JSON file during the main walk."""
639 def is_leaf_dir_ignoring_sys(path
: str) -> bool:
640 for item
in os
.scandir(path
):
641 if item
.is_dir() and item
.name
!= 'sys':
645 # Model directories are leaves (ignoring possible sys
646 # directories). The FTW will walk into the directory next. Flush
647 # pending events and metrics and update the table names for the new
649 if item
.is_dir() and is_leaf_dir_ignoring_sys(item
.path
):
650 print_pending_events()
651 print_pending_metrics()
653 global _pending_events_tblname
654 _pending_events_tblname
= file_name_to_table_name('pmu_events_', parents
, item
.name
)
655 global _pending_metrics_tblname
656 _pending_metrics_tblname
= file_name_to_table_name('pmu_metrics_', parents
, item
.name
)
658 if item
.name
== 'sys':
659 _sys_event_table_to_metric_table_mapping
[_pending_events_tblname
] = _pending_metrics_tblname
662 # base dir or too deep
664 if level
== 0 or level
> 4:
667 # Ignore other directories. If the file name does not have a .json
668 # extension, ignore it. It could be a readme.txt for instance.
669 if not item
.is_file() or not item
.name
.endswith('.json') or item
.name
== 'metricgroups.json':
672 add_events_table_entries(item
, get_topic(item
.name
))
675 def print_mapping_table(archs
: Sequence
[str]) -> None:
676 """Read the mapfile and generate the struct from cpuid string to event table."""
677 _args
.output_file
.write("""
678 /* Struct used to make the PMU event table implementation opaque to callers. */
679 struct pmu_events_table {
680 const struct pmu_table_entry *pmus;
684 /* Struct used to make the PMU metric table implementation opaque to callers. */
685 struct pmu_metrics_table {
686 const struct pmu_table_entry *pmus;
691 * Map a CPU to its table of PMU events. The CPU is identified by the
692 * cpuid field, which is an arch-specific identifier for the CPU.
693 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
694 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
696 * The cpuid can contain any character other than the comma.
698 struct pmu_events_map {
701 struct pmu_events_table event_table;
702 struct pmu_metrics_table metric_table;
706 * Global table mapping each known CPU for the architecture to its
707 * table of PMU events.
709 const struct pmu_events_map pmu_events_map[] = {
713 _args
.output_file
.write("""{
714 \t.arch = "testarch",
715 \t.cpuid = "testcpu",
717 \t\t.pmus = pmu_events__test_soc_cpu,
718 \t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
721 \t\t.pmus = pmu_metrics__test_soc_cpu,
722 \t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
726 elif arch
== 'common':
727 _args
.output_file
.write("""{
731 \t\t.pmus = pmu_events__common,
732 \t\t.num_pmus = ARRAY_SIZE(pmu_events__common),
734 \t.metric_table = {},
738 with
open(f
'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile
:
739 table
= csv
.reader(csvfile
)
742 # Skip the first row or any row beginning with #.
743 if not first
and len(row
) > 0 and not row
[0].startswith('#'):
744 event_tblname
= file_name_to_table_name('pmu_events_', [], row
[2].replace('/', '_'))
745 if event_tblname
in _event_tables
:
746 event_size
= f
'ARRAY_SIZE({event_tblname})'
748 event_tblname
= 'NULL'
750 metric_tblname
= file_name_to_table_name('pmu_metrics_', [], row
[2].replace('/', '_'))
751 if metric_tblname
in _metric_tables
:
752 metric_size
= f
'ARRAY_SIZE({metric_tblname})'
754 metric_tblname
= 'NULL'
756 if event_size
== '0' and metric_size
== '0':
758 cpuid
= row
[0].replace('\\', '\\\\')
759 _args
.output_file
.write(f
"""{{
761 \t.cpuid = "{cpuid}",
763 \t\t.pmus = {event_tblname},
764 \t\t.num_pmus = {event_size}
767 \t\t.pmus = {metric_tblname},
768 \t\t.num_pmus = {metric_size}
774 _args
.output_file
.write("""{
777 \t.event_table = { 0, 0 },
778 \t.metric_table = { 0, 0 },
784 def print_system_mapping_table() -> None:
785 """C struct mapping table array for tables from /sys directories."""
786 _args
.output_file
.write("""
787 struct pmu_sys_events {
789 \tstruct pmu_events_table event_table;
790 \tstruct pmu_metrics_table metric_table;
793 static const struct pmu_sys_events pmu_sys_event_tables[] = {
795 printed_metric_tables
= []
796 for tblname
in _sys_event_tables
:
797 _args
.output_file
.write(f
"""\t{{
798 \t\t.event_table = {{
799 \t\t\t.pmus = {tblname},
800 \t\t\t.num_pmus = ARRAY_SIZE({tblname})
802 metric_tblname
= _sys_event_table_to_metric_table_mapping
[tblname
]
803 if metric_tblname
in _sys_metric_tables
:
804 _args
.output_file
.write(f
"""
805 \t\t.metric_table = {{
806 \t\t\t.pmus = {metric_tblname},
807 \t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
809 printed_metric_tables
.append(metric_tblname
)
810 _args
.output_file
.write(f
"""
811 \t\t.name = \"{tblname}\",
814 for tblname
in _sys_metric_tables
:
815 if tblname
in printed_metric_tables
:
817 _args
.output_file
.write(f
"""\t{{
818 \t\t.metric_table = {{
819 \t\t\t.pmus = {tblname},
820 \t\t\t.num_pmus = ARRAY_SIZE({tblname})
822 \t\t.name = \"{tblname}\",
825 _args
.output_file
.write("""\t{
826 \t\t.event_table = { 0, 0 },
827 \t\t.metric_table = { 0, 0 },
831 static void decompress_event(int offset, struct pmu_event *pe)
833 \tconst char *p = &big_c_string[offset];
835 for attr
in _json_event_attributes
:
836 _args
.output_file
.write(f
'\n\tpe->{attr} = ')
837 if attr
in _json_enum_attributes
:
838 _args
.output_file
.write("*p - '0';\n")
840 _args
.output_file
.write("(*p == '\\0' ? NULL : p);\n")
841 if attr
== _json_event_attributes
[-1]:
843 if attr
in _json_enum_attributes
:
844 _args
.output_file
.write('\tp++;')
846 _args
.output_file
.write('\twhile (*p++);')
847 _args
.output_file
.write("""}
849 static void decompress_metric(int offset, struct pmu_metric *pm)
851 \tconst char *p = &big_c_string[offset];
853 for attr
in _json_metric_attributes
:
854 _args
.output_file
.write(f
'\n\tpm->{attr} = ')
855 if attr
in _json_enum_attributes
:
856 _args
.output_file
.write("*p - '0';\n")
858 _args
.output_file
.write("(*p == '\\0' ? NULL : p);\n")
859 if attr
== _json_metric_attributes
[-1]:
861 if attr
in _json_enum_attributes
:
862 _args
.output_file
.write('\tp++;')
864 _args
.output_file
.write('\twhile (*p++);')
865 _args
.output_file
.write("""}
867 static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
868 const struct pmu_table_entry *pmu,
869 pmu_event_iter_fn fn,
873 struct pmu_event pe = {
874 .pmu = &big_c_string[pmu->pmu_name.offset],
877 for (uint32_t i = 0; i < pmu->num_entries; i++) {
878 decompress_event(pmu->entries[i].offset, &pe);
881 ret = fn(&pe, table, data);
888 static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
889 const struct pmu_table_entry *pmu,
891 pmu_event_iter_fn fn,
894 struct pmu_event pe = {
895 .pmu = &big_c_string[pmu->pmu_name.offset],
897 int low = 0, high = pmu->num_entries - 1;
899 while (low <= high) {
900 int cmp, mid = (low + high) / 2;
902 decompress_event(pmu->entries[mid].offset, &pe);
904 if (!pe.name && !name)
907 if (!pe.name && name) {
911 if (pe.name && !name) {
916 cmp = strcasecmp(pe.name, name);
926 return fn ? fn(&pe, table, data) : 0;
928 return PMU_EVENTS__NOT_FOUND;
931 int pmu_events_table__for_each_event(const struct pmu_events_table *table,
932 struct perf_pmu *pmu,
933 pmu_event_iter_fn fn,
936 for (size_t i = 0; i < table->num_pmus; i++) {
937 const struct pmu_table_entry *table_pmu = &table->pmus[i];
938 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
941 if (pmu && !pmu__name_match(pmu, pmu_name))
944 ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
951 int pmu_events_table__find_event(const struct pmu_events_table *table,
952 struct perf_pmu *pmu,
954 pmu_event_iter_fn fn,
957 for (size_t i = 0; i < table->num_pmus; i++) {
958 const struct pmu_table_entry *table_pmu = &table->pmus[i];
959 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
962 if (!pmu__name_match(pmu, pmu_name))
965 ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
966 if (ret != PMU_EVENTS__NOT_FOUND)
969 return PMU_EVENTS__NOT_FOUND;
972 size_t pmu_events_table__num_events(const struct pmu_events_table *table,
973 struct perf_pmu *pmu)
977 for (size_t i = 0; i < table->num_pmus; i++) {
978 const struct pmu_table_entry *table_pmu = &table->pmus[i];
979 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
981 if (pmu__name_match(pmu, pmu_name))
982 count += table_pmu->num_entries;
987 static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
988 const struct pmu_table_entry *pmu,
989 pmu_metric_iter_fn fn,
993 struct pmu_metric pm = {
994 .pmu = &big_c_string[pmu->pmu_name.offset],
997 for (uint32_t i = 0; i < pmu->num_entries; i++) {
998 decompress_metric(pmu->entries[i].offset, &pm);
1001 ret = fn(&pm, table, data);
1008 int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
1009 pmu_metric_iter_fn fn,
1012 for (size_t i = 0; i < table->num_pmus; i++) {
1013 int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
1022 static const struct pmu_events_map *map_for_cpu(struct perf_cpu cpu)
1025 const struct pmu_events_map *map;
1026 struct perf_cpu cpu;
1029 const struct pmu_events_map *map;
1032 static bool has_last_result, has_last_map_search;
1033 const struct pmu_events_map *map = NULL;
1037 if (has_last_result && last_result.cpu.cpu == cpu.cpu)
1038 return last_result.map;
1040 cpuid = get_cpuid_allow_env_override(cpu);
1043 * On some platforms which uses cpus map, cpuid can be NULL for
1044 * PMUs other than CORE PMUs.
1047 goto out_update_last_result;
1049 if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
1050 map = last_map_search.map;
1055 map = &pmu_events_map[i++];
1062 if (!strcmp_cpuid_str(map->cpuid, cpuid))
1065 free(last_map_search.cpuid);
1066 last_map_search.cpuid = cpuid;
1067 last_map_search.map = map;
1068 has_last_map_search = true;
1070 out_update_last_result:
1071 last_result.cpu = cpu;
1072 last_result.map = map;
1073 has_last_result = true;
1077 static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
1079 struct perf_cpu cpu = {-1};
1082 cpu = perf_cpu_map__min(pmu->cpus);
1083 return map_for_cpu(cpu);
1086 const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
1088 const struct pmu_events_map *map = map_for_pmu(pmu);
1094 return &map->event_table;
1096 for (size_t i = 0; i < map->event_table.num_pmus; i++) {
1097 const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
1098 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1100 if (pmu__name_match(pmu, pmu_name))
1101 return &map->event_table;
1106 const struct pmu_metrics_table *pmu_metrics_table__find(void)
1108 struct perf_cpu cpu = {-1};
1109 const struct pmu_events_map *map = map_for_cpu(cpu);
1111 return map ? &map->metric_table : NULL;
1114 const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
1116 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1119 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1120 return &tables->event_table;
1125 const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
1127 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1130 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1131 return &tables->metric_table;
1136 int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
1138 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1141 int ret = pmu_events_table__for_each_event(&tables->event_table,
1142 /*pmu=*/ NULL, fn, data);
1150 int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
1152 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1155 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1163 const struct pmu_events_table *find_sys_events_table(const char *name)
1165 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1168 if (!strcmp(tables->name, name))
1169 return &tables->event_table;
1174 int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
1176 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1179 int ret = pmu_events_table__for_each_event(&tables->event_table,
1180 /*pmu=*/ NULL, fn, data);
1188 int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
1190 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1193 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1202 def print_metricgroups() -> None:
1203 _args
.output_file
.write("""
1204 static const int metricgroups[][2] = {
1206 for mgroup
in sorted(_metricgroups
):
1207 description
= _metricgroups
[mgroup
]
1208 _args
.output_file
.write(
1209 f
'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
1211 _args
.output_file
.write("""
1214 const char *describe_metricgroup(const char *group)
1216 int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
1218 while (low <= high) {
1219 int mid = (low + high) / 2;
1220 const char *mgroup = &big_c_string[metricgroups[mid][0]];
1221 int cmp = strcmp(mgroup, group);
1224 return &big_c_string[metricgroups[mid][1]];
1225 } else if (cmp < 0) {
def dir_path(path: str) -> str:
  """Validate path is a directory for argparse.

  argparse 'type=' callables must return the converted value on success
  and raise ArgumentTypeError on failure; the visible code was missing
  the success-path return, which would make argparse store None.
  """
  if os.path.isdir(path):
    return path
  raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
1244 def ftw(path
: str, parents
: Sequence
[str],
1245 action
: Callable
[[Sequence
[str], os
.DirEntry
], None]) -> None:
1246 """Replicate the directory/file walking behavior of C's file tree walk."""
1247 for item
in sorted(os
.scandir(path
), key
=lambda e
: e
.name
):
1248 if _args
.model
!= 'all' and item
.is_dir():
1249 # Check if the model matches one in _args.model.
1250 if len(parents
) == _args
.model
.split(',')[0].count('/'):
1251 # We're testing the correct directory.
1252 item_path
= '/'.join(parents
) + ('/' if len(parents
) > 0 else '') + item
.name
1253 if 'test' not in item_path
and 'common' not in item_path
and item_path
not in _args
.model
.split(','):
1255 action(parents
, item
)
1257 ftw(item
.path
, parents
+ [item
.name
], action
)
1259 ap
= argparse
.ArgumentParser()
1260 ap
.add_argument('arch', help='Architecture name like x86')
1261 ap
.add_argument('model', help='''Select a model such as skylake to
1262 reduce the code size. Normally set to "all". For architectures like
1263 ARM64 with an implementor/model, the model must include the implementor
1264 such as "arm/cortex-a34".''',
1269 help='Root of tree containing architecture directories containing json files'
1272 'output_file', type=argparse
.FileType('w', encoding
='utf-8'), nargs
='?', default
=sys
.stdout
)
1273 _args
= ap
.parse_args()
1275 _args
.output_file
.write(f
"""
1276 /* SPDX-License-Identifier: GPL-2.0 */
1277 /* THIS FILE WAS AUTOGENERATED BY jevents.py arch={_args.arch} model={_args.model} ! */
1279 _args
.output_file
.write("""
1280 #include <pmu-events/pmu-events.h>
1281 #include "util/header.h"
1282 #include "util/pmu.h"
1286 struct compact_pmu_event {
1290 struct pmu_table_entry {
1291 const struct compact_pmu_event *entries;
1292 uint32_t num_entries;
1293 struct compact_pmu_event pmu_name;
1298 for item
in os
.scandir(_args
.starting_dir
):
1299 if not item
.is_dir():
1301 if item
.name
== _args
.arch
or _args
.arch
== 'all' or item
.name
== 'test' or item
.name
== 'common':
1302 archs
.append(item
.name
)
1304 if len(archs
) < 2 and _args
.arch
!= 'none':
1305 raise IOError(f
'Missing architecture directory \'{_args.arch}\'')
1309 arch_path
= f
'{_args.starting_dir}/{arch}'
1310 preprocess_arch_std_files(arch_path
)
1311 ftw(arch_path
, [], preprocess_one_file
)
1314 _args
.output_file
.write('static const char *const big_c_string =\n')
1315 for s
in _bcs
.big_string
:
1316 _args
.output_file
.write(s
)
1317 _args
.output_file
.write(';\n\n')
1319 arch_path
= f
'{_args.starting_dir}/{arch}'
1320 ftw(arch_path
, [], process_one_file
)
1321 print_pending_events()
1322 print_pending_metrics()
1324 print_mapping_table(archs
)
1325 print_system_mapping_table()
1326 print_metricgroups()
1328 if __name__
== '__main__':