target-sh4: Introduce SuperHCPU subclasses
[qemu/agraf.git] / scripts / simpletrace.py
blob8bbcb42cc4126dad27b43259f84b40161bcba088
1 #!/usr/bin/env python
3 # Pretty-printer for simple trace backend binary trace files
5 # Copyright IBM, Corp. 2010
7 # This work is licensed under the terms of the GNU GPL, version 2. See
8 # the COPYING file in the top-level directory.
10 # For help see docs/tracing.txt
12 import struct
13 import re
14 import inspect
15 from tracetool import _read_events, Event
16 from tracetool.backend.simple import is_string
# Reserved event id + magic number marking the one-time log header record.
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
# Reserved event id for records reporting events dropped by the backend.
dropped_event_id = 0xfffffffffffffffe

# struct format of the log header: event id, magic, log version (three u64,
# '=' selects native byte order with standard sizes).
log_header_fmt = '=QQQ'
# struct format of each record header: u64 event id, u64 timestamp, then two
# u32 fields that this script never reads (presumably length/pid — confirm
# against the simple trace backend writer).
rec_header_fmt = '=QQII'
def read_header(fobj, hfmt):
    """Read and unpack a header from fobj using struct format hfmt.

    Returns the unpacked tuple, or None on a short read (EOF/truncated file).
    """
    size = struct.calcsize(hfmt)
    raw = fobj.read(size)
    return struct.unpack(hfmt, raw) if len(raw) == size else None
def get_record(edict, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6).

    edict  -- mapping from event id to Event describing each argument list
    rechdr -- tuple returned by read_header(fobj, rec_header_fmt), or None
    fobj   -- binary file positioned just after the record header

    Returns None when rechdr is None (end of file).
    """
    if rechdr is None:
        return None
    rec = (rechdr[0], rechdr[1])
    if rechdr[0] == dropped_event_id:
        # Dropped-event records carry a single u64 payload: the drop count.
        (value,) = struct.unpack('=Q', fobj.read(8))
        return rec + (value,)
    event = edict[rechdr[0]]
    # Renamed loop targets: the original shadowed the builtins type() and len().
    for arg_type, _arg_name in event.args:
        if is_string(arg_type):
            # Strings are stored as a u32 length prefix followed by the bytes.
            (length,) = struct.unpack('=L', fobj.read(4))
            rec = rec + (fobj.read(length),)
        else:
            # Every non-string argument is stored as a u64.
            (value,) = struct.unpack('=Q', fobj.read(8))
            rec = rec + (value,)
    return rec
def read_record(edict, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6).

    Returns None at end of file (short read of the record header).
    """
    hdr = read_header(fobj, rec_header_fmt)
    if hdr is None:
        return None
    return get_record(edict, hdr, fobj)
def read_trace_file(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples
    (event_num, timestamp, arg1, ..., arg6)."""
    header = read_header(fobj, log_header_fmt)
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id or header[1] != header_magic:
        raise ValueError('Not a valid trace file!')

    version = header[2]
    if version not in (0, 2):
        raise ValueError('Unknown version of tracelog format!')
    if version == 0:
        raise ValueError('Older log format, not supported with this QEMU release!')

    # Stream records until a short read signals end of file.
    while True:
        record = read_record(edict, fobj)
        if record is None:
            break
        yield record
class Analyzer(object):
    """Base class for trace file analyzers.

    Pass an instance to run() or process(): begin() is invoked first, then
    one handler per trace record, then end().

    A record whose event name matches a method on the subclass is dispatched
    to that method; all other records go to catchall().
    """

    def begin(self):
        """Called once before any trace record is processed."""
        pass

    def catchall(self, event, rec):
        """Fallback handler for events without a matching method."""
        pass

    def end(self):
        """Called once after the last trace record."""
        pass
def process(events, log, analyzer):
    """Invoke an analyzer on each event in a log.

    events   -- a list of Event objects, or the path of a trace-events file
    log      -- a binary file object, or the path of a simpletrace binary log
    analyzer -- an object implementing the Analyzer interface
    """
    if isinstance(events, str):
        # _read_events parses the whole file before returning (TODO confirm
        # against tracetool), so the handle can be closed immediately; the
        # original code leaked it.
        with open(events, 'r') as eventfobj:
            events = _read_events(eventfobj)
    if isinstance(log, str):
        # The log must stay open: read_trace_file() streams records from it.
        log = open(log, 'rb')

    enabled_events = []
    # Synthesize a pseudo-event describing records the backend dropped.
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {dropped_event_id: dropped_event}

    for e in events:
        if 'disable' not in e.properties:
            enabled_events.append(e)
    # Event ids in the log are indices into the enabled-event list.
    for num, event in enumerate(enabled_events):
        edict[num] = event

    # inspect.getargspec() was removed in Python 3.11; use getfullargspec
    # when available (element 0 is likewise the argument-name list).
    argspec_fn = getattr(inspect, 'getfullargspec', None) or inspect.getargspec

    def build_fn(analyzer, event):
        """Return a callable(event, rec) dispatching rec to the analyzer."""
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        # Subtract 1 for the bound method's implicit 'self'.
        fn_argcount = len(argspec_fn(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*rec[1:2 + event_argcount])
        else:
            # Just arguments, no timestamp
            return lambda _, rec: fn(*rec[2:2 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_file(edict, log):
        event_num = rec[0]
        event = edict[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead.

    Expects sys.argv == [prog, trace-events-path, trace-log-path]; writes a
    usage message to stderr and exits with status 1 otherwise.
    """
    import sys

    if len(sys.argv) != 3:
        sys.stderr.write('usage: %s <trace-events> <trace-file>\n' % sys.argv[0])
        sys.exit(1)

    # Close the trace-events file once parsed (the original leaked the
    # handle); _read_events returns the parsed events before we proceed.
    with open(sys.argv[1], 'r') as eventfobj:
        events = _read_events(eventfobj)
    process(events, sys.argv[2], analyzer)
if __name__ == '__main__':
    class Formatter(Analyzer):
        """Default analyzer: print one line per record as
        'event_name delta_us arg=value ...'."""

        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name, '%0.3f' % (delta_ns / 1000.0)]
            # Arguments start at rec[2] (rec[0]/rec[1] are id and timestamp).
            for idx, (arg_type, arg_name) in enumerate(event.args):
                value = rec[idx + 2]
                if is_string(arg_type):
                    fields.append('%s=%s' % (arg_name, value))
                else:
                    fields.append('%s=0x%x' % (arg_name, value))
            print(' '.join(fields))

    run(Formatter())