tools/perf/scripts/python/event_analyzing_sample.py
# event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but to
# provide the end user/developer a flexible way to analyze events other
# than trace points.
#
# The two database-related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirements.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the second one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
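# A possible way to feed this script (the record command below is only an
# illustration; the exact PEBS/load-latency event name depends on the CPU and
# on the perf build):
#
#   perf record -e cpu/mem-loads/pp -a -- sleep 10
#   perf script -s event_analyzing_sample.py
#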

import os
import sys
import math
import struct
import sqlite3

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from EventClass import *

#
# If the perf.data file has a large number of samples, the insert operations
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk.  Move the .db file to a RAM based FS to speed up
# the handling, which cuts the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None

def trace_begin():
        print "In trace_begin:\n"

        #
        # Create several tables at the start: pebs_ll is for PEBS data with
        # load latency info, while gen_events is for general events.
        #
        con.execute("""
                create table if not exists gen_events (
                        name text,
                        symbol text,
                        comm text,
                        dso text
                );""")
        con.execute("""
                create table if not exists pebs_ll (
                        name text,
                        symbol text,
                        comm text,
                        dso text,
                        flags integer,
                        ip integer,
                        status integer,
                        dse integer,
                        dla integer,
                        lat integer
                );""")
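
#
# Optional sketch (a hypothetical helper, not part of the original script and
# never called from its handlers): if the tables grow large, indexes on the
# columns used by the "group by" queries below can speed up trace_end()
# noticeably.
#
def create_indexes():
        con.execute("create index if not exists gen_comm_idx on gen_events (comm)")
        con.execute("create index if not exists pebs_lat_idx on pebs_ll (lat)")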

#
# Create an event object and insert it into the database, so that the user
# can do more analysis with simple database commands.
#
def process_event(param_dict):
        event_attr = param_dict["attr"]
        sample     = param_dict["sample"]
        raw_buf    = param_dict["raw_buf"]
        comm       = param_dict["comm"]
        name       = param_dict["ev_name"]

        # Symbol and dso info are not always resolved
        if "dso" in param_dict:
                dso = param_dict["dso"]
        else:
                dso = "Unknown_dso"

        if "symbol" in param_dict:
                symbol = param_dict["symbol"]
        else:
                symbol = "Unknown_symbol"

        # Create the event object and insert it into the right table of the database
        event = create_event(name, comm, dso, symbol, raw_buf)
        insert_db(event)

def insert_db(event):
        if event.ev_type == EVTYPE_GENERIC:
                con.execute("insert into gen_events values(?, ?, ?, ?)",
                                (event.name, event.symbol, event.comm, event.dso))
        elif event.ev_type == EVTYPE_PEBS_LL:
                event.ip &= 0x7fffffffffffffff
                event.dla &= 0x7fffffffffffffff
                con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                                (event.name, event.symbol, event.comm, event.dso, event.flags,
                                        event.ip, event.status, event.dse, event.dla, event.lat))

def trace_end():
        print "In trace_end:\n"
        # Show the basic info for the two types of event classes
        show_general_events()
        show_pebs_ll()
        con.close()

#
# As the event count may be very large, the histogram can't be shown on a
# linear scale; scale it with log2 instead.
#
def num2sym(num):
        # Each number will have at least one '#'
        snum = '#' * int(math.log(num, 2) + 1)
        return snum
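
#
# For example, num2sym(1) returns "#" and num2sym(1000) returns a row of ten
# '#' characters, so each additional '#' roughly corresponds to a doubling of
# the sample count for that row.
#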

def show_general_events():

        # Check the total record number in the table
        count = con.execute("select count(*) from gen_events")
        for t in count:
                print "There are %d records in the gen_events table" % t[0]
                if t[0] == 0:
                        return

        print "Statistics about the general events grouped by thread/symbol/dso: \n"

        # Group by thread
        commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
        print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
        for row in commq:
                print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

        # Group by symbol
        print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
        symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
        for row in symbolq:
                print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

        # Group by dso
        print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
        dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
        for row in dsoq:
                print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))

#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen (see the show_top_latency() sketch below).
#
def show_pebs_ll():

        count = con.execute("select count(*) from pebs_ll")
        for t in count:
                print "There are %d records in the pebs_ll table" % t[0]
                if t[0] == 0:
                        return

        print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"

        # Group by thread
        commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
        print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
        for row in commq:
                print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))

        # Group by symbol
        print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
        symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
        for row in symbolq:
                print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

        # Group by dse
        dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
        print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
        for row in dseq:
                print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))

        # Group by latency
        latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
        print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
        for row in latq:
                print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
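
#
# Hypothetical follow-up (an added sketch, not called from trace_end()): one
# way to start the "big latency" analysis mentioned above is to list the
# samples with the highest load latency.  It only uses the pebs_ll schema
# created in trace_begin(); the name show_top_latency is not part of the
# original script.
#
def show_top_latency(limit = 10):
        topq = con.execute("select comm, symbol, dso, lat from pebs_ll order by -lat limit ?", (limit,))
        print "\n%16s %32s %40s %8s\n%s" % ("comm", "symbol", "dso", "latency", "="*100)
        for row in topq:
                print "%16s %32s %40s %8d" % (row[0], row[1], row[2], row[3])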

def trace_unhandled(event_name, context, event_fields_dict):
        print ' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())])
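
#
# Because the samples stay in /dev/shm/perf.db after the run, further ad-hoc
# queries can be issued from the sqlite3 shell once perf script has finished,
# for example (the query itself is only an illustration):
#
#   sqlite3 /dev/shm/perf.db "select comm, count(*) from pebs_ll group by comm"
#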