# event_analyzing_sample.py: general event handler in python
# SPDX-License-Identifier: GPL-2.0
#
# The current perf report is already very powerful with annotation integrated,
# and this script is not trying to be as powerful as perf report; rather, it
# gives end users/developers a flexible way to analyze events other than
# tracepoints.
#
# The two database-related functions in this script just show how to gather
# the basic information; users can modify them or write their own functions
# according to their specific requirements.
#
# The first function, "show_general_events", just does a basic grouping of all
# generic events with the help of sqlite, and the second one, "show_pebs_ll",
# is for an x86 HW PMU event: PEBS with load latency data.
#
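
# A rough usage sketch (not part of the original script; the exact record
# options depend on which events you want to look at): record some samples,
# then feed the resulting perf.data through this script via perf's built-in
# Python interpreter:
#
#   perf record -a -e cycles sleep 3
#   perf script -s event_analyzing_sample.py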

from __future__ import print_function

import os
import sys
import math
import struct
import sqlite3

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from EventClass import *

#
# If perf.data has a large number of samples, the insert operations will be
# very time consuming (about 10+ minutes for 10000 samples) if the .db
# database is on disk.  Move the .db file to a RAM-based FS to speed up
# the handling, which cuts the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None         # autocommit mode
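
# Note (not in the original script): if the .db file does not need to be
# inspected after the run, an in-memory database avoids the filesystem
# entirely:
#
#   con = sqlite3.connect(":memory:")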

def trace_begin():
    print("In trace_begin:\n")

    #
    # Will create several tables at the start; pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general events.
    #
    con.execute("""
        create table if not exists gen_events (
            name text,
            symbol text,
            comm text,
            dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
            name text,
            symbol text,
            comm text,
            dso text,
            flags integer,
            ip integer,
            status integer,
            dse integer,
            dla integer,
            lat integer
        );""")

#
# Create an event object and insert it into the right table in the database,
# so that the user can do more analysis with simple database commands.
#
def process_event(param_dict):
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved
    if ("dso" in param_dict):
        dso = param_dict["dso"]
    else:
        dso = "Unknown_dso"

    if ("symbol" in param_dict):
        symbol = param_dict["symbol"]
    else:
        symbol = "Unknown_symbol"

    # Create the event object and insert it into the right table in the database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)

def insert_db(event):
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Clear the top bit so the values fit into sqlite's signed 64-bit integers
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    (event.name, event.symbol, event.comm, event.dso, event.flags,
                     event.ip, event.status, event.dse, event.dla, event.lat))
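
# For example (illustration only, not part of the original script), the
# database left in /dev/shm can be queried directly with the sqlite3 command
# line tool after the run:
#
#   sqlite3 /dev/shm/perf.db \
#     "select comm, count(*) from gen_events group by comm order by count(*) desc"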

def trace_end():
    print("In trace_end:\n")
    # We show the basic info for the two types of event classes
    show_general_events()
    show_pebs_ll()
    con.close()

#
# As the number of events may be very big, we can't show the histogram
# with a linear scale; use a log2-based bar length instead.
#
def num2sym(num):
    # Each number will have at least one '#'
    snum = '#' * int(math.log(num, 2) + 1)
    return snum
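
# A quick illustration (not in the original script) of the log2 bucketing:
#
#   >>> num2sym(1)
#   '#'
#   >>> num2sym(1024)      # int(log2(1024)) + 1 == 11
#   '###########'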

def show_general_events():

    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print("There are %d records in the gen_events table" % t[0])
        if t[0] == 0:
            return

    print("Statistics about the general events grouped by thread/symbol/dso: \n")

    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
    for row in commq:
        print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by symbol
    print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by dso
    print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))

#
# This function just shows the basic info; we could do more with the
# data in the tables, like checking the function parameters when some
# big-latency events happen.
#
def show_pebs_ll():

    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print("There are %d records in the pebs_ll table" % t[0])
        if t[0] == 0:
            return

    print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
    for row in commq:
        print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by symbol
    print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
    for row in dseq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
    for row in latq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

def trace_unhandled(event_name, context, event_fields_dict):
    print(' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())]))