chromeos: bluetooth: add BluetoothInputClient
[chromium-blink-merge.git] / tools / valgrind / drmemory_analyze.py
blob97d4635eebf1b99032d72fa3dca60129d75aa808
1 #!/usr/bin/env python
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 # drmemory_analyze.py
8 ''' Given a Dr. Memory output file, parses errors and uniques them.'''
10 from collections import defaultdict
11 import common
12 import hashlib
13 import logging
14 import optparse
15 import os
16 import re
17 import subprocess
18 import sys
19 import time
class DrMemoryError:
  """A single Dr. Memory error report paired with its generated suppression.

  Two DrMemoryError objects compare equal iff their (normalized) suppression
  texts are equal, so reports that differ only in addresses/process ids are
  uniqued together when stored in a set.
  """

  def __init__(self, report, suppression, testcase):
    """Args:
      report: the raw error report text from the results file.
      suppression: the matching suppression text generated by Dr. Memory.
      testcase: name of the test the report came from, or None.
    """
    self._report = report
    self._testcase = testcase

    # Chromium-specific transformations of the suppressions:
    # Replace 'any_test.exe' and 'chrome.dll' with '*', then remove the
    # Dr.Memory-generated error ids from the name= lines as they don't
    # make sense in a multiprocess report.
    supp_lines = suppression.split("\n")
    for i, line in enumerate(supp_lines):
      if line.startswith("name="):
        line = "name=<insert_a_suppression_name_here>"
      if line.startswith("chrome.dll!"):
        line = line.replace("chrome.dll!", "*!")
      # Wildcard any "<module>.exe!frame" module: the ".exe" must end
      # immediately before the first "!" (d_exe_index + len(".exe")).
      bang_index = line.find("!")
      d_exe_index = line.find(".exe!")
      if bang_index >= 4 and d_exe_index + 4 == bang_index:
        line = "*" + line[bang_index:]
      supp_lines[i] = line
    self._suppression = "\n".join(supp_lines)

  def __str__(self):
    """Render the report, its origin test and a ready-to-paste suppression."""
    output = self._report + "\n"
    if self._testcase:
      output += "The report came from the `%s` test.\n" % self._testcase
    output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
    output += ("  For more info on using suppressions see "
        "http://dev.chromium.org/developers/how-tos/using-drmemory#TOC-Suppressing-error-reports-from-the-\n")
    output += "{\n%s\n}\n" % self._suppression
    return output

  # This is a device-independent hash identifying the suppression.
  # By printing out this hash we can find duplicate reports between tests and
  # different shards running on multiple buildbots
  def ErrorHash(self):
    """Return a 64-bit int derived from the MD5 of the suppression text."""
    # .encode() makes this work on Python 3 as well (md5 needs bytes);
    # on ASCII suppression text it is a no-op for Python 2 strs.
    return int(hashlib.md5(self._suppression.encode()).hexdigest()[:16], 16)

  def __hash__(self):
    return hash(self._suppression)

  def __eq__(self, rhs):
    # Accept either another DrMemoryError or a raw suppression string,
    # instead of relying on the reflected str == DrMemoryError comparison.
    if isinstance(rhs, DrMemoryError):
      return self._suppression == rhs._suppression
    return self._suppression == rhs
class DrMemoryAnalyzer:
  ''' Given a set of Dr.Memory output files, parse all the errors out of
  them, unique them and output the results.'''

  def __init__(self):
    self.known_errors = set()  # Suppressions already reported in prior tests.
    self.error_count = 0

  def ReadLine(self):
    """Advance self.line_ to the next line of self.cur_fd_ ('' at EOF)."""
    self.line_ = self.cur_fd_.readline()

  def ReadSection(self):
    """Return the current line plus all following lines up to a blank line."""
    result = [self.line_]
    self.ReadLine()
    while self.line_.strip():
      result.append(self.line_)
      self.ReadLine()
    return result

  def ParseReportFile(self, filename, testcase):
    """Parse one Dr. Memory results file.

    Args:
      filename: path of the results file; the matching generated
          suppressions file is found by replacing "results" with "suppress".
      testcase: name of the test the reports came from, or None.

    Returns:
      A list of DrMemoryError objects and/or raw "ASSERT FAILURE ..." strings.

    NOTE: if the file contains a "SUPPRESSIONS USED:" section this relies on
    self.used_suppressions having been initialized (done by Report()).
    """
    # First, read the generated suppressions file so we can easily look up a
    # suppression for a given error.
    generated_suppressions = {}  # Key -> Error #, Value -> Suppression text.
    with open(filename.replace("results", "suppress"), 'r') as supp_fd:
      for line in supp_fd:
        # NOTE: this regexp looks fragile. Might break if the generated
        # suppression format slightly changes.
        m = re.search(r"# Suppression for Error #([0-9]+)", line.strip())
        if not m:
          continue
        error_id = int(m.groups()[0])
        assert error_id not in generated_suppressions
        # OK, now read the next suppression. The inner loop consumes lines
        # from the same iterator, so the outer loop resumes after the body.
        cur_supp = ""
        for supp_line in supp_fd:
          if supp_line.startswith("#") or not supp_line.strip():
            break
          cur_supp += supp_line
        generated_suppressions[error_id] = cur_supp.strip()

    ret = []
    self.cur_fd_ = open(filename, 'r')
    try:
      while True:
        self.ReadLine()
        if self.line_ == '':
          break  # EOF.

        match = re.search(r"^Error #([0-9]+): (.*)", self.line_)
        if match:
          error_id = int(match.groups()[0])
          # Strip the "Error #N: " prefix so the report starts with the
          # error description itself.
          self.line_ = match.groups()[1].strip() + "\n"
          report = "".join(self.ReadSection()).strip()
          suppression = generated_suppressions[error_id]
          ret.append(DrMemoryError(report, suppression, testcase))

        if re.search("SUPPRESSIONS USED:", self.line_):
          # Accumulate per-suppression hit counts into the shared counter.
          self.ReadLine()
          while self.line_.strip() != "":
            line = self.line_.strip()
            (count, name) = re.match(r" *([0-9]+)x(?: \(leaked .*\))?: (.*)",
                                     line).groups()
            self.used_suppressions[name] += int(count)
            self.ReadLine()

        if self.line_.startswith("ASSERT FAILURE"):
          ret.append(self.line_.strip())
    finally:
      # Close the results file even if parsing raises.
      self.cur_fd_.close()
    return ret

  def Report(self, filenames, testcase, check_sanity):
    """Parse all |filenames| and print reports not seen in previous tests.

    Args:
      filenames: list of Dr. Memory results file paths.
      testcase: name of the test the reports came from, or None.
      check_sanity: currently unsupported (see TODO below).

    Returns:
      0 if no error reports were found, -1 otherwise.
    """
    sys.stdout.flush()
    # TODO(timurrrr): support positive tests / check_sanity==True
    self.used_suppressions = defaultdict(int)

    to_report = []
    reports_for_this_test = set()
    for f in filenames:
      cur_reports = self.ParseReportFile(f, testcase)

      # Filter out the reports that were there in previous tests.
      for r in cur_reports:
        if r in reports_for_this_test:
          # A similar report is about to be printed for this test.
          pass
        elif r in self.known_errors:
          # A similar report has already been printed in one of the prev tests.
          to_report.append("This error was already printed in some "
                           "other test, see 'hash=#%016X#'" % r.ErrorHash())
          reports_for_this_test.add(r)
        else:
          self.known_errors.add(r)
          reports_for_this_test.add(r)
          to_report.append(r)

    common.PrintUsedSuppressionsList(self.used_suppressions)

    if not to_report:
      logging.info("PASS: No error reports found")
      return 0

    sys.stdout.flush()
    sys.stderr.flush()
    logging.info("Found %i error reports" % len(to_report))
    for report in to_report:
      self.error_count += 1
      logging.info("Report #%d\n%s" % (self.error_count, report))
    logging.info("Total: %i error reports" % len(to_report))
    sys.stdout.flush()
    return -1
def main():
  '''For testing only. The DrMemoryAnalyze class should be imported instead.'''
  parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
  parser.add_option("", "--source_dir",
                    help="path to top of source tree for this build"
                         "(used to normalize source paths in baseline)")

  options, args = parser.parse_args()
  # Every positional argument is a results file to analyze.
  if not args:
    parser.error("no filename specified")

  logging.getLogger().setLevel(logging.INFO)
  return DrMemoryAnalyzer().Report(args, None, False)
# Command-line entry point (for testing; normally the analyzer class is
# imported). The process exit code is main()'s return value: 0 when no
# error reports were found, -1 otherwise.
if __name__ == '__main__':
  sys.exit(main())