#!/usr/bin/env python

# Copyright (C) 2004, 2005, 2006 Nathaniel Smith
# Copyright (C) 2007 Holger Hans Peter Freyther
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
#    its contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# HTML output inspired by the output of lcov as found on the GStreamer
# site. I assume this is not copyrightable.

# Read all CSV files and
# create an overview file.
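#
# A minimal sketch of the per-run CSV layout this script expects (the file
# names and numbers are illustrative only; read_csv() below is the
# authoritative parser):
#
#   <revision-id>,<unix-timestamp>
#   WebCore/dom/Document.cpp,1200,843
#   WebCore/dom/Node.cpp,640,12
#   ...
#
# The first row carries the run id and time; every following row is
# path,total_lines,covered_lines.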

import sys
import csv
import glob
import time
import os
import os.path
import datetime
import shutil

os.environ["TTFPATH"] = ":".join(["/usr/share/fonts/truetype/" + d
                                  for d in ("ttf-bitstream-vera",
                                            "freefont",
                                            "msttcorefonts")])

# Coverage thresholds (in percent) for the low/medium/high color bands
# used by coverage_icon() below.
level_LOW = 10
level_MEDIUM = 70

def copy_files(dest_dir):
    """
    Copy the CSS and the PNGs to the destination directory
    """
    images = ["amber.png", "emerald.png", "glass.png", "ruby.png", "snow.png"]
    css = "gcov.css"
    (base_path, name) = os.path.split(__file__)
    base_path = os.path.abspath(base_path)

    shutil.copyfile(os.path.join(base_path, css), os.path.join(dest_dir, css))
    for image in images:
        shutil.copyfile(os.path.join(base_path, image), os.path.join(dest_dir, image))

def sumcov(cov):
    return "%.2f%% (%s/%s)" % (cov[1] * 100.0 / (cov[0] or 1), cov[1], cov[0])
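
# A quick hand-checked example (not output from a real run): cov is the pair
# (instrumented lines, executed lines), so
#   sumcov((200, 150)) == "75.00% (150/200)"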

def create_page(dest_dir, name):
    index = open(os.path.join(dest_dir, name), "w")
    index.write("""<HTML>
<HEAD>
<TITLE>WebKit test coverage information</TITLE>
<link rel="stylesheet" type="text/css" href="gcov.css">
</HEAD>
<BODY>
""")
    return index

def generate_header(file, last_time, total_lines, total_executed, path, image):
    product = "WebKit"
    date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(last_time))
    covered_lines = sumcov((total_lines, total_executed))

    file.write("""<table width="100%%" border=0 cellspacing=0 cellpadding=0>
<tr><td class="title">GCOV code coverage report</td></tr>
<tr><td class="ruler"><img src="glass.png" width=3 height=3 alt=""></td></tr>

<tr>
<td width="100%%">
<table cellpadding=1 border=0 width="100%%">
<tr>
<td class="headerItem" width="20%%">Current&nbsp;view:</td>
<td class="headerValue" width="80%%" colspan=4>%(path)s</td>
</tr>
<tr>
<td class="headerItem" width="20%%">Test:</td>
<td class="headerValue" width="80%%" colspan=4>%(product)s</td>
</tr>
<tr>
<td class="headerItem" width="20%%">Date:</td>
<td class="headerValue" width="20%%">%(date)s</td>
<td width="20%%"></td>
<td class="headerItem" width="20%%">Instrumented&nbsp;lines:</td>
<td class="headerValue" width="20%%">%(total_lines)s</td>
</tr>
<tr>
<td class="headerItem" width="20%%">Code&nbsp;covered:</td>
<td class="headerValue" width="20%%">%(covered_lines)s</td>
<td width="20%%"></td>
<td class="headerItem" width="20%%">Executed&nbsp;lines:</td>
<td class="headerValue" width="20%%">%(total_executed)s</td>
</tr>
</table>
</td>
</tr>
<tr><td class="ruler"><img src="glass.png" width=3 height=3 alt=""></td></tr>
</table>""" % vars())
    # disabled for now <tr><td><img src="%(image)s"></td></tr>

def generate_table_item(file, name, total_lines, covered_lines):
    covered_precise = (covered_lines * 100.0) / (total_lines or 1.0)
    covered = int(round(covered_precise))
    remainder = 100 - covered
    (image, perClass, numClass) = coverage_icon(covered_precise)
    site = "%s.html" % name.replace(os.path.sep, '__')
    file.write("""
<tr>
<td class="coverFile"><a href="%(site)s">%(name)s</a></td>
<td class="coverBar" align="center">
<table border=0 cellspacing=0 cellpadding=1><tr><td class="coverBarOutline"><img src="%(image)s" width=%(covered)s height=10 alt="%(covered_precise).2f"><img src="snow.png" width=%(remainder)s height=10 alt="%(covered_precise).2f"></td></tr></table>
</td>
<td class="%(perClass)s">%(covered_precise).2f&nbsp;%%</td>
<td class="%(numClass)s">%(covered_lines)s&nbsp;/&nbsp;%(total_lines)s&nbsp;lines</td>
</tr>
""" % vars())

def generate_table_header_start(file):
    # This template is written verbatim (no %-substitution), so plain "%"
    # rather than "%%" is used in the width attributes.
    file.write("""<center>
<table width="80%" cellpadding=2 cellspacing=1 border=0>

<tr>
<td width="50%"><br></td>
<td width="15%"></td>
<td width="15%"></td>
<td width="20%"></td>
</tr>

<tr>
<td class="tableHead">Directory&nbsp;name</td>
<td class="tableHead" colspan=3>Coverage</td>
</tr>
""")

def coverage_icon(percent):
    if percent < level_LOW:
        return ("ruby.png", "coverPerLo", "coverNumLo")
    elif percent < level_MEDIUM:
        return ("amber.png", "coverPerMed", "coverNumMed")
    else:
        return ("emerald.png", "coverPerHi", "coverNumHi")
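
# Hand-checked examples against the thresholds above (not real data):
#   coverage_icon(5)  picks ruby.png    (low coverage,    percent < level_LOW)
#   coverage_icon(40) picks amber.png   (medium coverage, percent < level_MEDIUM)
#   coverage_icon(85) picks emerald.png (high coverage)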

def replace(text, *pairs):
    """
    From pydoc... almost identical at least
    """
    from string import split, join
    while pairs:
        (a, b) = pairs[0]
        text = join(split(text, a), b)
        pairs = pairs[1:]
    return text

def escape(text):
    """
    Escape the string so that it is valid HTML
    """
    return replace(text,
                   ('&', '&amp;'),
                   ('<', '&lt;'),
                   ('>', '&gt;'))

def generate_table_header_end(file):
    file.write("""</table>
</center>""")

def write_title_page(dest_dir, last_time, last_tot_lines, last_tot_covered, dir_series):
    """
    Write the index.html with an overview of each directory
    """
    index = create_page(dest_dir, "index.html")
    generate_header(index, last_time, last_tot_lines, last_tot_covered, "directory", "images/Total.png")
    # Create the directory overview
    generate_table_header_start(index)
    dirs = dir_series.keys()
    dirs.sort()
    for dir in dirs:
        (dir_files, total_lines, covered_lines, _) = dir_series[dir][-1]
        generate_table_item(index, dir, total_lines, covered_lines)
    generate_table_header_end(index)

    index.write("""</BODY></HTML>""")
    index.close()

def write_directory_site(dest_dir, dir_name, last_time, dir_series, file_series):
    escaped_dir = dir_name.replace(os.path.sep, '__')
    site = create_page(dest_dir, "%s.html" % escaped_dir)
    (_, tot_lines, tot_covered, files) = dir_series[dir_name][-1]
    generate_header(site, last_time, tot_lines, tot_covered, "directory - %s" % dir_name, "images/%s.png" % escaped_dir)

    files.sort()

    generate_table_header_start(site)
    for file in files:
        (lines, covered) = file_series[file][-1]
        generate_table_item(site, file, lines, covered)

    generate_table_header_end(site)
    site.write("""</BODY></HTML>""")
    site.close()

def write_file_site(dest_dir, file_name, last_time, data_dir, last_id, file_series):
    escaped_name = file_name.replace(os.path.sep, '__')
    site = create_page(dest_dir, "%s.html" % escaped_name)
    (tot_lines, tot_covered) = file_series[file_name][-1]
    generate_header(site, last_time, tot_lines, tot_covered, "file - %s" % file_name, "images/%s.png" % escaped_name)

    path = "%s/%s.annotated%s" % (data_dir, last_id, file_name)

    # In contrast to lcov we want to show files that have been compiled
    # but have not been tested at all. This means the data directory contains
    # source files with 0 lines covered that are not lcov files.
    # To identify real gcov output we check the first line: if it starts with
    # "-: 0:Source:" we can continue.
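    #
    # For reference, an annotated gcov file looks roughly like this
    # (an illustrative sketch, not taken from a real run):
    #
    #         -:    0:Source:WebCore/dom/Document.cpp
    #        12:   42:    int count = childNodeCount();
    #     #####:   43:    neverReached();
    #         -:   44:    // comment or other non-executable line
    #
    # The first field is the execution count ("-" for non-executable lines,
    # "#####" for instrumented lines that never ran), the second is the
    # source line number, and the rest is the source text.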
    try:
        file = open(path, "r")
    except:
        # The annotated file may be missing entirely; skip it in that case.
        return
    all_lines = file.read().split("\n")

    # Convert the gcov file to HTML if we have a chance to do so.
    # Scan each line, see if it was covered or not, and escape the
    # text.
    if len(all_lines) == 0 or not "-: 0:Source:" in all_lines[0]:
        site.write("<p>The file was not exercised</p>")
    else:
        site.write("""</br><table cellpadding=0 cellspacing=0 border=0>
<tr>
<td><br></td>
</tr>
<tr>
<td><pre class="source">
""")
        for line in all_lines:
            split_line = line.split(':', 2)
            # e.g. at the EOF
            if len(split_line) == 1:
                continue
            line_number = split_line[1].strip()
            if line_number == "0":
                continue
            covered = 15 * " "
            end = ""
            if "#####" in split_line[0]:
                covered = '<span class="lineNoCov">%15s' % "0"
                end = "</span>"
            elif split_line[0].strip() != "-":
                covered = '<span class="lineCov">%15s' % split_line[0].strip()
                end = "</span>"

            escaped_line = escape(split_line[2])
            str = '<span class="lineNum">%(line_number)10s </span>%(covered)s: %(escaped_line)s%(end)s\n' % vars()
            site.write(str)
        site.write("</pre></td></tr></table>")
    site.write("</BODY></HTML>")
    site.close()

def main(progname, args):
    if len(args) != 2:
        sys.exit("Usage: %s DATADIR OUTDIR" % progname)

    branch = "WebKit from trunk"
    datadir, outdir = args

    # First, load in all data from the data directory.
    data = []
    for datapath in glob.glob(os.path.join(datadir, "*.csv")):
        data.append(read_csv(datapath))
    # Sort by time
    data.sort()

    # Calculate time series for each file.
    times = [sample[0] for sample in data]
    times = [datetime.datetime.utcfromtimestamp(t) for t in times]

    all_files = {}
    all_dirs = {}
    for sample in data:
        t, i, tot_line, tot_cover, per_file, per_dir = sample
        all_files.update(per_file)
        all_dirs.update(per_dir)
    total_series = []
    file_serieses = dict([[k, [(0, 0)] * len(times)] for k in all_files.keys()])
    dir_serieses = dict([[k, [(0, 0, 0, [])] * len(times)] for k in all_dirs.keys()])
    data_idx = 0
    for sample in data:
        t, i, tot_line, tot_cover, per_file, per_dir = sample
        total_series.append([tot_line, tot_cover])
        for f, covinfo in per_file.items():
            file_serieses[f][data_idx] = covinfo
        for f, covinfo in per_dir.items():
            dir_serieses[f][data_idx] = covinfo
        data_idx += 1

    # Okay, ready to start outputting. First make sure our directories
    # exist.
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    rel_imgdir = "images"
    imgdir = os.path.join(outdir, rel_imgdir)
    if not os.path.exists(imgdir):
        os.makedirs(imgdir)

    # And look up the latest revision id, and coverage information
    last_time, last_id, last_tot_lines, last_tot_covered = data[-1][:4]

    # Now start generating our html files
    copy_files(outdir)
    write_title_page(outdir, last_time, last_tot_lines, last_tot_covered, dir_serieses)

    dir_keys = dir_serieses.keys()
    dir_keys.sort()
    for dir_name in dir_keys:
        write_directory_site(outdir, dir_name, last_time, dir_serieses, file_serieses)

    file_keys = file_serieses.keys()
    for file_name in file_keys:
        write_file_site(outdir, file_name, last_time, datadir, last_id, file_serieses)
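
# A typical invocation might look like this (the directory names are only
# illustrative; DATADIR is expected to hold one <id>.csv per coverage run
# plus the matching <id>.annotated* gcov files read by write_file_site):
#
#   regenerate-coverage-display coverage-results/ coverage-html/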

def read_csv(path):
    r = csv.reader(open(path, "r"))
    # First line is id, time
    for row in r:
        id, time_str = row
        break
    time = int(float(time_str))
    # Rest of lines are path, total_lines, covered_lines
    per_file = {}
    per_dir = {}
    grand_total_lines, grand_covered_lines = 0, 0
    for row in r:
        path, total_lines_str, covered_lines_str = row
        total_lines = int(total_lines_str)
        covered_lines = int(covered_lines_str)
        grand_total_lines += total_lines
        grand_covered_lines += covered_lines
        per_file[path] = [total_lines, covered_lines]

        # Update dir statistics
        dirname = os.path.dirname(path)
        if not dirname in per_dir:
            per_dir[dirname] = (0, 0, 0, [])
        (dir_files, dir_total_lines, dir_covered_lines, files) = per_dir[dirname]
        dir_files += 1
        dir_total_lines += total_lines
        dir_covered_lines += covered_lines
        files.append(path)
        per_dir[dirname] = (dir_files, dir_total_lines, dir_covered_lines, files)
    return [time, id, grand_total_lines, grand_covered_lines, per_file, per_dir]

if __name__ == "__main__":
    main(sys.argv[0], sys.argv[1:])