[refactor] More post-NSS WebCrypto cleanups (utility functions).
[chromium-blink-merge.git] / testing / scripts / common.py
blob 3697c4b7373860f1dfffe0c720a5d8d7128587e5
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import contextlib
import json
import os
import subprocess
import sys
import tempfile


SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))


# run-webkit-tests returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflow or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101
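

# Illustrative note, not part of the original file: given the cap described
# above, a caller can read the run-webkit-tests exit status as an exact
# failure count below the cap, and as "101 or more failures" at the cap:
#
#   if rc == MAX_FAILURES_EXIT_STATUS:
#     print 'at least %d failures' % MAX_FAILURES_EXIT_STATUS
#   elif rc > 0:
#     print '%d failures' % rc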


def run_script(argv, funcs):
  def parse_json(path):
    with open(path) as f:
      return json.load(f)

  parser = argparse.ArgumentParser()
  # TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
  parser.add_argument('--build-config-fs')
  parser.add_argument('--paths', type=parse_json, default={})
  # Properties describe the environment of the build, and are the same per
  # script invocation.
  parser.add_argument('--properties', type=parse_json, default={})
  # Args contains per-invocation arguments that potentially change the
  # behavior of the script.
  parser.add_argument('--args', type=parse_json, default=[])

  parser.add_argument(
      '--use-src-side-runtest-py', action='store_true',
      help='Use the src-side copy of runtest.py, as opposed to the build-side '
           'one')

  subparsers = parser.add_subparsers()

  run_parser = subparsers.add_parser('run')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
  run_parser.set_defaults(func=funcs['run'])

  run_parser = subparsers.add_parser('compile_targets')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.set_defaults(func=funcs['compile_targets'])

  args = parser.parse_args(argv)
  return args.func(args)
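

# Illustrative sketch, not part of the original file: a test script built on
# this module typically defines one handler per subcommand and passes them to
# run_script(). The handler names and the JSON they write to args.output are
# assumptions here, not requirements imposed by run_script() itself.
def _example_main_run(args):
  # args.paths/args.properties hold the parsed --paths/--properties JSON;
  # args.output is the file opened by the 'run' subcommand's --output flag.
  json.dump({'valid': True, 'failures': []}, args.output)
  return 0


def _example_main_compile_targets(args):
  # The 'compile_targets' subcommand reports which build targets the script
  # needs; the target name here is a placeholder.
  json.dump(['my_test_target'], args.output)


# A concrete script module would then end with something like:
#
#   if __name__ == '__main__':
#     sys.exit(run_script(sys.argv[1:], {
#         'run': _example_main_run,
#         'compile_targets': _example_main_compile_targets,
#     }))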


def run_command(argv):
  print 'Running %r' % argv
  rc = subprocess.call(argv)
  print 'Command %r returned exit code %d' % (argv, rc)
  return rc


def run_runtest(cmd_args, runtest_args):
  if cmd_args.use_src_side_runtest_py:
    cmd = [
        sys.executable,
        os.path.join(
            cmd_args.paths['checkout'], 'infra', 'scripts',
            'runtest_wrapper.py'),
        '--path-build', cmd_args.paths['build'],
        '--',
    ]
  else:
    cmd = [
        sys.executable,
        os.path.join(cmd_args.paths['build'], 'scripts', 'tools', 'runit.py'),
        '--show-path',
        sys.executable,
        os.path.join(cmd_args.paths['build'], 'scripts', 'slave', 'runtest.py'),
    ]
  return run_command(cmd + [
      '--target', cmd_args.build_config_fs,
      '--xvfb',
      '--builder-name', cmd_args.properties['buildername'],
      '--slave-name', cmd_args.properties['slavename'],
      '--build-number', str(cmd_args.properties['buildnumber']),
      '--build-properties', json.dumps(cmd_args.properties),
  ] + runtest_args)
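

# Note added for illustration, not part of the original file: run_runtest()
# assumes --properties carried 'buildername', 'slavename' and 'buildnumber',
# and that --paths carried 'build' (plus 'checkout' when
# --use-src-side-runtest-py is set). A 'run' handler would invoke it roughly
# like the following, where the extra runtest.py arguments are placeholders:
#
#   rc = run_runtest(args, ['--annotate', 'gtest', 'my_test_target'])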


@contextlib.contextmanager
def temporary_file():
  fd, path = tempfile.mkstemp()
  os.close(fd)
  try:
    yield path
  finally:
    os.remove(path)
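

# Illustrative usage, not part of the original file: temporary_file() yields a
# path rather than an open file object, so the typical pattern is to hand the
# path to a subprocess and read the file back before the context exits. The
# flag name below is a placeholder for whatever the invoked tool accepts:
#
#   with temporary_file() as results_path:
#     rc = run_command([sys.executable, 'my_test.py',
#                       '--write-results-to', results_path])
#     with open(results_path) as f:
#       results = json.load(f)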


def parse_common_test_results(json_results, test_separator='/'):
  def convert_trie_to_flat_paths(trie, prefix=None):
    # Also see webkitpy.layout_tests.layout_package.json_results_generator
    result = {}
    for name, data in trie.iteritems():
      if prefix:
        name = prefix + test_separator + name
      if len(data) and not 'actual' in data and not 'expected' in data:
        result.update(convert_trie_to_flat_paths(data, name))
      else:
        result[name] = data
    return result

  results = {
      'passes': {},
      'unexpected_passes': {},
      'failures': {},
      'unexpected_failures': {},
      'flakes': {},
      'unexpected_flakes': {},
  }

  # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
  # both the return code and parsing the actual results, below.

  passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE',
                      'NEEDSMANUALREBASELINE')

  for test, result in convert_trie_to_flat_paths(
      json_results['tests']).iteritems():
    key = 'unexpected_' if result.get('is_unexpected') else ''
    data = result['actual']
    actual_results = data.split()
    last_result = actual_results[-1]
    expected_results = result['expected'].split()

    if (len(actual_results) > 1 and
        (last_result in expected_results or last_result in passing_statuses)):
      key += 'flakes'
    elif last_result in passing_statuses:
      key += 'passes'
      # TODO(dpranke): crbug.com/357867 ... Why are we assigning result
      # instead of actual_result here. Do we even need these things to be
      # hashes, or just lists?
      data = result
    else:
      key += 'failures'
    results[key][test] = data

  return results
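

# Illustrative example, not part of the original file: the input is the
# layout-test-style "full results" JSON (see the json_results_generator
# reference above), where 'tests' is a trie keyed on path components and each
# leaf holds space-separated 'actual'/'expected' status lists. Given
#
#   json_results = {'tests': {'foo': {'bar.html': {
#       'actual': 'FAIL PASS', 'expected': 'PASS', 'is_unexpected': False}}}}
#
# the leaf flattens to 'foo/bar.html' and, because the last actual status is
# an expected passing one after an earlier failure, the test is filed under
# results['flakes'].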


def parse_gtest_test_results(json_results):
  failures = set()
  for cur_iteration_data in json_results.get('per_iteration_data', []):
    for test_fullname, results in cur_iteration_data.iteritems():
      # Results is a list with one entry per test try. The last one is the
      # final result and the only one we care about here.
      last_result = results[-1]

      if last_result['status'] != 'SUCCESS':
        failures.add(test_fullname)

  return {
      'failures': sorted(failures),
  }
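

# Illustrative example, not part of the original file: 'per_iteration_data' is
# a list with one dict per iteration, mapping full test names to the list of
# tries for that test; only the last try decides pass/fail here, and any
# status other than 'SUCCESS' counts as a failure. For instance
#
#   json_results = {'per_iteration_data': [{
#       'Suite.Flaky': [{'status': 'FAILURE'}, {'status': 'SUCCESS'}],
#       'Suite.Broken': [{'status': 'FAILURE'}],
#   }]}
#
# yields {'failures': ['Suite.Broken']}, since Suite.Flaky passed on its
# final try.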