testing/scripts/common.py
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import contextlib
import json
import os
import subprocess
import sys
import tempfile


SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))


# run-webkit-tests returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflow or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101
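# Illustrative sketch, not part of the original file: a caller that reports a
# failure count through its own exit status would cap it the same way, e.g.
#
#   return min(num_failures, MAX_FAILURES_EXIT_STATUS)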


def run_script(argv, funcs):
  """Parses common flags and dispatches to the 'run' or 'compile_targets'
  handler in |funcs|."""
  def parse_json(path):
    with open(path) as f:
      return json.load(f)
  parser = argparse.ArgumentParser()
  # TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
  parser.add_argument('--build-config-fs')
  parser.add_argument('--paths', type=parse_json, default={})
  # Properties describe the environment of the build, and are the same per
  # script invocation.
  parser.add_argument('--properties', type=parse_json, default={})
  # Args contains per-invocation arguments that potentially change the
  # behavior of the script.
  parser.add_argument('--args', type=parse_json, default=[])

  subparsers = parser.add_subparsers()

  run_parser = subparsers.add_parser('run')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
  run_parser.set_defaults(func=funcs['run'])

  run_parser = subparsers.add_parser('compile_targets')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.set_defaults(func=funcs['compile_targets'])

  args = parser.parse_args(argv)
  return args.func(args)
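# Example use of run_script (illustrative only; the handler names and the
# compile target list are hypothetical):
#
#   def main_run(args):
#     json.dump({'valid': True, 'failures': []}, args.output)
#     return 0
#
#   def main_compile_targets(args):
#     json.dump(['example_unittests'], args.output)
#     return 0
#
#   if __name__ == '__main__':
#     sys.exit(run_script(sys.argv[1:], {
#         'run': main_run,
#         'compile_targets': main_compile_targets,
#     }))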


def run_command(argv):
  """Runs |argv| and returns its exit code, logging the command and result."""
  print 'Running %r' % argv
  rc = subprocess.call(argv)
  print 'Command %r returned exit code %d' % (argv, rc)
  return rc


def run_runtest(cmd_args, runtest_args):
  """Runs scripts/slave/runtest.py via runit.py with the standard builder
  flags taken from |cmd_args|, followed by |runtest_args|."""
  return run_command([
      sys.executable,
      os.path.join(cmd_args.paths['build'], 'scripts', 'tools', 'runit.py'),
      '--show-path',
      sys.executable,
      os.path.join(cmd_args.paths['build'], 'scripts', 'slave', 'runtest.py'),
      '--target', cmd_args.build_config_fs,
      '--xvfb',
      '--builder-name', cmd_args.properties['buildername'],
      '--slave-name', cmd_args.properties['slavename'],
      '--build-number', str(cmd_args.properties['buildnumber']),
      '--build-properties', json.dumps(cmd_args.properties),
  ] + runtest_args)
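# Illustrative only, not part of the original file: a 'run' handler might
# forward its per-invocation --args straight through to runtest.py, e.g.
#
#   def main_run(args):
#     rc = run_runtest(args, list(args.args))
#     json.dump({'valid': True, 'failures': []}, args.output)
#     return rc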


@contextlib.contextmanager
def temporary_file():
  """Yields the path of a closed temporary file, deleting it on exit."""
  fd, path = tempfile.mkstemp()
  os.close(fd)
  try:
    yield path
  finally:
    os.remove(path)
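# Example use of temporary_file (illustrative): stage a test-launcher filter
# file that is cleaned up automatically afterwards, e.g.
#
#   with temporary_file() as tempfile_path:
#     with open(tempfile_path, 'w') as f:
#       f.write('\n'.join(tests_to_run))
#     rc = run_command(cmd + ['--test-launcher-filter-file=' + tempfile_path])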


def parse_common_test_results(json_results, test_separator='/'):
  """Flattens the 'tests' trie in |json_results| and buckets each test under
  (unexpected_) passes, failures or flakes."""
  def convert_trie_to_flat_paths(trie, prefix=None):
    # Also see webkitpy.layout_tests.layout_package.json_results_generator
    result = {}
    for name, data in trie.iteritems():
      if prefix:
        name = prefix + test_separator + name
      if len(data) and not 'actual' in data and not 'expected' in data:
        result.update(convert_trie_to_flat_paths(data, name))
      else:
        result[name] = data
    return result

  results = {
    'passes': {},
    'unexpected_passes': {},
    'failures': {},
    'unexpected_failures': {},
    'flakes': {},
    'unexpected_flakes': {},
  }

  # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
  # both the return code and parsing the actual results, below.

  passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE',
                      'NEEDSMANUALREBASELINE')

  for test, result in convert_trie_to_flat_paths(
      json_results['tests']).iteritems():
    key = 'unexpected_' if result.get('is_unexpected') else ''
    data = result['actual']
    actual_results = data.split()
    last_result = actual_results[-1]
    expected_results = result['expected'].split()

    if (len(actual_results) > 1 and
        (last_result in expected_results or last_result in passing_statuses)):
      key += 'flakes'
    elif last_result in passing_statuses:
      key += 'passes'
      # TODO(dpranke): crbug.com/357867 ... Why are we assigning result
      # instead of actual_result here. Do we even need these things to be
      # hashes, or just lists?
      data = result
    else:
      key += 'failures'
    results[key][test] = data

  return results
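# Worked example for parse_common_test_results (illustrative only). Given
# simplified JSON results such as
#
#   {'tests': {'foo': {'bar.html': {'expected': 'PASS',
#                                   'actual': 'FAIL PASS',
#                                   'is_unexpected': True}}}}
#
# the test is flattened to 'foo/bar.html' and filed under
# 'unexpected_flakes', because it ran more than once and its last actual
# result (PASS) is a passing status.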