# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

# Special import necessary because filename contains dash characters.
bisect_perf_module = __import__('bisect-perf-regression')
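# An equivalent import using the standard library (a sketch, assuming
# Python 2.7+, where importlib.import_module accepts the dashed name):
#
#   import importlib
#   bisect_perf_module = importlib.import_module('bisect-perf-regression')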


class BisectPerfRegressionTest(unittest.TestCase):
  """Test case for top-level functions in the bisect-perf-regression module."""
15 """Sets up the test environment before each test method."""
19 """Cleans up the test environment after each test method."""

  def testCalculateTruncatedMeanRaisesError(self):
    """CalculateTruncatedMean raises an error when passed an empty list."""
    with self.assertRaises(TypeError):
      bisect_perf_module.CalculateTruncatedMean([], 0)

  def testCalculateMeanSingleNum(self):
    """Tests the CalculateMean function with a single number."""
    self.assertEqual(3.0, bisect_perf_module.CalculateMean([3]))

  def testCalculateMeanShortList(self):
    """Tests the CalculateMean function with a short list."""
    self.assertEqual(0.5, bisect_perf_module.CalculateMean([-3, 0, 1, 4]))

  def testCalculateMeanCompareAlternateImplementation(self):
    """Tests CalculateMean by comparing against an alternate implementation."""
    def AlternateMeanFunction(values):
      """Simple arithmetic mean function."""
      return sum(values) / float(len(values))
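    # The float() cast above guards against Python 2 integer floor division
    # when every input value is an int.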
    test_values_lists = [[1], [5, 6.5, 1.2, 3], [-3, 0, 1, 4],
                         [-3, -1, 0.12, 0.752, 3.33, 8, 16, 32, 439]]
    for values in test_values_lists:
      self.assertEqual(
          AlternateMeanFunction(values),
          bisect_perf_module.CalculateMean(values))

  def testCalculateConfidence(self):
    """Tests the confidence calculation."""
    bad_values = [[0, 1], [1, 2]]
    good_values = [[6, 7], [7, 8]]
    # Closest means are mean(1, 2) = 1.5 and mean(6, 7) = 6.5.
    distance = 6.5 - 1.5
    # Sample standard deviation of [n-1, n, n, n+1] is 0.8165.
    stddev_sum = 0.8165 + 0.8165
    # Expected confidence is an int in the range [0, 100].
    expected_confidence = min(100, int(100 * distance / float(stddev_sum)))
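    # With these inputs: distance = 5.0, stddev_sum ~= 1.633, and
    # 100 * 5.0 / 1.633 ~= 306, so the min() cap yields a confidence of 100.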
    self.assertEqual(
        expected_confidence,
        bisect_perf_module.CalculateConfidence(bad_values, good_values))

  def testCalculateConfidence0(self):
    """Tests the confidence calculation when it's expected to be 0."""
    bad_values = [[0, 1], [1, 2], [4, 5], [0, 2]]
    good_values = [[4, 5], [6, 7], [7, 8]]
    # Each group contains a value list whose mean is 4.5, so the distance
    # between the closest means is zero, and thus the confidence is zero.
    self.assertEqual(
        0, bisect_perf_module.CalculateConfidence(bad_values, good_values))

  def testCalculateConfidence100(self):
    """Tests the confidence calculation when it's expected to be 100."""
    bad_values = [[1, 1], [1, 1]]
    good_values = [[1.2, 1.2], [1.2, 1.2]]
    # Standard deviation in both groups is zero, so confidence is 100.
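    # (A zero stddev_sum would make the distance/stddev_sum ratio divide by
    # zero; presumably the implementation special-cases zero spread as
    # maximal confidence.)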
    self.assertEqual(
        100, bisect_perf_module.CalculateConfidence(bad_values, good_values))


if __name__ == '__main__':
  unittest.main()