ozone: evdev: Sync caps lock LED state to evdev
[chromium-blink-merge.git] / tools / perf / measurements / task_execution_time_unittest.py
blobed04076a2510db6ce9507c82d9c1fb31c68cd4bd
1 # Copyright 2014 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
5 from measurements import task_execution_time
6 from telemetry import decorators
7 from telemetry.core import wpr_modes
8 from telemetry.page import page as page_module
9 from telemetry.results import page_test_results
10 from telemetry.timeline import model as model_module
11 from telemetry.timeline import slice as slice_data
12 from telemetry.unittest_util import options_for_unittests
13 from telemetry.unittest_util import page_test_test_case
class TestTaskExecutionTimePage(page_module.Page):
  """Minimal blank page that performs one smooth scroll.

  Used as the interaction driver for the task-execution-time tests below.
  """

  def __init__(self, page_set, base_dir):
    super(TestTaskExecutionTimePage, self).__init__(
        'file://blank.html', page_set, base_dir)

  def RunPageInteractions(self, action_runner):
    # Wrap the scroll in a smooth 'ScrollAction' gesture interaction so the
    # measurement has a marked region to attribute tasks to.
    scroll_interaction = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    scroll_interaction.End()
class TaskExecutionTimeUnitTest(page_test_test_case.PageTestTestCase):
  """Unit tests for the task_execution_time measurement.

  Most tests feed hand-built timeline data (via TaskExecutionTestData) into
  TaskExecutionTime.ValidateAndMeasurePage and inspect the reported values;
  the Android-only tests run the measurement end-to-end on a dummy page.
  """

  def setUp(self):
    self._options = options_for_unittests.GetCopy()
    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
    self._first_thread_name = (
        task_execution_time.TaskExecutionTime._RENDERER_THREADS[0])
    self._measurement = None
    self._page_set = None

  @decorators.Enabled('android')
  def testSomeResultsReturnedFromDummyPage(self):
    self._GenerateDataForEmptyPageSet()

    results = self.RunMeasurement(self._measurement,
                                  self._page_set,
                                  options=self._options)

    self.assertGreater(len(results.all_page_specific_values), 0)

  @decorators.Enabled('android')
  def testSlicesConformToRequiredNamingConventionsUsingDummyPage(self):
    """This test ensures the presence of required keywords.

    Some arbitrary keywords are required to generate the names of the top 10
    tasks. The code has a weak dependency on 'src_func', 'class' and 'line'
    existing; if they exist in a slice's args they are used to generate a
    name, if they don't exist the code falls back to using the name of the
    slice, which is less clear.

    If the code has been refactored and these keywords no longer exist
    the code that relies on them in task_execution_time.py should be
    updated to use the appropriate technique for ascertaining this data
    (and this test changed in the same way).
    """
    self._GenerateDataForEmptyPageSet()

    self.RunMeasurement(self._measurement,
                        self._page_set,
                        options=self._options)

    required_keywords = {'src_func': 0, 'class': 0, 'line': 0}

    # Check all slices and count the uses of the required keywords.
    for thread in self._measurement._renderer_process.threads.itervalues():
      for slice_info in thread.IterAllSlices():
        _CheckSliceForKeywords(slice_info, required_keywords)

    # Confirm that all required keywords have at least one instance.
    for use_counts in required_keywords.itervalues():
      self.assertGreater(use_counts, 0)

  def testMockedResultsCorrectlyReturned(self):
    data = self._GenerateResultsFromMockedData()

    # Confirm we get back 4 results (3 tasks and a section-use %).
    self.assertEqual(len(data.results.all_page_specific_values), 4)

    # Check that the 3 tasks we added exist in the resulting output
    # sorted.
    task_prefix = 'process 1:%s:' % (self._first_thread_name)
    slow_result = self._findResultFromName(task_prefix + 'slow', data)
    self.assertEqual(slow_result.value, 1000)

    medium_result = self._findResultFromName(task_prefix + 'medium', data)
    self.assertEqual(medium_result.value, 500)

    fast_result = self._findResultFromName(task_prefix + 'fast', data)
    self.assertEqual(fast_result.value, 1)

  def testNonIdlePercentagesAreCorrect(self):
    data = self._GenerateResultsFromMockedData()

    # Confirm that 100% of tasks are in the normal section.
    percentage_result = self._findResultFromName(
        'process 1:%s:Section_%s' % (
            self._first_thread_name,
            task_execution_time.TaskExecutionTime.NORMAL_SECTION),
        data)
    self.assertEqual(percentage_result.value, 100)

  def testIdleTasksAreReported(self):
    data = self._GenerateResultsFromMockedIdleData()

    # The 'slow_sub_slice' should be inside the Idle section and therefore
    # removed from the results.
    for result in data.results.all_page_specific_values:
      if 'slow_sub_slice' in result.name:
        self.fail('Tasks within idle section should not be reported')

    # The 'not_idle' slice should not have the IDLE_SECTION added to its name
    # and should exist.
    for result in data.results.all_page_specific_values:
      if 'not_idle' in result.name:
        self.assertTrue(
            task_execution_time.TaskExecutionTime.IDLE_SECTION
            not in result.name)
        break
    else:
      # for/else: we never found (and broke on) a 'not_idle' result.
      self.fail('Task was incorrectly marked as Idle')

  def testIdlePercentagesAreCorrect(self):
    data = self._GenerateResultsFromMockedIdleData()

    # Check the percentage section usage is correctly calculated.
    # Total = 1000 (idle) + 250 (normal), so normal = (250 * 100) / 1250 = 20%.
    normal_percentage_result = self._findResultFromName(
        'process 1:%s:Section_%s' % (
            self._first_thread_name,
            task_execution_time.TaskExecutionTime.NORMAL_SECTION),
        data)
    self.assertEqual(normal_percentage_result.value, 20)
    # ... and the remaining 80% must be attributed to the idle section.
    idle_percentage_result = self._findResultFromName(
        'process 1:%s:Section_%s' % (
            self._first_thread_name,
            task_execution_time.TaskExecutionTime.IDLE_SECTION),
        data)
    self.assertEqual(idle_percentage_result.value, 80)

  def testTopNTasksAreCorrectlyReported(self):
    data = self._GenerateDataForEmptyPageSet()

    # Add too many increasing-duration tasks and confirm we only get the
    # slowest _NUMBER_OF_RESULTS_TO_DISPLAY tasks reported back.
    extra = 5
    for duration in xrange(
        task_execution_time.TaskExecutionTime._NUMBER_OF_RESULTS_TO_DISPLAY +
        extra):
      data.AddSlice('task' + str(duration), 0, duration)

    # Run the code we are testing.
    self._measurement.ValidateAndMeasurePage(None, None, data.results)

    # Check that the last (i.e. biggest) _NUMBER_OF_RESULTS_TO_DISPLAY get
    # returned in the results.
    for duration in xrange(
        extra,
        extra +
        task_execution_time.TaskExecutionTime._NUMBER_OF_RESULTS_TO_DISPLAY):
      self._findResultFromName(
          'process 1:%s:task%s' % (self._first_thread_name, str(duration)),
          data)

  def _findResultFromName(self, name, data):
    """Return the page-specific value named |name|, or fail the test."""
    for result in data.results.all_page_specific_values:
      if result.name == name:
        return result
    self.fail('Expected result "%s" missing.' % (name))

  def _GenerateResultsFromMockedData(self):
    """Run the measurement over three plain tasks (fast/medium/slow)."""
    data = self._GenerateDataForEmptyPageSet()

    data.AddSlice('fast', 0, 1)
    data.AddSlice('medium', 0, 500)
    data.AddSlice('slow', 0, 1000)

    # Run the code we are testing and return results.
    self._measurement.ValidateAndMeasurePage(None, None, data.results)
    return data

  def _GenerateResultsFromMockedIdleData(self):
    """Run the measurement over one idle-parented task and one normal task."""
    data = self._GenerateDataForEmptyPageSet()

    # Make a slice that looks like an idle task parent.
    slice_start_time = 0
    slow_slice_duration = 1000
    fast_slice_duration = 250
    parent_slice = data.AddSlice(
        task_execution_time.TaskExecutionTime.IDLE_SECTION_TRIGGER,
        slice_start_time,
        slow_slice_duration)
    # Add a sub-slice, this should be reported back as occurring in idle time.
    # NOTE(review): the empty trailing args list closes the call that was
    # truncated in the source paste — confirm against telemetry's Slice
    # constructor (parent_thread, category, name, timestamp, duration,
    # thread_timestamp, thread_duration, args).
    sub_slice = slice_data.Slice(
        None,
        'category',
        'slow_sub_slice',
        slice_start_time,
        slow_slice_duration,
        slice_start_time,
        slow_slice_duration,
        [])
    parent_slice.sub_slices.append(sub_slice)

    # Add a non-idle task.
    data.AddSlice('not_idle', slice_start_time, fast_slice_duration)

    # Run the code we are testing.
    self._measurement.ValidateAndMeasurePage(None, None, data.results)

    return data

  def _GenerateDataForEmptyPageSet(self):
    """Create the measurement, a one-page page set and empty timeline data."""
    self._measurement = task_execution_time.TaskExecutionTime()
    self._page_set = self.CreateEmptyPageSet()
    page = TestTaskExecutionTimePage(self._page_set, self._page_set.base_dir)
    self._page_set.AddUserStory(page)

    # Get the name of a thread used by task_execution_time metric and set up
    # some dummy execution data pretending to be from that thread & process.
    data = TaskExecutionTestData(self._first_thread_name)
    self._measurement._renderer_process = data._renderer_process

    # Pretend we are about to run the tests to silence lower level asserts.
    data.results.WillRunPage(page)

    return data
239 def _CheckSliceForKeywords(slice_info, required_keywords):
240 for argument in slice_info.args:
241 if argument in required_keywords:
242 required_keywords[argument] += 1
243 # recurse into our sub-slices.
244 for sub_slice in slice_info.sub_slices:
245 _CheckSliceForKeywords(sub_slice, required_keywords)
class TaskExecutionTestData(object):
  """Synthetic timeline fixture: process 1 with a single named thread (id 2).

  Lets tests append slices to the thread the TaskExecutionTime measurement
  inspects, and exposes an empty PageTestResults to collect output.
  """

  def __init__(self, thread_name):
    self._model = model_module.TimelineModel()
    self._renderer_process = self._model.GetOrCreateProcess(1)
    self._renderer_thread = self._renderer_process.GetOrCreateThread(2)
    self._renderer_thread.name = thread_name
    self._results = page_test_results.PageTestResults()

  @property
  def results(self):
    return self._results

  def AddSlice(self, name, timestamp, duration):
    """Append a slice to the renderer thread and return it.

    The slice's wall-clock and thread timestamps/durations are set to the
    same |timestamp|/|duration| values.
    """
    # NOTE(review): the trailing empty args list closes the call that was
    # truncated in the source paste — confirm against telemetry's Slice
    # constructor signature.
    new_slice = slice_data.Slice(
        None,
        'category',
        name,
        timestamp,
        duration,
        timestamp,
        duration,
        [])
    self._renderer_thread.all_slices.append(new_slice)
    return new_slice