# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.core import wpr_modes
from telemetry import decorators
from telemetry.page import page as page_module
from telemetry.results import page_test_results
from telemetry.timeline import model as model_module
from telemetry.timeline import slice as slice_data
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case

from measurements import task_execution_time


class TestTaskExecutionTimePage(page_module.Page):
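  """Dummy page that loads blank.html and performs a scroll gesture."""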

  def __init__(self, page_set, base_dir):
    super(TestTaskExecutionTimePage, self).__init__(
        'file://blank.html', page_set, base_dir)

  def RunPageInteractions(self, action_runner):
    interaction = action_runner.BeginGestureInteraction(
        'ScrollAction')
    action_runner.ScrollPage()
    interaction.End()


class TaskExecutionTimeUnitTest(page_test_test_case.PageTestTestCase):

  def setUp(self):
    self._options = options_for_unittests.GetCopy()
    self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
    self._first_thread_name = (
        task_execution_time.TaskExecutionTime._RENDERER_THREADS[0])
    self._measurement = None
    self._page_set = None

  @decorators.Enabled('android')
  def testSomeResultsReturnedFromDummyPage(self):
    self._GenerateDataForEmptyPageSet()

    results = self.RunMeasurement(self._measurement,
                                  self._page_set,
                                  options=self._options)

    self.assertGreater(len(results.all_page_specific_values), 0)

  # http://crbug.com/466994
  @decorators.Disabled
  def testSlicesConformToRequiredNamingConventionsUsingDummyPage(self):
    """This test ensures the presence of required keywords.

    Some arbitrary keywords are required to generate the names of the top 10
    tasks. The code has a weak dependency on 'src_func', 'class' and 'line'
    existing; if they exist in a slice's args they are used to generate a
    name, and if they don't exist the code falls back to using the name of
    the slice, which is less clear.

    If the code has been refactored and these keywords no longer exist,
    the code that relies on them in task_execution_time.py should be
    updated to use the appropriate technique for ascertaining this data
    (and this test changed in the same way).
    """
    self._GenerateDataForEmptyPageSet()

    self.RunMeasurement(self._measurement,
                        self._page_set,
                        options=self._options)

    required_keywords = {'src_func': 0, 'class': 0, 'line': 0}

    # Check all slices and count the uses of the required keywords.
    for thread in self._measurement._renderer_process.threads.itervalues():
      for slice_info in thread.IterAllSlices():
        _CheckSliceForKeywords(slice_info, required_keywords)

    # Confirm that all required keywords have at least one instance.
    for use_counts in required_keywords.itervalues():
      self.assertGreater(use_counts, 0)

  def testMockedResultsCorrectlyReturned(self):
    data = self._GenerateResultsFromMockedData()

    # Confirm we get back 4 results (3 tasks and a section-use %).
    self.assertEqual(len(data.results.all_page_specific_values), 4)

    # Check that the 3 tasks we added exist in the resulting, sorted output.
    task_prefix = 'process 1:%s:' % (self._first_thread_name)
    slow_result = self._findResultFromName(task_prefix + 'slow', data)
    self.assertEqual(slow_result.value, 1000)

    medium_result = self._findResultFromName(task_prefix + 'medium', data)
    self.assertEqual(medium_result.value, 500)

    fast_result = self._findResultFromName(task_prefix + 'fast', data)
    self.assertEqual(fast_result.value, 1)

  def testNonIdlePercentagesAreCorrect(self):
    data = self._GenerateResultsFromMockedData()

    # Confirm that 100% of tasks are in the normal section.
    percentage_result = self._findResultFromName(
        'process 1:%s:Section_%s' % (
            self._first_thread_name,
            task_execution_time.TaskExecutionTime.NORMAL_SECTION),
        data)
    self.assertEqual(percentage_result.value, 100)

  def testIdleTasksAreReported(self):
    data = self._GenerateResultsFromMockedIdleData()

    # The 'slow_sub_slice' should be inside the Idle section and therefore
    # removed from the results.
    for result in data.results.all_page_specific_values:
      if 'slow_sub_slice' in result.name:
        self.fail('Tasks within idle section should not be reported')

    # The 'not_idle' slice should not have the IDLE_SECTION added to its name
    # and should exist.
    for result in data.results.all_page_specific_values:
      if 'not_idle' in result.name:
        self.assertTrue(
            task_execution_time.TaskExecutionTime.IDLE_SECTION
            not in result.name)
        break
    else:
      self.fail('Task was incorrectly marked as Idle')

  def testIdlePercentagesAreCorrect(self):
    data = self._GenerateResultsFromMockedIdleData()

    # Check the percentage section usage is correctly calculated.
    # Total = 1000 (idle) + 250 (normal), so normal = (250 * 100) / 1250 = 20%.
    normal_percentage_result = self._findResultFromName(
        'process 1:%s:Section_%s' % (
            self._first_thread_name,
            task_execution_time.TaskExecutionTime.NORMAL_SECTION),
        data)
    self.assertEqual(normal_percentage_result.value, 20)
    # ...and the idle section accounts for the remaining 80%.
    idle_percentage_result = self._findResultFromName(
        'process 1:%s:Section_%s' % (
            self._first_thread_name,
            task_execution_time.TaskExecutionTime.IDLE_SECTION),
        data)
    self.assertEqual(idle_percentage_result.value, 80)

  def testTopNTasksAreCorrectlyReported(self):
    data = self._GenerateDataForEmptyPageSet()

    # Add too many increasing-duration tasks and confirm we only get the
    # slowest _NUMBER_OF_RESULTS_TO_DISPLAY tasks reported back.
    duration = 0
    extra = 5
    for duration in xrange(
        task_execution_time.TaskExecutionTime._NUMBER_OF_RESULTS_TO_DISPLAY +
        extra):
      data.AddSlice('task' + str(duration), 0, duration)

    # Run the code we are testing.
    self._measurement.ValidateAndMeasurePage(None, None, data.results)

    # Check that the last (i.e. biggest) _NUMBER_OF_RESULTS_TO_DISPLAY tasks
    # get returned in the results.
    for duration in xrange(
        extra,
        extra +
        task_execution_time.TaskExecutionTime._NUMBER_OF_RESULTS_TO_DISPLAY):
      self._findResultFromName(
          'process 1:%s:task%s' % (self._first_thread_name, str(duration)),
          data)

  def _findResultFromName(self, name, data):
    for result in data.results.all_page_specific_values:
      if result.name == name:
        return result
    self.fail('Expected result "%s" missing.' % (name))

  def _GenerateResultsFromMockedData(self):
    data = self._GenerateDataForEmptyPageSet()

    data.AddSlice('fast', 0, 1)
    data.AddSlice('medium', 0, 500)
    data.AddSlice('slow', 0, 1000)

    # Run the code we are testing and return results.
    self._measurement.ValidateAndMeasurePage(None, None, data.results)
    return data

  def _GenerateResultsFromMockedIdleData(self):
    data = self._GenerateDataForEmptyPageSet()

    # Make a slice that looks like an idle task parent.
    slice_start_time = 0
    slow_slice_duration = 1000
    fast_slice_duration = 250
    parent_slice = data.AddSlice(
        task_execution_time.TaskExecutionTime.IDLE_SECTION_TRIGGER,
        slice_start_time,
        slow_slice_duration)
    # Add a sub-slice; this should be reported back as occurring in idle time.
    sub_slice = slice_data.Slice(
        None,
        'category',
        'slow_sub_slice',
        slice_start_time,
        slow_slice_duration,
        slice_start_time,
        slow_slice_duration,
        {})
    parent_slice.sub_slices.append(sub_slice)

    # Add a non-idle task.
    data.AddSlice('not_idle', slice_start_time, fast_slice_duration)

    # Run the code we are testing.
    self._measurement.ValidateAndMeasurePage(None, None, data.results)

    return data

  def _GenerateDataForEmptyPageSet(self):
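    """Creates the measurement, an empty page set containing the dummy page,
    and a TaskExecutionTestData object wired up as the measurement's renderer
    process."""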
    self._measurement = task_execution_time.TaskExecutionTime()
    self._page_set = self.CreateEmptyPageSet()
    page = TestTaskExecutionTimePage(self._page_set, self._page_set.base_dir)
    self._page_set.AddUserStory(page)

    # Get the name of a thread used by task_execution_time metric and set up
    # some dummy execution data pretending to be from that thread & process.
    data = TaskExecutionTestData(self._first_thread_name)
    self._measurement._renderer_process = data._renderer_process

    # Pretend we are about to run the tests to silence lower level asserts.
    data.results.WillRunPage(page)

    return data


def _CheckSliceForKeywords(slice_info, required_keywords):
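  """Counts, in required_keywords, how often each keyword appears in the args
  of slice_info and of all its sub-slices."""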
  for argument in slice_info.args:
    if argument in required_keywords:
      required_keywords[argument] += 1
  # Recurse into our sub-slices.
  for sub_slice in slice_info.sub_slices:
    _CheckSliceForKeywords(sub_slice, required_keywords)


class TaskExecutionTestData(object):
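  """Fake timeline data for the tests above: a single renderer process with
  one named thread plus a PageTestResults object, so the measurement can be
  exercised without a real trace."""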

  def __init__(self, thread_name):
    self._model = model_module.TimelineModel()
    self._renderer_process = self._model.GetOrCreateProcess(1)
    self._renderer_thread = self._renderer_process.GetOrCreateThread(2)
    self._renderer_thread.name = thread_name
    self._results = page_test_results.PageTestResults()

  @property
  def results(self):
    return self._results

  def AddSlice(self, name, timestamp, duration):
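    """Appends a slice with the given name, timestamp and duration to the
    fake renderer thread and returns it."""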
    new_slice = slice_data.Slice(
        None,
        'category',
        name,
        timestamp,
        duration,
        timestamp,
        duration,
        {})
    self._renderer_thread.all_slices.append(new_slice)
    return new_slice