# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import pathlib
import re

from gecko_taskgraph.util.attributes import match_run_on_projects
from manifestparser import TestManifest
from mozperftest.script import ScriptInfo

from perfdocs.doc_helpers import TableBuilder
from perfdocs.logger import PerfDocLogger
from perfdocs.utils import read_yaml

logger = PerfDocLogger()
BRANCHES = [
    "mozilla-central",
    "autoland",
    "mozilla-release",
    "mozilla-beta",
]

"""
This file is for framework specific gatherers since manifests
might be parsed differently in each of them. The gatherers
must implement the FrameworkGatherer class.
"""

class FrameworkGatherer(object):
    """
    Abstract class for framework gatherers.
    """

    def __init__(self, yaml_path, workspace_dir, taskgraph={}):
        """
        Generic initialization for a framework gatherer.
        """
        self.workspace_dir = workspace_dir
        self._yaml_path = yaml_path
        self._taskgraph = taskgraph
        self._suite_list = {}
        self._test_list = {}
        self._descriptions = {}
        self._manifest_path = ""
        self._manifest = None
        self.script_infos = {}
        self._task_list = {}
        self._task_match_pattern = re.compile(r"([\w\W]*/[pgo|opt]*)-([\w\W]*)")
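        # Illustrative note: for a hypothetical task name like
        # "test-linux1804-64-shippable/opt-browsertime-tp6-firefox-amazon",
        # group(1) captures the platform half ("test-linux1804-64-shippable/opt")
        # and group(2) the test half ("browsertime-tp6-firefox-amazon").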

    def _build_section_with_header(self, title, content, header_type=None):
        """
        Adds a section to the documentation, with `title` underlined using the
        heading style given by `header_type` and `content` as the paragraph body.

        :param title: title of the section
        :param content: content of the section paragraph
        :param header_type: type of the title heading (H2-H5)
        """
        heading_map = {"H2": "*", "H3": "=", "H4": "-", "H5": "^"}
        return [title, heading_map.get(header_type, "^") * len(title), content, ""]
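        # For example (illustrative): _build_section_with_header("Metrics", "Text", "H3")
        # returns ["Metrics", "=======", "Text", ""], since H3 maps to "=".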

    def _get_metric_heading(self, metric, metrics_info):
        """
        Gets the heading of a specific metric.

        :param str metric: The metric to search for.
        :param dict metrics_info: The information of all the
            metrics that were documented.
        :return str: The heading to use for the given metric.
        """
        for metric_heading, metric_info in metrics_info.items():
            if metric == metric_heading or any(
                metric == alias for alias in metric_info.get("aliases", [])
            ):
                return metric_heading
            if metric_info.get("matcher"):
                match = re.search(metric_info["matcher"], metric)
                if match:
                    return metric_heading

        raise Exception(f"Could not find a metric heading for `{metric}`")
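    # Illustrative example for _get_metric_heading, with hypothetical data:
    # given metrics_info = {"First Paint": {"aliases": ["fcp"]}}, both
    # _get_metric_heading("First Paint", metrics_info) and
    # _get_metric_heading("fcp", metrics_info) return "First Paint".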

    def get_task_match(self, task_name):
        return re.search(self._task_match_pattern, task_name)

    def get_manifest_path(self):
        """
        Returns the path to the manifest based on the
        manifest entry in the frameworks YAML configuration
        file.

        :return str: Path to the manifest.
        """
        if self._manifest_path:
            return self._manifest_path

        yaml_content = read_yaml(self._yaml_path)
        self._manifest_path = pathlib.Path(self.workspace_dir, yaml_content["manifest"])
        return self._manifest_path

    def get_suite_list(self):
        """
        Each framework gatherer must return a dictionary with
        the following structure. Note that the test names must
        be relative paths so that issues can be correctly issued
        by the reviewbot.

        :return dict: A dictionary with the following structure: {
            "suite_name": [
                'testing/raptor/test1',
                'testing/raptor/test2'
            ]
        }
        """
        raise NotImplementedError

    def build_metrics_documentation(self, yaml_content):
        """
        Each framework that provides a page with descriptions about the
        metrics it produces must implement this method. The metrics defined
        for the framework can be found in the `yaml_content` variable.

        The framework gatherer is expected to produce the full documentation
        for all the metrics defined in the yaml_content at once. This is done
        to allow differentiation between how metrics are displayed between
        the different frameworks.

        :param dict yaml_content: A dictionary of the YAML config file for
            the specific framework.
        :return list: A list of all the lines being added to the metrics
            documentation.
        """
        raise NotImplementedError

    def build_command_to_run_locally(self, framework_command, title):
        """
        Each framework has its own specifics for running tests locally. This
        method takes the framework-specific pieces as arguments so the command
        can be constructed consistently, and returns it for inclusion in the
        Mozilla source docs.

        :param str framework_command: The framework-specific portion of the
            command needed to run the tests.
        :param str title: The test name, appended after the framework-specific
            portion (see the framework_command param above).
        :return str: The formatted command to run locally; this output is
            added to the Mozilla source docs.
        """
        command_to_run_locally = " * Command to Run Locally\n\n"
        command_to_run_locally += " .. code-block::\n\n"
        command_to_run_locally += f"     ./mach {framework_command} {title}\n\n"
        return command_to_run_locally
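        # Illustrative output of build_command_to_run_locally("raptor -t", "amazon")
        # (spacing shown as reconstructed above):
        #
        #  * Command to Run Locally
        #
        #  .. code-block::
        #
        #      ./mach raptor -t amazon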


class RaptorGatherer(FrameworkGatherer):
    """
    Gatherer for the Raptor framework.
    """

    def get_suite_list(self):
        """
        Returns a dictionary containing a mapping from suites
        to the tests they contain.

        :return dict: A dictionary with the following structure: {
            "suite_name": [
                'testing/raptor/test1',
                'testing/raptor/test2'
            ]
        }
        """
        if self._suite_list:
            return self._suite_list

        manifest_path = self.get_manifest_path()

        # Get the tests from the manifest
        test_manifest = TestManifest([str(manifest_path)], strict=False)
        test_list = test_manifest.active_tests(exists=False, disabled=False)

        # Parse the tests into the expected dictionary
        for test in test_list:
            # Get the top-level suite
            s = os.path.basename(test["here"])
            if s not in self._suite_list:
                self._suite_list[s] = []

            # Get the individual test
            fpath = re.sub(".*testing", "testing", test["manifest"])

            if fpath not in self._suite_list[s]:
                self._suite_list[s].append(fpath)

        return self._suite_list

    def _get_ci_tasks(self):
        for task in self._taskgraph.keys():
            if type(self._taskgraph[task]) is dict:
                command = self._taskgraph[task]["task"]["payload"].get("command", [])
                run_on_projects = self._taskgraph[task]["attributes"]["run_on_projects"]
            else:
                command = self._taskgraph[task].task["payload"].get("command", [])
                run_on_projects = self._taskgraph[task].attributes["run_on_projects"]

            test_match = re.search(r"[\s']--test[\s=](.+?)[\s']", str(command))
            task_match = self.get_task_match(task)
            if test_match and task_match:
                test = test_match.group(1)
                platform = task_match.group(1)
                test_name = task_match.group(2)

                item = {"test_name": test_name, "run_on_projects": run_on_projects}
                self._task_list.setdefault(test, {}).setdefault(platform, []).append(
                    item
                )
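                # Illustrative shape of self._task_list after this loop
                # (hypothetical task data):
                #   {"amazon": {"test-linux1804-64-shippable/opt": [
                #       {"test_name": "browsertime-tp6-firefox-amazon",
                #        "run_on_projects": ["mozilla-central"]}]}}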

    def _get_subtests_from_ini(self, manifest_path, suite_name):
        """
        Returns the (sub)tests from an ini file containing the test definitions.

        :param str manifest_path: path to the ini file
        :return dict: a dictionary mapping test names to their descriptions
        """
        desc_exclusion = ["here", "manifest_relpath", "path", "relpath"]
        test_manifest = TestManifest(
            [str(manifest_path)], strict=False, document=True, add_line_no=True
        )
        test_list = test_manifest.active_tests(exists=False, disabled=False)
        subtests = {}
        for subtest in test_list:
            subtests[subtest["name"]] = subtest["manifest"]

            description = {}
            for key, value in subtest.items():
                if key not in desc_exclusion:
                    description[key] = value

            # Add searchfox link
            key = list(test_manifest.source_documents.keys())[0]
            if (
                test_manifest.source_documents[key]
                and subtest["name"] in test_manifest.source_documents[key].keys()
            ):
                description["link searchfox"] = (
                    "https://searchfox.org/mozilla-central/source/"
                    + pathlib.Path(manifest_path).as_posix()
                    + "#"
                    + test_manifest.source_documents[key][subtest["name"]]["lineno"]
                )

            # Prepare alerting metrics for verification
            description["metrics"] = [
                metric.strip()
                for metric in description.get("alert_on", "").split(",")
                if metric.strip() != ""
            ]
            if (
                description.get("gather_cpuTime", None)
                or "cpuTime" in description.get("measure", [])
                or suite_name in ["desktop", "interactive", "mobile"]
            ):
                description["metrics"].append("cpuTime")

            subtests[subtest["name"]] = description
            self._descriptions.setdefault(suite_name, []).append(description)

        self._descriptions[suite_name].sort(key=lambda item: item["name"])

        return subtests
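        # Illustrative return value (hypothetical fields): each test name maps
        # to its parsed description, e.g.
        #   {"amazon": {"name": "amazon", "alert_on": "fcp", "metrics": ["fcp"], ...}}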

    def _get_metric_heading(self, metric, metrics_info):
        """
        Finds and returns the correct heading for a metric to target in a reference link.

        :param str metric: The metric to search for.
        :param dict metrics_info: The information of all the
            metrics that were documented.
        :return str: A formatted string containing the reference link to the
            documented metric.
        """
        metric_heading = super(RaptorGatherer, self)._get_metric_heading(
            metric, metrics_info
        )
        return f"`{metric} <raptor-metrics.html#{metric_heading.lower().replace(' ', '-')}>`__"
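    # Illustrative: if the hypothetical metric "fcp" resolves to the documented
    # heading "First Paint", this returns
    # "`fcp <raptor-metrics.html#first-paint>`__".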

    def get_test_list(self):
        """
        Returns a dictionary containing the tests in every suite ini file.

        :return dict: A dictionary with the following structure: {
            "suite_name": {
                'raptor_test1',
                'raptor_test2'
            }
        }
        """
        if self._test_list:
            return self._test_list

        suite_list = self.get_suite_list()

        # Iterate over each manifest path from suite_list[suite_name]
        # and place the subtests into self._test_list under the same key
        for suite_name, manifest_paths in suite_list.items():
            if not self._test_list.get(suite_name):
                self._test_list[suite_name] = {}
            for manifest_path in manifest_paths:
                subtest_list = self._get_subtests_from_ini(manifest_path, suite_name)
                self._test_list[suite_name].update(subtest_list)

        self._get_ci_tasks()

        return self._test_list

    def build_test_description(
        self, title, test_description="", suite_name="", metrics_info=None
    ):
        matcher = []
        browsers = [
            "firefox",
            "chrome",
            "refbrow",
            "fennec68",
            "geckoview",
            "fenix",
        ]
        test_name = [f"{title}-{browser}" for browser in browsers]
        test_name.append(title)

        for suite, val in self._descriptions.items():
            for test in val:
                if test["name"] in test_name and suite_name == suite:
                    matcher.append(test)

        if len(matcher) == 0:
            logger.critical(
                "No tests exist for the following name "
                "(obtained from config.yml): {}".format(title)
            )
            raise Exception(
                "No tests exist for the following name "
                "(obtained from config.yml): {}".format(title)
            )

        result = f".. dropdown:: {title}\n"
        result += f" :class-container: anchor-id-{title}-{suite_name[0]}\n\n"
        result += self.build_command_to_run_locally("raptor -t", title)

        for idx, description in enumerate(matcher):
            if description["name"] != title:
                result += f" {idx+1}. **{description['name']}**\n\n"
            if "owner" in description.keys():
                result += f" **Owner**: {description['owner']}\n\n"
            if test_description:
                result += f" **Description**: {test_description}\n\n"

            for key in sorted(description.keys()):
                if key in ["owner", "name", "manifest", "metrics"]:
                    continue
                sub_title = key.replace("_", " ")
                if key == "test_url":
                    if "<" in description[key] or ">" in description[key]:
                        description[key] = description[key].replace("<", r"\<")
                        description[key] = description[key].replace(">", r"\>")
                    result += f" * **{sub_title}**: `<{description[key]}>`__\n"
                elif key == "secondary_url":
                    result += f" * **{sub_title}**: `<{description[key]}>`__\n"
                elif key == "link searchfox":
                    result += f" * **{sub_title}**: `<{description[key]}>`__\n"
                elif key in ["playback_pageset_manifest"]:
                    result += (
                        f" * **{sub_title}**: "
                        f"{description[key].replace('{subtest}', description['name'])}\n"
                    )
                elif key == "alert_on":
                    result += (
                        f" * **{sub_title}**: "
                        + ", ".join(
                            self._get_metric_heading(metric.strip(), metrics_info)
                            for metric in description[key]
                            .replace("\n", " ")
                            .replace(",", " ")
                            .split()
                        )
                        + "\n"
                    )
                else:
                    if "\n" in description[key]:
                        description[key] = description[key].replace("\n", " ")
                    result += f" * **{sub_title}**: {description[key]}\n"

        if self._task_list.get(title, []):
            result += " * **Test Task**:\n\n"
            for platform in sorted(self._task_list[title]):
                self._task_list[title][platform].sort(key=lambda x: x["test_name"])

                table = TableBuilder(
                    title=platform,
                    widths=[30] + [15 for x in BRANCHES],
                    header_rows=1,
                    headers=[["Test Name"] + BRANCHES],
                    indent=3,
                )

                for task in self._task_list[title][platform]:
                    values = [task["test_name"]]
                    values += [
                        (
                            "\u2705"
                            if match_run_on_projects(x, task["run_on_projects"])
                            else "\u274C"
                        )
                        for x in BRANCHES
                    ]
                    table.add_row(values)
                result += f"{table.finish_table()}\n"

        return [result]

    def build_suite_section(self, title, content):
        return self._build_section_with_header(
            title.capitalize(), content, header_type="H4"
        )

    def build_metrics_documentation(self, parsed_metrics):
        metrics_documentation = []
        for metric, metric_info in sorted(
            parsed_metrics.items(), key=lambda item: item[0]
        ):
            metric_content = metric_info["description"] + "\n\n"

            metric_content += (
                f" * **Aliases**: {', '.join(sorted(metric_info['aliases']))}\n"
            )
            if metric_info.get("location", None):
                metric_content += " * **Tests using it**:\n"

                for suite, tests in sorted(
                    metric_info["location"].items(), key=lambda item: item[0]
                ):
                    metric_content += f"   * **{suite.capitalize()}**: "

                    test_links = []
                    for test in sorted(tests):
                        test_links.append(
                            f"`{test} <raptor.html#{test}-{suite.lower()[0]}>`__"
                        )

                    metric_content += ", ".join(test_links) + "\n"

            metrics_documentation.extend(
                self._build_section_with_header(
                    metric, metric_content, header_type="H3"
                )
            )

        return metrics_documentation


class MozperftestGatherer(FrameworkGatherer):
    """
    Gatherer for the Mozperftest framework.
    """

    def get_test_list(self):
        """
        Returns a dictionary containing the tests found in the perftest.toml manifests.

        :return dict: A dictionary with the following structure: {
            "suite_name": {
                'perftest_test1',
                'perftest_test2',
            }
        }
        """
        for path in list(pathlib.Path(self.workspace_dir).rglob("perftest.toml")):
            if "obj-" in str(path) or "objdir-" in str(path):
                continue
            suite_name = str(path.parent).replace(str(self.workspace_dir), "")

            # If the workspace dir doesn't end with a forward-slash,
            # the substitution above won't work completely
            if suite_name.startswith("/") or suite_name.startswith("\\"):
                suite_name = suite_name[1:]

            # We have to add new paths to the logger as we search
            # because mozperftest tests exist in multiple places in-tree
            PerfDocLogger.PATHS.append(suite_name)

            # Get the tests from perftest.toml
            test_manifest = TestManifest([str(path)], strict=False)
            test_list = test_manifest.active_tests(exists=False, disabled=True)
            for test in test_list:
                si = ScriptInfo(test["path"])
                self.script_infos[si["name"].replace(".", "")] = si
                self._test_list.setdefault(suite_name.replace("\\", "/"), {}).update(
                    {si["name"].replace(".", ""): {"path": str(path)}}
                )

        return self._test_list
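        # Illustrative shape of the result (hypothetical paths): suite keys are
        # manifest directories relative to the workspace, and test keys are the
        # ScriptInfo names with any "." removed, e.g.
        #   {"netwerk/test/perf": {"perftest_http3js": {"path": "..."}}}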

    def build_test_description(
        self, title, test_description="", suite_name="", metrics_info=None
    ):
        return [str(self.script_infos[title])]

    def build_suite_section(self, title, content):
        return self._build_section_with_header(title, content, header_type="H4")


class TalosGatherer(FrameworkGatherer):
    def _get_ci_tasks(self):
        with open(
            pathlib.Path(self.workspace_dir, "testing", "talos", "talos.json")
        ) as f:
            config_suites = json.load(f)["suites"]

        for task_name in self._taskgraph.keys():
            task = self._taskgraph[task_name]

            if type(task) is dict:
                is_talos = task["task"]["extra"].get("suite", [])
                command = task["task"]["payload"].get("command", [])
                run_on_projects = task["attributes"]["run_on_projects"]
            else:
                is_talos = task.task["extra"].get("suite", [])
                command = task.task["payload"].get("command", [])
                run_on_projects = task.attributes["run_on_projects"]

            suite_match = re.search(r"[\s']--suite[\s=](.+?)[\s']", str(command))
            task_match = self.get_task_match(task_name)
            if "talos" == is_talos and task_match:
                suite = suite_match.group(1)
                platform = task_match.group(1)
                test_name = task_match.group(2)
                item = {"test_name": test_name, "run_on_projects": run_on_projects}

                for test in config_suites[suite]["tests"]:
                    self._task_list.setdefault(test, {}).setdefault(
                        platform, []
                    ).append(item)
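                # Note: unlike Raptor, a single Talos CI task covers a whole
                # suite, so the same item is recorded under every test that
                # talos.json lists for that suite.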

    def get_test_list(self):
        from talos import test as talos_test

        test_lists = talos_test.test_dict()
        mod = __import__("talos.test", fromlist=test_lists)

        suite_name = "Talos Tests"

        for test in test_lists:
            self._test_list.setdefault(suite_name, {}).update({test: {}})

            klass = getattr(mod, test)
            self._descriptions.setdefault(test, klass.__dict__)

        self._get_ci_tasks()

        return self._test_list

    def build_test_description(
        self, title, test_description="", suite_name="", metrics_info=None
    ):
        result = f".. dropdown:: {title}\n"
        result += f" :class-container: anchor-id-{title}\n\n"
        result += self.build_command_to_run_locally("talos-test -a", title)

        yml_descriptions = [s.strip() for s in test_description.split("- ") if s]
        for description in yml_descriptions:
            if "Example Data" in description:
                # Example Data uses a code block
                example_list = [s.strip() for s in description.split("* ")]
                result += f" * {example_list[0]}\n"
                result += "\n .. code-block::\n\n"
                for example in example_list[1:]:
                    result += f"     {example}\n"
                result += "\n"

            elif " * " in description:
                # Sub list
                sub_list = [s.strip() for s in description.split(" * ")]
                result += f" * {sub_list[0]}\n"
                for sub in sub_list[1:]:
                    result += f"   * {sub}\n"

            else:
                # General list
                result += f" * {description}\n"

        if title in self._descriptions:
            for key in sorted(self._descriptions[title]):
                if key.startswith("__") and key.endswith("__"):
                    continue
                elif key == "filters":
                    continue

                # On Windows, we get the paths in the wrong style
                value = self._descriptions[title][key]
                if isinstance(value, dict):
                    for k, v in value.items():
                        if isinstance(v, str) and "\\" in v:
                            value[k] = str(v).replace("\\", r"/")
                result += " * " + key + ": " + str(value) + "\n"

        if self._task_list.get(title, []):
            result += " * **Test Task**:\n\n"
            for platform in sorted(self._task_list[title]):
                self._task_list[title][platform].sort(key=lambda x: x["test_name"])

                table = TableBuilder(
                    title=platform,
                    widths=[30] + [15 for x in BRANCHES],
                    header_rows=1,
                    headers=[["Test Name"] + BRANCHES],
                    indent=3,
                )

                for task in self._task_list[title][platform]:
                    values = [task["test_name"]]
                    values += [
                        (
                            "\u2705"
                            if match_run_on_projects(x, task["run_on_projects"])
                            else "\u274C"
                        )
                        for x in BRANCHES
                    ]
                    table.add_row(values)
                result += f"{table.finish_table()}\n"

        return [result]

    def build_suite_section(self, title, content):
        return self._build_section_with_header(title, content, header_type="H2")


class AwsyGatherer(FrameworkGatherer):
    """
    Gatherer for the Awsy framework.
    """

    def _generate_ci_tasks(self):
        for task_name in self._taskgraph.keys():
            task = self._taskgraph[task_name]

            if type(task) is dict:
                awsy_test = task["task"]["extra"].get("suite", [])
                run_on_projects = task["attributes"]["run_on_projects"]
            else:
                awsy_test = task.task["extra"].get("suite", [])
                run_on_projects = task.attributes["run_on_projects"]

            task_match = self.get_task_match(task_name)

            if "awsy" in awsy_test and task_match:
                platform = task_match.group(1)
                test_name = task_match.group(2)
                item = {"test_name": test_name, "run_on_projects": run_on_projects}
                self._task_list.setdefault(platform, []).append(item)
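                # Note: unlike the Raptor and Talos gatherers, _task_list here is
                # keyed by platform rather than by test name; test names are
                # filtered later in build_test_description via search_tag.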

    def get_suite_list(self):
        self._suite_list = {"Awsy tests": ["tp6", "base", "dmd", "tp5"]}
        return self._suite_list

    def get_test_list(self):
        self._generate_ci_tasks()
        return {
            "Awsy tests": {
                "tp6": {},
                "base": {},
                "dmd": {},
                "tp5": {},
            }
        }

    def build_suite_section(self, title, content):
        return self._build_section_with_header(
            title.capitalize(), content, header_type="H4"
        )

    def build_test_description(
        self, title, test_description="", suite_name="", metrics_info=None
    ):
        dropdown_suite_name = suite_name.replace(" ", "-")
        result = f".. dropdown:: {title} ({test_description})\n"
        result += f" :class-container: anchor-id-{title}-{dropdown_suite_name}\n\n"
        result += self.build_command_to_run_locally(
            "awsy-test", "" if title == "tp6" else f"--{title}"
        )

        awsy_data = read_yaml(self._yaml_path)["suites"]["Awsy tests"]
        if "owner" in awsy_data.keys():
            result += f" **Owner**: {awsy_data['owner']}\n\n"
        result += " * **Test Task**:\n"

        # tp5 tests are represented by awsy-e10s test names
        # while the others have their title in test names
        search_tag = "awsy-e10s" if title == "tp5" else title
        for platform in sorted(self._task_list.keys()):
            result += f"   * {platform}\n"
            for test_dict in sorted(
                self._task_list[platform], key=lambda d: d["test_name"]
            ):
                if search_tag in test_dict["test_name"]:
                    run_on_project = ": " + (
                        ", ".join(test_dict["run_on_projects"])
                        if test_dict["run_on_projects"]
                        else "None"
                    )
                    result += f"      * {test_dict['test_name']}{run_on_project}\n"
        result += "\n"

        return [result]


class StaticGatherer(FrameworkGatherer):
    """
    A noop gatherer for frameworks with static-only documentation.
    """

    pass