import json
import os
import re

import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestCase(TestBase):
    NO_DEBUG_INFO_TESTCASE = True
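
    # All of the tests below call self.get_stats(), a helper defined elsewhere
    # in this file that falls outside this excerpt. The stand-in sketch below
    # shows the assumed behavior: run "statistics dump" with any extra options
    # and parse the JSON output into a Python dictionary.
    def get_stats(self, options=None):
        """Run "statistics dump" with optional extra options and return the
        output parsed as a Python dictionary. This is a minimal stand-in for
        the real helper, which is not shown in this excerpt."""
        command = "statistics dump"
        if options is not None:
            command += " " + options
        result = lldb.SBCommandReturnObject()
        # Route the command through the interpreter so the output is captured.
        self.dbg.GetCommandInterpreter().HandleCommand(command, result)
        return json.loads(result.GetOutput())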

    def test_enable_disable(self):
        """
        Test "statistics disable" and "statistics enable". These don't do
        anything anymore for cheap-to-gather statistics. In the future, if
        statistics are expensive to gather, we can enable the feature inside
        of LLDB and test that enabling and disabling stops expensive
        information from being gathered.
        """
        self.build()
        target = self.createTestTarget()

        self.expect(
            "statistics disable",
            substrs=["need to enable statistics before disabling"],
            error=True,
        )
        self.expect("statistics enable")
        self.expect("statistics enable", substrs=["already enabled"], error=True)
        self.expect("statistics disable")
        self.expect(
            "statistics disable",
            substrs=["need to enable statistics before disabling"],
            error=True,
        )

    def verify_key_in_dict(self, key, d, description):
        self.assertIn(
            key, d, 'make sure key "%s" is in dictionary %s' % (key, description)
        )

    def verify_key_not_in_dict(self, key, d, description):
        self.assertNotIn(
            key, d, 'make sure key "%s" is not in dictionary %s' % (key, description)
        )

    def verify_keys(self, dict, description, keys_exist, keys_missing=None):
        """
        Verify that all keys in the "keys_exist" list are top level items in
        "dict", and that all keys in "keys_missing" do not exist as top
        level items in "dict".
        """
        if keys_exist:
            for key in keys_exist:
                self.verify_key_in_dict(key, dict, description)
        if keys_missing:
            for key in keys_missing:
                self.verify_key_not_in_dict(key, dict, description)

    def verify_success_fail_count(self, stats, key, num_successes, num_fails):
        self.verify_key_in_dict(key, stats, 'stats["%s"]' % (key))
        success_fail_dict = stats[key]
        self.assertEqual(
            success_fail_dict["successes"], num_successes, "make sure success count"
        )
        self.assertEqual(
            success_fail_dict["failures"], num_fails, "make sure failure count"
        )

    def get_target_stats(self, debug_stats):
        if "targets" in debug_stats:
            return debug_stats["targets"][0]
        return None

    def get_command_stats(self, debug_stats):
        if "commands" in debug_stats:
            return debug_stats["commands"]
        return None

    def test_expressions_frame_var_counts(self):
        self.build()
        lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("main.cpp")
        )

        self.expect("expr patatino", substrs=["27"])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, "expressionEvaluation", 1, 0)
        self.expect(
            "expr doesnt_exist",
            error=True,
            substrs=["undeclared identifier 'doesnt_exist'"],
        )
        # Doesn't successfully execute.
        self.expect("expr int *i = nullptr; *i", error=True)
        # Interpreting an integer as an array with 3 elements is a failure for
        # the "expr" command, but the expression evaluation will succeed and
        # be counted as a success, even though the "expr" options will force
        # the command to fail. It is more important to track expression
        # evaluation from all sources instead of just through the command, so
        # this was changed. If we want to track command successes and
        # failures, we can do so using another metric.
        self.expect(
            "expr -Z 3 -- 1",
            error=True,
            substrs=["expression cannot be used with --element-count"],
        )
        # We should now have two new failures and one new success on top of
        # the earlier success.
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, "expressionEvaluation", 2, 2)

        self.expect("statistics enable")
        # 'frame var' with enabled statistics will change stats.
        self.expect("frame var", substrs=["27"])
        stats = self.get_target_stats(self.get_stats())
        self.verify_success_fail_count(stats, "frameVariable", 1, 0)

        # Test that "stopCount" is available when the process has run.
        self.assertIn("stopCount", stats, 'ensure "stopCount" is in target JSON')
        self.assertGreater(
            stats["stopCount"], 0, 'make sure "stopCount" is greater than zero'
        )

    def test_default_no_run(self):
        """Test "statistics dump" without running the target.

        When we don't run the target, we expect to not see any 'firstStopTime'
        or 'launchOrAttachTime' top level keys that measure the launch or
        attach of the target.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "targets": [
            {
              "targetCreateTime": 0.26566899599999999,
              "expressionEvaluation": {
                ...
              },
              "moduleIdentifiers": [...],
              ...
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
          ...
        }
        """
        self.build()
        target = self.createTestTarget()
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoByteSize",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoParseTime",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats["targets"][0]
        keys_exist = [
            "expressionEvaluation",
            "targetCreateTime",
        ]
        keys_missing = ["firstStopTime", "launchOrAttachTime"]
        self.verify_keys(stats, '"stats"', keys_exist, keys_missing)
        self.assertGreater(stats["targetCreateTime"], 0.0)

    def test_default_with_run(self):
        """Test "statistics dump" when running the target to a breakpoint.

        When we run the target, we expect to see 'launchOrAttachTime' and
        'firstStopTime' top level keys.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "targets": [
            {
              "firstStopTime": 0.34164492800000001,
              "launchOrAttachTime": 0.31969605400000001,
              "moduleIdentifiers": [...],
              "targetCreateTime": 0.0040863039999999998,
              "expressionEvaluation": {
                ...
              },
              ...
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
          ...
        }
        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("main.cpp")
        )
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoByteSize",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoParseTime",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats["targets"][0]
        keys_exist = [
            "expressionEvaluation",
            "firstStopTime",
            "launchOrAttachTime",
            "targetCreateTime",
            "summaryProviderStatistics",
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        self.assertGreater(stats["firstStopTime"], 0.0)
        self.assertGreater(stats["launchOrAttachTime"], 0.0)
        self.assertGreater(stats["targetCreateTime"], 0.0)

    def test_memory(self):
        """
        Test "statistics dump" and the memory information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoParseTime",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoByteSize",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)

        memory = debug_stats["memory"]
        memory_keys = [
            "strings",
        ]
        self.verify_keys(memory, '"memory"', memory_keys, None)

        strings = memory["strings"]
        strings_keys = [
            "bytesTotal",
            "bytesUsed",
            "bytesUnused",
        ]
        self.verify_keys(strings, '"strings"', strings_keys, None)

    def find_module_in_metrics(self, path, stats):
        modules = stats["modules"]
        for module in modules:
            if module["path"] == path:
                return module
        return None

    def find_module_by_id_in_metrics(self, id, stats):
        modules = stats["modules"]
        for module in modules:
            if module["identifier"] == id:
                return module
        return None

    def test_modules(self):
        """
        Test "statistics dump" and the module information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoParseTime",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoByteSize",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        stats = debug_stats["targets"][0]
        keys_exist = [
            "moduleIdentifiers",
        ]
        self.verify_keys(stats, '"stats"', keys_exist, None)
        exe_module = self.find_module_in_metrics(exe, debug_stats)
        module_keys = [
            "debugInfoIndexLoadedFromCache",
            "debugInfoIndexTime",
            "debugInfoIndexSavedToCache",
            "debugInfoParseTime",
            "symbolTableIndexTime",
            "symbolTableLoadedFromCache",
            "symbolTableParseTime",
            "symbolTableSavedToCache",
        ]
        self.assertNotEqual(exe_module, None)
        self.verify_keys(exe_module, 'module dict for "%s"' % (exe), module_keys)

    def test_commands(self):
        """
        Test "statistics dump" and the command information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        interp = self.dbg.GetCommandInterpreter()
        result = lldb.SBCommandReturnObject()
        interp.HandleCommand("target list", result)
        interp.HandleCommand("target list", result)

        debug_stats = self.get_stats()

        command_stats = self.get_command_stats(debug_stats)
        self.assertNotEqual(command_stats, None)
        self.assertEqual(command_stats["target list"], 2)

    def test_breakpoints(self):
        """Test "statistics dump" and the breakpoint information.

        Output expected to be something like:

        (lldb) statistics dump
        {
          "targets": [
            {
              "firstStopTime": 0.34164492800000001,
              "launchOrAttachTime": 0.31969605400000001,
              "moduleIdentifiers": [...],
              "targetCreateTime": 0.0040863039999999998,
              "expressionEvaluation": {
                ...
              },
              "breakpoints": [
                {
                  ...
                  "resolveTime": 2.65438675
                },
                {
                  ...
                  "resolveTime": 4.3632581669999997
                }
              ],
              ...
            }
          ],
          "totalDebugInfoByteSize": 182522234,
          "totalDebugInfoIndexTime": 2.33343,
          "totalDebugInfoParseTime": 8.2121400240000071,
          "totalSymbolTableParseTime": 0.123,
          "totalSymbolTableIndexTime": 0.234,
          "totalBreakpointResolveTime": 7.0176449170000001,
          ...
        }
        """
        self.build()
        target = self.createTestTarget()
        self.runCmd("b main.cpp:7")
        self.runCmd("b a_function")
        debug_stats = self.get_stats()
        debug_stat_keys = [
            "totalSymbolTableParseTime",
            "totalSymbolTableIndexTime",
            "totalSymbolTablesLoadedFromCache",
            "totalSymbolTablesSavedToCache",
            "totalDebugInfoParseTime",
            "totalDebugInfoIndexTime",
            "totalDebugInfoIndexLoadedFromCache",
            "totalDebugInfoIndexSavedToCache",
            "totalDebugInfoByteSize",
        ]
        self.verify_keys(debug_stats, '"debug_stats"', debug_stat_keys, None)
        target_stats = debug_stats["targets"][0]
        keys_exist = [
            "expressionEvaluation",
            "totalBreakpointResolveTime",
            "summaryProviderStatistics",
        ]
        self.verify_keys(target_stats, '"stats"', keys_exist, None)
        self.assertGreater(target_stats["totalBreakpointResolveTime"], 0.0)
        breakpoints = target_stats["breakpoints"]
        bp_keys_exist = [
            "numResolvedLocations",
            "resolveTime",
        ]
        for breakpoint in breakpoints:
            self.verify_keys(
                breakpoint, 'target_stats["breakpoints"]', bp_keys_exist, None
            )

    def test_dsym_binary_has_symfile_in_stats(self):
        """
        Test that if our executable has a stand alone dSYM file containing
        debug information, the dSYM file path is listed as a key/value
        pair in the "a.out" binary's module stats. Also verify that the main
        executable's module statistics has a debug info size that is greater
        than zero, as the dSYM contains debug info.
        """
        self.build(debug_info="dsym")
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file exists after building.
        self.assertTrue(os.path.isdir(dsym))

        target = self.createTestTarget(file_path=exe)

        debug_stats = self.get_stats()

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we have a dSYM file, there should be a key/value pair in the module
        # statistics, and the path should match the dSYM file path in the build
        # artifacts.
        self.assertIn("symbolFilePath", exe_stats)
        stats_dsym = exe_stats["symbolFilePath"]

        # Make sure the main executable's module info has a debug info size
        # that is greater than zero, as the dSYM file and main executable work
        # together in the lldb.SBModule class to provide the data.
        self.assertGreater(exe_stats["debugInfoByteSize"], 0)

        # The "dsym" variable contains the bundle directory for the dSYM, while
        # "stats_dsym" holds the path to the symbol file inside that bundle, so
        # check for containment rather than equality.
        self.assertIn(dsym, stats_dsym)
        # Since we have a dSYM file, we should not be loading DWARF from the .o
        # files, and the .o file module identifiers should NOT be in the module
        # statistics.
        self.assertNotIn("symbolFileModuleIdentifiers", exe_stats)

    def test_no_dsym_binary_has_symfile_identifiers_in_stats(self):
        """
        Test that if our executable loads debug info from the .o files,
        the module statistics contains a 'symbolFileModuleIdentifiers'
        key which is a list of module identifiers, and verify that the
        module identifier can be used to find the .o file's module stats.
        Also verify that the main executable's module statistics has a debug
        info size that is zero, as the main executable itself has no debug
        info, but verify that the .o files have a debug info size that is
        greater than zero. This test ensures that we don't double count
        debug info.
        """
        self.build(debug_info="dwarf")
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file doesn't exist after building.
        self.assertFalse(os.path.isdir(dsym))

        target = self.createTestTarget(file_path=exe)

        # Force the 'main.o' .o file's DWARF to be loaded so it will show up
        # in the statistics.
        self.runCmd("b main.cpp:7")

        debug_stats = self.get_stats("--all-targets")

        exe_stats = self.find_module_in_metrics(exe, debug_stats)
        # If we don't have a dSYM file, there should not be a "symbolFilePath"
        # key/value pair in the module statistics.
        self.assertNotIn("symbolFilePath", exe_stats)

        # Make sure the main executable's module info has a debug info size
        # that is zero, as there is no debug info in the main executable, only
        # in the .o files. The .o files will also only be loaded if something
        # causes them to be loaded, so we set a breakpoint to force the .o file
        # debug info to be loaded.
        self.assertEqual(exe_stats["debugInfoByteSize"], 0)

        # When we don't have a dSYM file, the SymbolFileDWARFDebugMap class
        # should create a module for each .o file that contains DWARF, so we
        # need to verify that the module statistics contain valid module
        # identifiers for those .o files, including main.o.
        self.assertIn("symbolFileModuleIdentifiers", exe_stats)

        symfileIDs = exe_stats["symbolFileModuleIdentifiers"]
        for symfileID in symfileIDs:
            o_module = self.find_module_by_id_in_metrics(symfileID, debug_stats)
            self.assertNotEqual(o_module, None)
            # Make sure each .o file has some debug info bytes.
            self.assertGreater(o_module["debugInfoByteSize"], 0)

    def test_had_frame_variable_errors(self):
        """
        Test that if we have frame variable errors, we see this in the
        statistics for the module that had issues.
        """
        self.build(debug_info="dwarf")
        exe_name = "a.out"
        exe = self.getBuildArtifact(exe_name)
        dsym = self.getBuildArtifact(exe_name + ".dSYM")
        main_obj = self.getBuildArtifact("main.o")
        # Make sure the executable file exists after building.
        self.assertTrue(os.path.exists(exe))
        # Make sure the dSYM file doesn't exist after building.
        self.assertFalse(os.path.isdir(dsym))
        # Make sure the main.o object file exists after building.
        self.assertTrue(os.path.exists(main_obj))

        # Delete the main.o file that contains the debug info so we force an
        # error when we run to main and try to get variables.
        os.unlink(main_obj)

        (target, process, thread, bkpt) = lldbutil.run_to_name_breakpoint(self, "main")

        # Get stats and verify we have had no errors yet.
        stats = self.get_stats()
        exe_stats = self.find_module_in_metrics(exe, stats)
        self.assertIsNotNone(exe_stats)

        # Make sure we have a "debugInfoHadVariableErrors" entry that is set to
        # false before failing to get local variables due to the missing .o file.
        self.assertFalse(exe_stats["debugInfoHadVariableErrors"])

        # Verify that the top level statistic that aggregates the number of
        # modules with debugInfoHadVariableErrors is zero.
        self.assertEqual(stats["totalModuleCountWithVariableErrors"], 0)

        # Try, and fail, to get variables.
        vars = thread.GetFrameAtIndex(0).GetVariables(True, True, False, True)

        # Make sure we got an error back that indicates that variables were not
        # available.
        self.assertTrue(vars.GetError().Fail())

        # Get stats and verify we had errors.
        stats = self.get_stats()
        exe_stats = self.find_module_in_metrics(exe, stats)
        self.assertIsNotNone(exe_stats)

        # Make sure the "debugInfoHadVariableErrors" entry is set to true after
        # failing to get local variables due to the missing .o file.
        self.assertTrue(exe_stats["debugInfoHadVariableErrors"])

        # Verify that the top level statistic that aggregates the number of
        # modules with debugInfoHadVariableErrors is greater than zero.
        self.assertGreater(stats["totalModuleCountWithVariableErrors"], 0)

    def test_transcript_happy_path(self):
        """
        Test "statistics dump" and the transcript information.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")

        # Verify the output of a first "statistics dump".
        debug_stats = self.get_stats("--transcript true")
        self.assertIn("transcript", debug_stats)
        transcript = debug_stats["transcript"]
        self.assertEqual(len(transcript), 2)
        self.assertEqual(transcript[0]["commandName"], "version")
        self.assertEqual(transcript[1]["commandName"], "statistics dump")
        # The first "statistics dump" in the transcript should have no output,
        # since the transcript is captured while that command is still running.
        self.assertNotIn("output", transcript[1])

        # Verify the output of a second "statistics dump".
        debug_stats = self.get_stats("--transcript true")
        self.assertIn("transcript", debug_stats)
        transcript = debug_stats["transcript"]
        self.assertEqual(len(transcript), 3)
        self.assertEqual(transcript[0]["commandName"], "version")
        self.assertEqual(transcript[1]["commandName"], "statistics dump")
        # The first "statistics dump" in the transcript should have output now.
        self.assertIn("output", transcript[1])
        self.assertEqual(transcript[2]["commandName"], "statistics dump")
        # The second "statistics dump" in the transcript should have no output.
        self.assertNotIn("output", transcript[2])

    def verify_stats(self, stats, expectation, options):
        for field_name in expectation:
            idx = field_name.find(".")
            if idx == -1:
                # `field_name` is a top-level field.
                exists = field_name in stats
                should_exist = expectation[field_name]
                should_exist_string = "" if should_exist else "not "
                self.assertEqual(
                    exists,
                    should_exist,
                    f"'{field_name}' should {should_exist_string}exist for 'statistics dump{options}'",
                )
            else:
                # `field_name` is a string of "<top-level field>.<second-level field>".
                top_level_field_name = field_name[0:idx]
                second_level_field_name = field_name[idx + 1 :]
                for top_level_field in (
                    stats[top_level_field_name] if top_level_field_name in stats else {}
                ):
                    exists = second_level_field_name in top_level_field
                    should_exist = expectation[field_name]
                    should_exist_string = "" if should_exist else "not "
                    self.assertEqual(
                        exists,
                        should_exist,
                        f"'{field_name}' should {should_exist_string}exist for 'statistics dump{options}'",
                    )

    def get_test_cases_for_sections_existence(self):
        should_always_exist_or_not = {
            "totalDebugInfoEnabled": True,
        }
        test_cases = [
            {  # Default mode
                "command_options": "",
                "api_options": {},
                "expect": {
                    "targets.moduleIdentifiers": True,
                    "targets.breakpoints": True,
                    "targets.expressionEvaluation": True,
                    "targets.frameVariable": True,
                    "targets.totalSharedLibraryEventHitCount": True,
                },
            },
            {  # Summary mode
                "command_options": " --summary",
                "api_options": {
                    "SetSummaryOnly": True,
                },
                "expect": {
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": False,
                    "targets.expressionEvaluation": False,
                    "targets.frameVariable": False,
                    "targets.totalSharedLibraryEventHitCount": True,
                },
            },
            {  # Summary mode with targets
                "command_options": " --summary --targets=true",
                "api_options": {
                    "SetSummaryOnly": True,
                    "SetIncludeTargets": True,
                },
                "expect": {
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": False,
                    "targets.expressionEvaluation": False,
                    "targets.frameVariable": False,
                    "targets.totalSharedLibraryEventHitCount": True,
                },
            },
            {  # Summary mode without targets
                "command_options": " --summary --targets=false",
                "api_options": {
                    "SetSummaryOnly": True,
                    "SetIncludeTargets": False,
                },
                "expect": {
                    "targets": False,
                },
            },
            {  # Summary mode with modules
                "command_options": " --summary --modules=true",
                "api_options": {
                    "SetSummaryOnly": True,
                    "SetIncludeModules": True,
                },
                "expect": {
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": False,
                    "targets.expressionEvaluation": False,
                    "targets.frameVariable": False,
                    "targets.totalSharedLibraryEventHitCount": True,
                },
            },
            {  # Default mode without modules and transcript
                "command_options": " --modules=false --transcript=false",
                "api_options": {
                    "SetIncludeModules": False,
                    "SetIncludeTranscript": False,
                },
                "expect": {
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": True,
                    "targets.expressionEvaluation": True,
                    "targets.frameVariable": True,
                    "targets.totalSharedLibraryEventHitCount": True,
                },
            },
            {  # Default mode without modules
                "command_options": " --modules=false",
                "api_options": {
                    "SetIncludeModules": False,
                },
                "expect": {
                    "targets.moduleIdentifiers": False,
                    "targets.breakpoints": True,
                    "targets.expressionEvaluation": True,
                    "targets.frameVariable": True,
                    "targets.totalSharedLibraryEventHitCount": True,
                },
            },
        ]
        return (should_always_exist_or_not, test_cases)

    def test_sections_existence_through_command(self):
        """
        Test "statistics dump" and the existence of sections when different
        options are given through the command line (CLI or HandleCommand).
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        # Create some transcript so that it can be tested.
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")
        self.runCmd("b main")
        # Then disable transcript so that it won't change during verification.
        self.runCmd("settings set interpreter.save-transcript false")

        (
            should_always_exist_or_not,
            test_cases,
        ) = self.get_test_cases_for_sections_existence()

        for test_case in test_cases:
            options = test_case["command_options"]
            # Get the statistics dump result.
            stats = self.get_stats(options)
            # Verify that each field should exist (or not).
            expectation = {**should_always_exist_or_not, **test_case["expect"]}
            self.verify_stats(stats, expectation, options)

    def test_sections_existence_through_api(self):
        """
        Test "statistics dump" and the existence of sections when different
        options are given through the public API.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        # Create some transcript so that it can be tested.
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")
        self.runCmd("b main")
        # But disable transcript so that it won't change during verification.
        self.runCmd("settings set interpreter.save-transcript false")

        (
            should_always_exist_or_not,
            test_cases,
        ) = self.get_test_cases_for_sections_existence()

        for test_case in test_cases:
            # Set the statistics options through the SBStatisticsOptions API.
            options = test_case["api_options"]
            sb_options = lldb.SBStatisticsOptions()
            for method_name, param_value in options.items():
                getattr(sb_options, method_name)(param_value)
            # Get the statistics dump result.
            stream = lldb.SBStream()
            target.GetStatistics(sb_options).GetAsJSON(stream)
            stats = json.loads(stream.GetData())
            # Verify that each field should exist (or not).
            expectation = {**should_always_exist_or_not, **test_case["expect"]}
            self.verify_stats(stats, expectation, options)
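
    # For reference, the getattr() dispatch above is equivalent to doing this
    # by hand for a single option set (a minimal sketch using only calls that
    # already appear in this test):
    #
    #   sb_options = lldb.SBStatisticsOptions()
    #   sb_options.SetSummaryOnly(True)
    #   stream = lldb.SBStream()
    #   target.GetStatistics(sb_options).GetAsJSON(stream)
    #   stats = json.loads(stream.GetData())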

    def test_order_of_options_do_not_matter(self):
        """
        Test "statistics dump" and the order of options.
        """
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.createTestTarget(file_path=exe)

        # Create some transcript so that it can be tested.
        self.runCmd("settings set interpreter.save-transcript true")
        self.runCmd("version")
        self.runCmd("b main")
        # Then disable transcript so that it won't change during verification.
        self.runCmd("settings set interpreter.save-transcript false")

        # The order of the following options shouldn't matter.
        test_cases = [
            (" --summary", " --targets=true"),
            (" --summary", " --targets=false"),
            (" --summary", " --modules=true"),
            (" --summary", " --modules=false"),
            (" --summary", " --transcript=true"),
            (" --summary", " --transcript=false"),
        ]

        for options in test_cases:
            debug_stats_0 = self.get_stats(options[0] + options[1])
            debug_stats_1 = self.get_stats(options[1] + options[0])
            # Redact all numbers, since values such as elapsed times may
            # differ between the two runs.
            debug_stats_0 = re.sub(r"\d+", "0", json.dumps(debug_stats_0))
            debug_stats_1 = re.sub(r"\d+", "0", json.dumps(debug_stats_1))
            # Verify that the two outputs are the same.
            self.assertEqual(
                debug_stats_0,
                debug_stats_1,
                f"The order of options '{options[0]}' and '{options[1]}' should not matter",
            )

    def test_summary_statistics_providers(self):
        """
        Test that summary timing statistics are included in the statistics
        dump when a type with a summary provider exists and is evaluated.
        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(
            self, "// stop here", lldb.SBFileSpec("main.cpp")
        )
        self.expect("frame var", substrs=["hello world"])
        stats = self.get_target_stats(self.get_stats())
        self.assertIn("summaryProviderStatistics", stats)
        summary_providers = stats["summaryProviderStatistics"]
        # We don't want to take a dependency on the type name, so we just look
        # for "string" and check that the provider was invoked once.
        summary_provider_str = str(summary_providers)
        self.assertIn("string", summary_provider_str)
        self.assertIn("'count': 1", summary_provider_str)
        self.assertIn("'totalTime':", summary_provider_str)
        # We may hit the std::string C++ provider, or a summary provider string.
        self.assertIn("'type':", summary_provider_str)
        self.assertTrue(
            "c++" in summary_provider_str or "string" in summary_provider_str
        )

        self.runCmd("continue")
        self.runCmd("command script import BoxFormatter.py")
        self.expect("frame var", substrs=["box = [27]"])
        stats = self.get_target_stats(self.get_stats())
        self.assertIn("summaryProviderStatistics", stats)
        summary_providers = stats["summaryProviderStatistics"]
        summary_provider_str = str(summary_providers)
        self.assertIn("BoxFormatter.summary", summary_provider_str)
        self.assertIn("'count': 1", summary_provider_str)
        self.assertIn("'totalTime':", summary_provider_str)
        self.assertIn("'type': 'python'", summary_provider_str)

    def test_summary_statistics_providers_vec(self):
        """
        Test that summary timing statistics are included in the statistics
        dump when a type with a summary provider exists and is evaluated. This
        variation tests that a vector recurses into its child type.
        """
        self.build()
        target = self.createTestTarget()
        lldbutil.run_to_source_breakpoint(
            self, "// stop vector", lldb.SBFileSpec("main.cpp")
        )
        self.expect(
            "frame var", substrs=["int_vec", "double_vec", "[0] = 1", "[7] = 8"]
        )
        stats = self.get_target_stats(self.get_stats())
        self.assertIn("summaryProviderStatistics", stats)
        summary_providers = stats["summaryProviderStatistics"]
        summary_provider_str = str(summary_providers)
        self.assertIn("'count': 2", summary_provider_str)
        self.assertIn("'totalTime':", summary_provider_str)
        self.assertIn("'type':", summary_provider_str)
        # We may hit the std::vector C++ provider, or a summary provider string.
        if "c++" in summary_provider_str:
            self.assertIn("std::vector", summary_provider_str)

    def test_multiple_targets(self):
        """
        Test that "statistics dump" only reports the stats from the current
        target, and that "statistics dump --all-targets" includes all target
        stats.
        """
        da = {"CXX_SOURCES": "main.cpp", "EXE": self.getBuildArtifact("a.out")}
        self.build(dictionary=da)
        self.addTearDownCleanup(dictionary=da)

        db = {"CXX_SOURCES": "second.cpp", "EXE": self.getBuildArtifact("second.out")}
        self.build(dictionary=db)
        self.addTearDownCleanup(dictionary=db)

        main_exe = self.getBuildArtifact("a.out")
        second_exe = self.getBuildArtifact("second.out")

        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("main.cpp"), None, "a.out"
        )
        debugger_stats1 = self.get_stats()
        self.assertIsNotNone(self.find_module_in_metrics(main_exe, debugger_stats1))
        self.assertIsNone(self.find_module_in_metrics(second_exe, debugger_stats1))

        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(
            self, "// break here", lldb.SBFileSpec("second.cpp"), None, "second.out"
        )
        debugger_stats2 = self.get_stats()
        self.assertIsNone(self.find_module_in_metrics(main_exe, debugger_stats2))
        self.assertIsNotNone(self.find_module_in_metrics(second_exe, debugger_stats2))

        all_targets_stats = self.get_stats("--all-targets")
        self.assertIsNotNone(self.find_module_in_metrics(main_exe, all_targets_stats))
        self.assertIsNotNone(self.find_module_in_metrics(second_exe, all_targets_stats))