def dump_memory(base_addr, data, num_per_line, outfile):
    """Write a hex dump of 'data' to 'outfile'.

    Each output line shows the address (base_addr + offset), up to
    'num_per_line' bytes rendered as two-digit hex values separated by
    spaces, and the printable-ASCII rendering of those bytes ('.' for
    anything non-printable or whitespace).
    """
    data_len = len(data)
    # Decode so the pairwise join below concatenates str characters;
    # in Python 3 iterating the raw hexlify() bytes yields ints, and
    # " ".join(int + int ...) raises TypeError.
    hex_string = binascii.hexlify(data).decode("ascii")
    i = 0
    while i < data_len:
        outfile.write("0x%8.8x: " % (base_addr + i))
        bytes_left = data_len - i
        if bytes_left >= num_per_line:
            curr_data_len = num_per_line
        else:
            curr_data_len = bytes_left
        hex_start_idx = i * 2
        hex_end_idx = hex_start_idx + curr_data_len * 2
        curr_hex_str = hex_string[hex_start_idx:hex_end_idx]
        # 'curr_hex_str' now contains the hex byte string for the
        # current line with no spaces between bytes
        t = iter(curr_hex_str)
        # Print hex bytes separated by space
        outfile.write(" ".join(a + b for a, b in zip(t, t)))
        # Separate the hex column from the ASCII column.
        outfile.write("  ")
        # Calculate ASCII string for bytes into 'ascii_str'
        ascii_str = ""
        for j in range(i, i + curr_data_len):
            ch = data[j]
            # Indexing a bytes object yields an int in Python 3;
            # normalize to a one-character str for the checks below.
            if isinstance(ch, int):
                ch = chr(ch)
            if ch in string.printable and ch not in string.whitespace:
                ascii_str += "%c" % (ch)
            else:
                ascii_str += "."
        # Print ASCII representation and newline
        outfile.write(ascii_str)
        outfile.write("\n")
        i += curr_data_len
def read_packet(f, verbose=False, trace_file=None):
    """Decode a JSON packet that starts with the content length and is
    followed by the JSON bytes from a file 'f'. Returns None on EOF.
    """
    line = f.readline().decode("utf-8")
    if len(line) == 0:
        return None  # EOF.

    # Watch for line that starts with the prefix
    prefix = "Content-Length: "
    if line.startswith(prefix):
        # Decode length of JSON bytes
        if verbose:
            print('content: "%s"' % (line))
        length = int(line[len(prefix) :])
        if verbose:
            print('length: "%u"' % (length))
        # Skip the blank separator line between header and payload.
        line = f.readline()
        if verbose:
            print('empty: "%s"' % (line))
        # Read the JSON payload itself.
        json_str = f.read(length)
        if verbose:
            print('json: "%s"' % (json_str))
        if trace_file:
            trace_file.write("from adaptor:\n%s\n" % (json_str))
        # Decode the JSON bytes into a python dictionary
        return json.loads(json_str)

    raise Exception("unexpected malformed message from lldb-dap: " + line)
def packet_type_is(packet, packet_type):
    """Return True if 'packet' carries a "type" field equal to 'packet_type'."""
    if "type" not in packet:
        return False
    return packet["type"] == packet_type
def dump_dap_log(log_file):
    """Print the lldb-dap protocol log file (if one was configured),
    framed by header/footer banners for readability in test output."""
    print("========= DEBUG ADAPTER PROTOCOL LOGS =========")
    if not log_file:
        print("no log file available")
    else:
        with open(log_file, "r") as file:
            print(file.read())
    print("========= END =========")
def read_packet_thread(vs_comm, log_file):
    """Receive-loop body run on the reader thread for 'vs_comm'.

    Reads packets from the adaptor and hands each one to
    vs_comm.handle_recv_packet() until that callback returns False,
    then dumps the DAP log.
    """
    done = False
    try:
        while not done:
            packet = read_packet(vs_comm.recv, trace_file=vs_comm.trace_file)
            # `packet` will be `None` on EOF. We want to pass it down to
            # handle_recv_packet anyway so the main thread can handle unexpected
            # termination of lldb-dap and stop waiting for new packets.
            done = not vs_comm.handle_recv_packet(packet)
    finally:
        dump_dap_log(log_file)
113 class DebugCommunication(object):
114 def __init__(self
, recv
, send
, init_commands
, log_file
=None):
115 self
.trace_file
= None
118 self
.recv_packets
= []
119 self
.recv_condition
= threading
.Condition()
120 self
.recv_thread
= threading
.Thread(
121 target
=read_packet_thread
, args
=(self
, log_file
)
123 self
.process_event_body
= None
124 self
.exit_status
= None
125 self
.initialize_body
= None
126 self
.thread_stop_reasons
= {}
127 self
.breakpoint_events
= []
128 self
.progress_events
= []
129 self
.reverse_requests
= []
132 self
.recv_thread
.start()
133 self
.output_condition
= threading
.Condition()
135 self
.configuration_done_sent
= False
136 self
.frame_scopes
= {}
137 self
.init_commands
= init_commands
138 self
.disassembled_instructions
= {}
141 def encode_content(cls
, s
):
142 return ("Content-Length: %u\r\n\r\n%s" % (len(s
), s
)).encode("utf-8")
145 def validate_response(cls
, command
, response
):
146 if command
["command"] != response
["command"]:
147 raise ValueError("command mismatch in response")
148 if command
["seq"] != response
["request_seq"]:
149 raise ValueError("seq mismatch in response")
151 def get_modules(self
):
152 module_list
= self
.request_modules()["body"]["modules"]
154 for module
in module_list
:
155 modules
[module
["name"]] = module
158 def get_output(self
, category
, timeout
=0.0, clear
=True):
159 self
.output_condition
.acquire()
161 if category
in self
.output
:
162 output
= self
.output
[category
]
164 del self
.output
[category
]
166 self
.output_condition
.wait(timeout
)
167 if category
in self
.output
:
168 output
= self
.output
[category
]
170 del self
.output
[category
]
171 self
.output_condition
.release()
174 def collect_output(self
, category
, timeout_secs
, pattern
, clear
=True):
175 end_time
= time
.time() + timeout_secs
176 collected_output
= ""
177 while end_time
> time
.time():
178 output
= self
.get_output(category
, timeout
=0.25, clear
=clear
)
180 collected_output
+= output
181 if pattern
is not None and pattern
in output
:
183 return collected_output
if collected_output
else None
185 def enqueue_recv_packet(self
, packet
):
186 self
.recv_condition
.acquire()
187 self
.recv_packets
.append(packet
)
188 self
.recv_condition
.notify()
189 self
.recv_condition
.release()
191 def handle_recv_packet(self
, packet
):
192 """Called by the read thread that is waiting for all incoming packets
193 to store the incoming packet in "self.recv_packets" in a thread safe
194 way. This function will then signal the "self.recv_condition" to
195 indicate a new packet is available. Returns True if the caller
196 should keep calling this function for more packets.
198 # If EOF, notify the read thread by enqueuing a None.
200 self
.enqueue_recv_packet(None)
203 # Check the packet to see if is an event packet
205 packet_type
= packet
["type"]
206 if packet_type
== "event":
207 event
= packet
["event"]
210 body
= packet
["body"]
211 # Handle the event packet and cache information from these packets
213 if event
== "output":
214 # Store any output we receive so clients can retrieve it later.
215 category
= body
["category"]
216 output
= body
["output"]
217 self
.output_condition
.acquire()
218 if category
in self
.output
:
219 self
.output
[category
] += output
221 self
.output
[category
] = output
222 self
.output_condition
.notify()
223 self
.output_condition
.release()
224 # no need to add 'output' event packets to our packets list
226 elif event
== "process":
227 # When a new process is attached or launched, remember the
228 # details that are available in the body of the event
229 self
.process_event_body
= body
230 elif event
== "stopped":
231 # Each thread that stops with a reason will send a
232 # 'stopped' event. We need to remember the thread stop
233 # reasons since the 'threads' command doesn't return
235 self
._process
_stopped
()
236 tid
= body
["threadId"]
237 self
.thread_stop_reasons
[tid
] = body
238 elif event
== "breakpoint":
239 # Breakpoint events come in when a breakpoint has locations
240 # added or removed. Keep track of them so we can look for them
242 self
.breakpoint_events
.append(packet
)
243 # no need to add 'breakpoint' event packets to our packets list
245 elif event
.startswith("progress"):
246 # Progress events come in as 'progressStart', 'progressUpdate',
247 # and 'progressEnd' events. Keep these around in case test
248 # cases want to verify them.
249 self
.progress_events
.append(packet
)
250 # No need to add 'progress' event packets to our packets list.
253 elif packet_type
== "response":
254 if packet
["command"] == "disconnect":
256 self
.enqueue_recv_packet(packet
)
259 def send_packet(self
, command_dict
, set_sequence
=True):
260 """Take the "command_dict" python dictionary and encode it as a JSON
261 string and send the contents as a packet to the VSCode debug
263 # Set the sequence ID for this command automatically
265 command_dict
["seq"] = self
.sequence
267 # Encode our command dictionary as a JSON string
268 json_str
= json
.dumps(command_dict
, separators
=(",", ":"))
270 self
.trace_file
.write("to adaptor:\n%s\n" % (json_str
))
271 length
= len(json_str
)
273 # Send the encoded JSON packet and flush the 'send' file
274 self
.send
.write(self
.encode_content(json_str
))
277 def recv_packet(self
, filter_type
=None, filter_event
=None, timeout
=None):
278 """Get a JSON packet from the VSCode debug adaptor. This function
279 assumes a thread that reads packets is running and will deliver
280 any received packets by calling handle_recv_packet(...). This
281 function will wait for the packet to arrive and return it when
285 self
.recv_condition
.acquire()
288 for i
, curr_packet
in enumerate(self
.recv_packets
):
291 packet_type
= curr_packet
["type"]
292 if filter_type
is None or packet_type
in filter_type
:
293 if filter_event
is None or (
294 packet_type
== "event"
295 and curr_packet
["event"] in filter_event
297 packet
= self
.recv_packets
.pop(i
)
301 # Sleep until packet is received
302 len_before
= len(self
.recv_packets
)
303 self
.recv_condition
.wait(timeout
)
304 len_after
= len(self
.recv_packets
)
305 if len_before
== len_after
:
306 return None # Timed out
311 self
.recv_condition
.release()
315 def send_recv(self
, command
):
316 """Send a command python dictionary as JSON and receive the JSON
317 response. Validates that the response is the correct sequence and
318 command in the reply. Any events that are received are added to the
319 events list in this object"""
320 self
.send_packet(command
)
323 response_or_request
= self
.recv_packet(filter_type
=["response", "request"])
324 if response_or_request
is None:
325 desc
= 'no response for "%s"' % (command
["command"])
326 raise ValueError(desc
)
327 if response_or_request
["type"] == "response":
328 self
.validate_response(command
, response_or_request
)
329 return response_or_request
331 self
.reverse_requests
.append(response_or_request
)
332 if response_or_request
["command"] == "runInTerminal":
334 response_or_request
["arguments"]["args"],
335 env
=response_or_request
["arguments"]["env"],
341 "request_seq": response_or_request
["seq"],
343 "command": "runInTerminal",
348 elif response_or_request
["command"] == "startDebugging":
353 "request_seq": response_or_request
["seq"],
355 "command": "startDebugging",
361 desc
= 'unknown reverse request "%s"' % (
362 response_or_request
["command"]
364 raise ValueError(desc
)
368 def wait_for_event(self
, filter=None, timeout
=None):
370 return self
.recv_packet(
371 filter_type
="event", filter_event
=filter, timeout
=timeout
375 def wait_for_stopped(self
, timeout
=None):
377 stopped_event
= self
.wait_for_event(
378 filter=["stopped", "exited"], timeout
=timeout
382 stopped_events
.append(stopped_event
)
383 # If we exited, then we are done
384 if stopped_event
["event"] == "exited":
385 self
.exit_status
= stopped_event
["body"]["exitCode"]
388 # Otherwise we stopped and there might be one or more 'stopped'
389 # events for each thread that stopped with a reason, so keep
390 # checking for more 'stopped' events and return all of them
391 stopped_event
= self
.wait_for_event(filter="stopped", timeout
=0.25)
394 return stopped_events
396 def wait_for_exited(self
):
397 event_dict
= self
.wait_for_event("exited")
398 if event_dict
is None:
399 raise ValueError("didn't get exited event")
402 def wait_for_terminated(self
):
403 event_dict
= self
.wait_for_event("terminated")
404 if event_dict
is None:
405 raise ValueError("didn't get terminated event")
408 def get_initialize_value(self
, key
):
409 """Get a value for the given key if it there is a key/value pair in
410 the "initialize" request response body.
412 if self
.initialize_body
and key
in self
.initialize_body
:
413 return self
.initialize_body
[key
]
416 def get_threads(self
):
417 if self
.threads
is None:
418 self
.request_threads()
421 def get_thread_id(self
, threadIndex
=0):
422 """Utility function to get the first thread ID in the thread list.
423 If the thread list is empty, then fetch the threads.
425 if self
.threads
is None:
426 self
.request_threads()
427 if self
.threads
and threadIndex
< len(self
.threads
):
428 return self
.threads
[threadIndex
]["id"]
431 def get_stackFrame(self
, frameIndex
=0, threadId
=None):
432 """Get a single "StackFrame" object from a "stackTrace" request and
433 return the "StackFrame" as a python dictionary, or None on failure
436 threadId
= self
.get_thread_id()
438 print("invalid threadId")
440 response
= self
.request_stackTrace(threadId
, startFrame
=frameIndex
, levels
=1)
442 return response
["body"]["stackFrames"][0]
443 print("invalid response")
446 def get_completions(self
, text
, frameId
=None):
448 stackFrame
= self
.get_stackFrame()
449 frameId
= stackFrame
["id"]
450 response
= self
.request_completions(text
, frameId
)
451 return response
["body"]["targets"]
453 def get_scope_variables(self
, scope_name
, frameIndex
=0, threadId
=None, is_hex
=None):
454 stackFrame
= self
.get_stackFrame(frameIndex
=frameIndex
, threadId
=threadId
)
455 if stackFrame
is None:
457 frameId
= stackFrame
["id"]
458 if frameId
in self
.frame_scopes
:
459 frame_scopes
= self
.frame_scopes
[frameId
]
461 scopes_response
= self
.request_scopes(frameId
)
462 frame_scopes
= scopes_response
["body"]["scopes"]
463 self
.frame_scopes
[frameId
] = frame_scopes
464 for scope
in frame_scopes
:
465 if scope
["name"] == scope_name
:
466 varRef
= scope
["variablesReference"]
467 variables_response
= self
.request_variables(varRef
, is_hex
=is_hex
)
468 if variables_response
:
469 if "body" in variables_response
:
470 body
= variables_response
["body"]
471 if "variables" in body
:
472 vars = body
["variables"]
476 def get_global_variables(self
, frameIndex
=0, threadId
=None):
477 return self
.get_scope_variables(
478 "Globals", frameIndex
=frameIndex
, threadId
=threadId
481 def get_local_variables(self
, frameIndex
=0, threadId
=None, is_hex
=None):
482 return self
.get_scope_variables(
483 "Locals", frameIndex
=frameIndex
, threadId
=threadId
, is_hex
=is_hex
486 def get_registers(self
, frameIndex
=0, threadId
=None):
487 return self
.get_scope_variables(
488 "Registers", frameIndex
=frameIndex
, threadId
=threadId
491 def get_local_variable(self
, name
, frameIndex
=0, threadId
=None, is_hex
=None):
492 locals = self
.get_local_variables(
493 frameIndex
=frameIndex
, threadId
=threadId
, is_hex
=is_hex
496 if "name" in local
and local
["name"] == name
:
500 def get_local_variable_value(self
, name
, frameIndex
=0, threadId
=None, is_hex
=None):
501 variable
= self
.get_local_variable(
502 name
, frameIndex
=frameIndex
, threadId
=threadId
, is_hex
=is_hex
504 if variable
and "value" in variable
:
505 return variable
["value"]
508 def get_local_variable_child(
509 self
, name
, child_name
, frameIndex
=0, threadId
=None, is_hex
=None
511 local
= self
.get_local_variable(name
, frameIndex
, threadId
)
512 if local
["variablesReference"] == 0:
514 children
= self
.request_variables(local
["variablesReference"], is_hex
=is_hex
)[
517 for child
in children
:
518 if child
["name"] == child_name
:
522 def replay_packets(self
, replay_file_path
):
523 f
= open(replay_file_path
, "r")
528 if mode
== "invalid":
530 if line
.startswith("to adapter:"):
532 elif line
.startswith("from adapter:"):
535 command_dict
= read_packet(f
)
536 # Skip the end of line that follows the JSON
538 if command_dict
is None:
539 raise ValueError("decode packet failed from replay file")
541 pprint
.PrettyPrinter(indent
=2).pprint(command_dict
)
542 # raw_input('Press ENTER to send:')
543 self
.send_packet(command_dict
, set_sequence
)
546 print("Replay response:")
547 replay_response
= read_packet(f
)
548 # Skip the end of line that follows the JSON
550 pprint
.PrettyPrinter(indent
=2).pprint(replay_response
)
551 actual_response
= self
.recv_packet()
553 type = actual_response
["type"]
554 print("Actual response:")
555 if type == "response":
556 self
.validate_response(command_dict
, actual_response
)
557 pprint
.PrettyPrinter(indent
=2).pprint(actual_response
)
559 print("error: didn't get a valid response")
573 terminateCommands
=None,
575 postRunCommands
=None,
578 gdbRemoteHostname
=None,
582 args_dict
["pid"] = pid
583 if program
is not None:
584 args_dict
["program"] = program
585 if waitFor
is not None:
586 args_dict
["waitFor"] = waitFor
588 args_dict
["trace"] = trace
589 args_dict
["initCommands"] = self
.init_commands
591 args_dict
["initCommands"].extend(initCommands
)
593 args_dict
["preRunCommands"] = preRunCommands
595 args_dict
["stopCommands"] = stopCommands
597 args_dict
["exitCommands"] = exitCommands
598 if terminateCommands
:
599 args_dict
["terminateCommands"] = terminateCommands
601 args_dict
["attachCommands"] = attachCommands
603 args_dict
["coreFile"] = coreFile
605 args_dict
["postRunCommands"] = postRunCommands
607 args_dict
["sourceMap"] = sourceMap
608 if gdbRemotePort
is not None:
609 args_dict
["gdb-remote-port"] = gdbRemotePort
610 if gdbRemoteHostname
is not None:
611 args_dict
["gdb-remote-hostname"] = gdbRemoteHostname
612 command_dict
= {"command": "attach", "type": "request", "arguments": args_dict
}
613 return self
.send_recv(command_dict
)
615 def request_configurationDone(self
):
617 "command": "configurationDone",
621 response
= self
.send_recv(command_dict
)
623 self
.configuration_done_sent
= True
626 def _process_stopped(self
):
628 self
.frame_scopes
= {}
630 def request_continue(self
, threadId
=None):
631 if self
.exit_status
is not None:
632 raise ValueError("request_continue called after process exited")
633 # If we have launched or attached, then the first continue is done by
634 # sending the 'configurationDone' request
635 if not self
.configuration_done_sent
:
636 return self
.request_configurationDone()
639 threadId
= self
.get_thread_id()
640 args_dict
["threadId"] = threadId
642 "command": "continue",
644 "arguments": args_dict
,
646 response
= self
.send_recv(command_dict
)
647 # Caller must still call wait_for_stopped.
650 def request_restart(self
, restartArguments
=None):
652 "command": "restart",
656 command_dict
["arguments"] = restartArguments
658 response
= self
.send_recv(command_dict
)
659 # Caller must still call wait_for_stopped.
662 def request_disconnect(self
, terminateDebuggee
=None):
664 if terminateDebuggee
is not None:
665 if terminateDebuggee
:
666 args_dict
["terminateDebuggee"] = True
668 args_dict
["terminateDebuggee"] = False
670 "command": "disconnect",
672 "arguments": args_dict
,
674 return self
.send_recv(command_dict
)
676 def request_disassemble(
677 self
, memoryReference
, offset
=-50, instructionCount
=200, resolveSymbols
=True
680 "memoryReference": memoryReference
,
682 "instructionCount": instructionCount
,
683 "resolveSymbols": resolveSymbols
,
686 "command": "disassemble",
688 "arguments": args_dict
,
690 instructions
= self
.send_recv(command_dict
)["body"]["instructions"]
691 for inst
in instructions
:
692 self
.disassembled_instructions
[inst
["address"]] = inst
694 def request_readMemory(self
, memoryReference
, offset
, count
):
696 "memoryReference": memoryReference
,
701 "command": "readMemory",
703 "arguments": args_dict
,
705 return self
.send_recv(command_dict
)
707 def request_evaluate(self
, expression
, frameIndex
=0, threadId
=None, context
=None):
708 stackFrame
= self
.get_stackFrame(frameIndex
=frameIndex
, threadId
=threadId
)
709 if stackFrame
is None:
712 "expression": expression
,
714 "frameId": stackFrame
["id"],
717 "command": "evaluate",
719 "arguments": args_dict
,
721 return self
.send_recv(command_dict
)
723 def request_exceptionInfo(self
, threadId
=None):
725 threadId
= self
.get_thread_id()
726 args_dict
= {"threadId": threadId
}
728 "command": "exceptionInfo",
730 "arguments": args_dict
,
732 return self
.send_recv(command_dict
)
734 def request_initialize(self
, sourceInitFile
):
736 "command": "initialize",
739 "adapterID": "lldb-native",
740 "clientID": "vscode",
741 "columnsStartAt1": True,
742 "linesStartAt1": True,
744 "pathFormat": "path",
745 "supportsRunInTerminalRequest": True,
746 "supportsVariablePaging": True,
747 "supportsVariableType": True,
748 "supportsStartDebuggingRequest": True,
749 "sourceInitFile": sourceInitFile
,
752 response
= self
.send_recv(command_dict
)
754 if "body" in response
:
755 self
.initialize_body
= response
["body"]
767 shellExpandArguments
=False,
773 terminateCommands
=None,
779 postRunCommands
=None,
780 enableAutoVariableSummaries
=False,
781 displayExtendedBacktrace
=False,
782 enableSyntheticChildDebugging
=False,
783 commandEscapePrefix
=None,
784 customFrameFormat
=None,
785 customThreadFormat
=None,
787 args_dict
= {"program": program
}
789 args_dict
["args"] = args
791 args_dict
["cwd"] = cwd
793 args_dict
["env"] = env
795 args_dict
["stopOnEntry"] = stopOnEntry
797 args_dict
["disableSTDIO"] = disableSTDIO
798 if shellExpandArguments
:
799 args_dict
["shellExpandArguments"] = shellExpandArguments
801 args_dict
["trace"] = trace
802 args_dict
["initCommands"] = self
.init_commands
804 args_dict
["initCommands"].extend(initCommands
)
806 args_dict
["preRunCommands"] = preRunCommands
808 args_dict
["stopCommands"] = stopCommands
810 args_dict
["exitCommands"] = exitCommands
811 if terminateCommands
:
812 args_dict
["terminateCommands"] = terminateCommands
814 args_dict
["sourcePath"] = sourcePath
816 args_dict
["debuggerRoot"] = debuggerRoot
818 args_dict
["launchCommands"] = launchCommands
820 args_dict
["sourceMap"] = sourceMap
822 args_dict
["runInTerminal"] = runInTerminal
824 args_dict
["postRunCommands"] = postRunCommands
825 if customFrameFormat
:
826 args_dict
["customFrameFormat"] = customFrameFormat
827 if customThreadFormat
:
828 args_dict
["customThreadFormat"] = customThreadFormat
830 args_dict
["disableASLR"] = disableASLR
831 args_dict
["enableAutoVariableSummaries"] = enableAutoVariableSummaries
832 args_dict
["enableSyntheticChildDebugging"] = enableSyntheticChildDebugging
833 args_dict
["displayExtendedBacktrace"] = displayExtendedBacktrace
834 args_dict
["commandEscapePrefix"] = commandEscapePrefix
835 command_dict
= {"command": "launch", "type": "request", "arguments": args_dict
}
836 response
= self
.send_recv(command_dict
)
838 if response
["success"]:
839 # Wait for a 'process' and 'initialized' event in any order
840 self
.wait_for_event(filter=["process", "initialized"])
841 self
.wait_for_event(filter=["process", "initialized"])
844 def request_next(self
, threadId
, granularity
="statement"):
845 if self
.exit_status
is not None:
846 raise ValueError("request_continue called after process exited")
847 args_dict
= {"threadId": threadId
, "granularity": granularity
}
848 command_dict
= {"command": "next", "type": "request", "arguments": args_dict
}
849 return self
.send_recv(command_dict
)
851 def request_stepIn(self
, threadId
, targetId
, granularity
="statement"):
852 if self
.exit_status
is not None:
853 raise ValueError("request_stepIn called after process exited")
855 "threadId": threadId
,
856 "targetId": targetId
,
857 "granularity": granularity
,
859 command_dict
= {"command": "stepIn", "type": "request", "arguments": args_dict
}
860 return self
.send_recv(command_dict
)
862 def request_stepInTargets(self
, frameId
):
863 if self
.exit_status
is not None:
864 raise ValueError("request_stepInTargets called after process exited")
865 args_dict
= {"frameId": frameId
}
867 "command": "stepInTargets",
869 "arguments": args_dict
,
871 return self
.send_recv(command_dict
)
873 def request_stepOut(self
, threadId
):
874 if self
.exit_status
is not None:
875 raise ValueError("request_stepOut called after process exited")
876 args_dict
= {"threadId": threadId
}
877 command_dict
= {"command": "stepOut", "type": "request", "arguments": args_dict
}
878 return self
.send_recv(command_dict
)
880 def request_pause(self
, threadId
=None):
881 if self
.exit_status
is not None:
882 raise ValueError("request_pause called after process exited")
884 threadId
= self
.get_thread_id()
885 args_dict
= {"threadId": threadId
}
886 command_dict
= {"command": "pause", "type": "request", "arguments": args_dict
}
887 return self
.send_recv(command_dict
)
889 def request_scopes(self
, frameId
):
890 args_dict
= {"frameId": frameId
}
891 command_dict
= {"command": "scopes", "type": "request", "arguments": args_dict
}
892 return self
.send_recv(command_dict
)
894 def request_setBreakpoints(self
, file_path
, line_array
, data
=None):
895 """data is array of parameters for breakpoints in line_array.
896 Each parameter object is 1:1 mapping with entries in line_entry.
897 It contains optional location/hitCondition/logMessage parameters.
899 (dir, base
) = os
.path
.split(file_path
)
900 source_dict
= {"name": base
, "path": file_path
}
902 "source": source_dict
,
903 "sourceModified": False,
905 if line_array
is not None:
906 args_dict
["lines"] = "%s" % line_array
908 for i
, line
in enumerate(line_array
):
909 breakpoint_data
= None
910 if data
is not None and i
< len(data
):
911 breakpoint_data
= data
[i
]
913 if breakpoint_data
is not None:
914 if "condition" in breakpoint_data
and breakpoint_data
["condition"]:
915 bp
["condition"] = breakpoint_data
["condition"]
917 "hitCondition" in breakpoint_data
918 and breakpoint_data
["hitCondition"]
920 bp
["hitCondition"] = breakpoint_data
["hitCondition"]
922 "logMessage" in breakpoint_data
923 and breakpoint_data
["logMessage"]
925 bp
["logMessage"] = breakpoint_data
["logMessage"]
926 breakpoints
.append(bp
)
927 args_dict
["breakpoints"] = breakpoints
930 "command": "setBreakpoints",
932 "arguments": args_dict
,
934 return self
.send_recv(command_dict
)
936 def request_setExceptionBreakpoints(self
, filters
):
937 args_dict
= {"filters": filters
}
939 "command": "setExceptionBreakpoints",
941 "arguments": args_dict
,
943 return self
.send_recv(command_dict
)
945 def request_setFunctionBreakpoints(self
, names
, condition
=None, hitCondition
=None):
949 if condition
is not None:
950 bp
["condition"] = condition
951 if hitCondition
is not None:
952 bp
["hitCondition"] = hitCondition
953 breakpoints
.append(bp
)
954 args_dict
= {"breakpoints": breakpoints
}
956 "command": "setFunctionBreakpoints",
958 "arguments": args_dict
,
960 return self
.send_recv(command_dict
)
962 def request_dataBreakpointInfo(
963 self
, variablesReference
, name
, frameIndex
=0, threadId
=None
965 stackFrame
= self
.get_stackFrame(frameIndex
=frameIndex
, threadId
=threadId
)
966 if stackFrame
is None:
969 "variablesReference": variablesReference
,
971 "frameId": stackFrame
["id"],
974 "command": "dataBreakpointInfo",
976 "arguments": args_dict
,
978 return self
.send_recv(command_dict
)
980 def request_setDataBreakpoint(self
, dataBreakpoints
):
981 """dataBreakpoints is a list of dictionary with following fields:
983 dataId: (address in hex)/(size in bytes)
984 accessType: read/write/readWrite
986 [hitCondition]: string
989 args_dict
= {"breakpoints": dataBreakpoints
}
991 "command": "setDataBreakpoints",
993 "arguments": args_dict
,
995 return self
.send_recv(command_dict
)
997 def request_compileUnits(self
, moduleId
):
998 args_dict
= {"moduleId": moduleId
}
1000 "command": "compileUnits",
1002 "arguments": args_dict
,
1004 response
= self
.send_recv(command_dict
)
1007 def request_completions(self
, text
, frameId
=None):
1008 args_dict
= {"text": text
, "column": len(text
) + 1}
1010 args_dict
["frameId"] = frameId
1012 "command": "completions",
1014 "arguments": args_dict
,
1016 return self
.send_recv(command_dict
)
1018 def request_modules(self
):
1019 return self
.send_recv({"command": "modules", "type": "request"})
1021 def request_stackTrace(
1022 self
, threadId
=None, startFrame
=None, levels
=None, dump
=False
1024 if threadId
is None:
1025 threadId
= self
.get_thread_id()
1026 args_dict
= {"threadId": threadId
}
1027 if startFrame
is not None:
1028 args_dict
["startFrame"] = startFrame
1029 if levels
is not None:
1030 args_dict
["levels"] = levels
1032 "command": "stackTrace",
1034 "arguments": args_dict
,
1036 response
= self
.send_recv(command_dict
)
1038 for idx
, frame
in enumerate(response
["body"]["stackFrames"]):
1039 name
= frame
["name"]
1040 if "line" in frame
and "source" in frame
:
1041 source
= frame
["source"]
1042 if "sourceReference" not in source
:
1043 if "name" in source
:
1044 source_name
= source
["name"]
1045 line
= frame
["line"]
1046 print("[%3u] %s @ %s:%u" % (idx
, name
, source_name
, line
))
1048 print("[%3u] %s" % (idx
, name
))
1051 def request_threads(self
):
1052 """Request a list of all threads and combine any information from any
1053 "stopped" events since those contain more information about why a
1054 thread actually stopped. Returns an array of thread dictionaries
1055 with information about all threads"""
1056 command_dict
= {"command": "threads", "type": "request", "arguments": {}}
1057 response
= self
.send_recv(command_dict
)
1058 body
= response
["body"]
1059 # Fill in "self.threads" correctly so that clients that call
1060 # self.get_threads() or self.get_thread_id(...) can get information
1061 # on threads when the process is stopped.
1062 if "threads" in body
:
1063 self
.threads
= body
["threads"]
1064 for thread
in self
.threads
:
1065 # Copy the thread dictionary so we can add key/value pairs to
1066 # it without affecting the original info from the "threads"
1069 if tid
in self
.thread_stop_reasons
:
1070 thread_stop_info
= self
.thread_stop_reasons
[tid
]
1071 copy_keys
= ["reason", "description", "text"]
1072 for key
in copy_keys
:
1073 if key
in thread_stop_info
:
1074 thread
[key
] = thread_stop_info
[key
]
1079 def request_variables(
1080 self
, variablesReference
, start
=None, count
=None, is_hex
=None
1082 args_dict
= {"variablesReference": variablesReference
}
1083 if start
is not None:
1084 args_dict
["start"] = start
1085 if count
is not None:
1086 args_dict
["count"] = count
1087 if is_hex
is not None:
1088 args_dict
["format"] = {"hex": is_hex
}
1090 "command": "variables",
1092 "arguments": args_dict
,
1094 return self
.send_recv(command_dict
)
1096 def request_setVariable(self
, containingVarRef
, name
, value
, id=None):
1098 "variablesReference": containingVarRef
,
1100 "value": str(value
),
1103 args_dict
["id"] = id
1105 "command": "setVariable",
1107 "arguments": args_dict
,
1109 return self
.send_recv(command_dict
)
1111 def request_locations(self
, locationReference
):
1113 "locationReference": locationReference
,
1116 "command": "locations",
1118 "arguments": args_dict
,
1120 return self
.send_recv(command_dict
)
1122 def request_testGetTargetBreakpoints(self
):
1123 """A request packet used in the LLDB test suite to get all currently
1124 set breakpoint infos for all breakpoints currently set in the
1128 "command": "_testGetTargetBreakpoints",
1132 return self
.send_recv(command_dict
)
1134 def terminate(self
):
1138 def request_setInstructionBreakpoints(self
, memory_reference
=[]):
1140 for i
in memory_reference
:
1142 "instructionReference": i
,
1144 breakpoints
.append(args_dict
)
1145 args_dict
= {"breakpoints": breakpoints
}
1147 "command": "setInstructionBreakpoints",
1149 "arguments": args_dict
,
1151 return self
.send_recv(command_dict
)
1153 class DebugAdaptorServer(DebugCommunication
):
1163 if executable
is not None:
1164 adaptor_env
= os
.environ
.copy()
1166 adaptor_env
.update(env
)
1169 adaptor_env
["LLDBDAP_LOG"] = log_file
1170 self
.process
= subprocess
.Popen(
1172 stdin
=subprocess
.PIPE
,
1173 stdout
=subprocess
.PIPE
,
1174 stderr
=subprocess
.PIPE
,
1177 DebugCommunication
.__init
__(
1178 self
, self
.process
.stdout
, self
.process
.stdin
, init_commands
, log_file
1180 elif port
is not None:
1181 s
= socket
.socket(socket
.AF_INET
, socket
.SOCK_STREAM
)
1182 s
.connect(("127.0.0.1", port
))
1183 DebugCommunication
.__init
__(
1184 self
, s
.makefile("r"), s
.makefile("w"), init_commands
1189 return self
.process
.pid
1192 def terminate(self
):
1193 super(DebugAdaptorServer
, self
).terminate()
1194 if self
.process
is not None:
1195 self
.process
.terminate()
1200 def attach_options_specified(options
):
1201 if options
.pid
is not None:
1207 if options
.attachCmds
:
def run_vscode(dbg, args, options):
    """Drive one debug session over 'dbg' according to the parsed command
    line 'options': initialize, attach or launch, set breakpoints, run to
    the first stop, then disconnect.

    dbg -- a DebugCommunication-derived connection to the adaptor.
    args -- command line arguments for the launched program.
    options -- the optparse options object produced by main().
    """
    dbg.request_initialize(options.sourceInitFile)
    if attach_options_specified(options):
        response = dbg.request_attach(
            program=options.program,
            pid=options.pid,
            waitFor=options.waitFor,
            attachCommands=options.attachCmds,
            initCommands=options.initCmds,
            preRunCommands=options.preRunCmds,
            stopCommands=options.stopCmds,
            exitCommands=options.exitCmds,
            terminateCommands=options.terminateCmds,
        )
    else:
        response = dbg.request_launch(
            options.program,
            args=args,
            env=options.envs,
            cwd=options.workingDir,
            debuggerRoot=options.debuggerRoot,
            sourcePath=options.sourcePath,
            initCommands=options.initCmds,
            preRunCommands=options.preRunCmds,
            stopCommands=options.stopCmds,
            exitCommands=options.exitCmds,
            terminateCommands=options.terminateCmds,
        )

    if response["success"]:
        if options.sourceBreakpoints:
            # Group "path:line" breakpoint specs by source file.
            source_to_lines = {}
            for file_line in options.sourceBreakpoints:
                # rpartition instead of split(":"): a spec with no colon, or
                # a path that itself contains colons, must report an error
                # rather than raise ValueError on unpacking.
                (path, sep, line) = file_line.rpartition(":")
                if not sep or not path or not line:
                    print('error: invalid source with line "%s"' % (file_line))
                    continue
                try:
                    line_num = int(line)
                except ValueError:
                    # A non-numeric line number used to crash; report it.
                    print('error: invalid source with line "%s"' % (file_line))
                    continue
                source_to_lines.setdefault(path, []).append(line_num)
            for source in source_to_lines:
                dbg.request_setBreakpoints(source, source_to_lines[source])
        if options.funcBreakpoints:
            dbg.request_setFunctionBreakpoints(options.funcBreakpoints)
        dbg.request_configurationDone()
        dbg.wait_for_stopped()
    else:
        if "message" in response:
            print(response["message"])
    dbg.request_disconnect(terminateDebuggee=True)
def main():
    """Parse the command line and run one VSCode DAP test session, either
    by spawning the adaptor executable (--vscode) or by connecting to an
    already-running adaptor (--port).
    """
    parser = optparse.OptionParser(
        description=(
            "A testing framework for the Visual Studio Code Debug Adaptor protocol"
        )
    )
    parser.add_option(
        "--vscode",
        type="string",
        dest="vscode_path",
        help=(
            "The path to the command line program that implements the "
            "Visual Studio Code Debug Adaptor protocol."
        ),
        default=None,
    )
    parser.add_option(
        "--program",
        type="string",
        dest="program",
        help="The path to the program to debug.",
        default=None,
    )
    parser.add_option(
        "--workingDir",
        type="string",
        dest="workingDir",
        default=None,
        help="Set the working directory for the process we launch.",
    )
    parser.add_option(
        "--sourcePath",
        type="string",
        dest="sourcePath",
        default=None,
        help=(
            "Set the relative source root for any debug info that has "
            "relative paths in it."
        ),
    )
    parser.add_option(
        "--debuggerRoot",
        type="string",
        dest="debuggerRoot",
        default=None,
        help=(
            "Set the working directory for lldb-dap for any object files "
            "with relative paths in the Mach-o debug map."
        ),
    )
    parser.add_option(
        "-r",
        "--replay",
        type="string",
        dest="replay",
        help=(
            "Specify a file containing a packet log to replay with the "
            "current Visual Studio Code Debug Adaptor executable."
        ),
        default=None,
    )
    parser.add_option(
        "-g",
        "--debug",
        action="store_true",
        dest="debug",
        default=False,
        help="Pause waiting for a debugger to attach to the debug adaptor",
    )
    parser.add_option(
        "--sourceInitFile",
        action="store_true",
        dest="sourceInitFile",
        default=False,
        help="Whether lldb-dap should source .lldbinit file or not",
    )
    parser.add_option(
        "--port",
        type="int",
        dest="port",
        help="Attach a socket to a port instead of using STDIN for VSCode",
        default=None,
    )
    parser.add_option(
        "--pid",
        type="int",
        dest="pid",
        help="The process ID to attach to",
        default=None,
    )
    parser.add_option(
        "--attach",
        action="store_true",
        dest="attach",
        default=False,
        help=(
            "Specify this option to attach to a process by name. The "
            "process name is the basename of the executable specified with "
            "the --program option."
        ),
    )
    parser.add_option(
        "-f",
        "--function-bp",
        type="string",
        action="append",
        dest="funcBreakpoints",
        help=(
            "Specify the name of a function to break at. "
            "Can be specified more than once."
        ),
        default=[],
    )
    parser.add_option(
        "-s",
        "--source-bp",
        type="string",
        action="append",
        dest="sourceBreakpoints",
        default=[],
        help=(
            "Specify source breakpoints to set in the format of "
            "<source>:<line>. "
            "Can be specified more than once."
        ),
    )
    parser.add_option(
        "--attachCommand",
        type="string",
        action="append",
        dest="attachCmds",
        default=[],
        help=(
            "Specify a LLDB command that will attach to a process. "
            "Can be specified more than once."
        ),
    )
    parser.add_option(
        "--initCommand",
        type="string",
        action="append",
        dest="initCmds",
        default=[],
        help=(
            "Specify a LLDB command that will be executed before the target "
            "is created. Can be specified more than once."
        ),
    )
    parser.add_option(
        "--preRunCommand",
        type="string",
        action="append",
        dest="preRunCmds",
        default=[],
        help=(
            "Specify a LLDB command that will be executed after the target "
            "has been created. Can be specified more than once."
        ),
    )
    parser.add_option(
        "--stopCommand",
        type="string",
        action="append",
        dest="stopCmds",
        default=[],
        help=(
            "Specify a LLDB command that will be executed each time the"
            "process stops. Can be specified more than once."
        ),
    )
    parser.add_option(
        "--exitCommand",
        type="string",
        action="append",
        dest="exitCmds",
        default=[],
        help=(
            "Specify a LLDB command that will be executed when the process "
            "exits. Can be specified more than once."
        ),
    )
    parser.add_option(
        "--terminateCommand",
        type="string",
        action="append",
        dest="terminateCmds",
        default=[],
        help=(
            "Specify a LLDB command that will be executed when the debugging "
            "session is terminated. Can be specified more than once."
        ),
    )
    parser.add_option(
        "--env",
        type="string",
        action="append",
        dest="envs",
        default=[],
        help=("Specify environment variables to pass to the launched " "process."),
    )
    parser.add_option(
        "--waitFor",
        action="store_true",
        dest="waitFor",
        default=False,
        help=(
            "Wait for the next process to be launched whose name matches "
            "the basename of the program specified with the --program "
            "option."
        ),
    )
    (options, args) = parser.parse_args(sys.argv[1:])

    # One of the two transports must be selected.
    if options.vscode_path is None and options.port is None:
        print(
            "error: must either specify a path to a Visual Studio Code "
            "Debug Adaptor vscode executable path using the --vscode "
            "option, or a port to attach to for an existing lldb-dap "
            "using the --port option"
        )
        return
    dbg = DebugAdaptorServer(executable=options.vscode_path, port=options.port)
    if options.debug:
        # Was 'raw_input', which is Python 2 only and raises NameError on
        # Python 3; 'input' is the Python 3 equivalent.
        input('Waiting for debugger to attach pid "%i"' % (dbg.get_pid()))
    if options.replay:
        dbg.replay_packets(options.replay)
    else:
        run_vscode(dbg, args, options)
    dbg.terminate()
1519 if __name__
== "__main__":