//===-- StackFrameList.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Host/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StackFrameRecognizer.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;
// StackFrameList constructor
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_frames(),
      m_selected_frame_idx(), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}
StackFrameList::~StackFrameList() {
  // Call clear since this takes a lock and clears the stack frame list in case
  // another thread is currently using this stack frame list.
  Clear();
}
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log = GetLog(LLDBLog::Step);
      if (log && log->GetVerbose())
        LLDB_LOGF(
            log,
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}
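// Note on the depth/index relationship: GetFrameAtIndex() adds the value
// returned here to the caller-supplied index, so with a current inlined depth
// of 2 a request for "visible" frame 0 resolves to internal frame 2. The
// invalidation above (when the PC has moved) keeps a stale depth from skewing
// that mapping after the thread has run.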
void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;

  bool inlined = true;
  auto inline_depth = stop_info_sp->GetSuggestedStackFrameIndex(inlined);
  // We're only adjusting the inlined stack here.
  Log *log = GetLog(LLDBLog::Step);
  if (inline_depth) {
    std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
    m_current_inlined_depth = *inline_depth;
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();

    if (log && log->GetVerbose())
      LLDB_LOGF(log,
                "ResetCurrentInlinedDepth: setting inlined "
                "depth: %d 0x%" PRIx64 ".\n",
                m_current_inlined_depth, m_current_inlined_pc);
  } else {
    std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    m_current_inlined_depth = UINT32_MAX;
    if (log && log->GetVerbose())
      LLDB_LOGF(
          log,
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
  }
}
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  std::lock_guard<std::mutex> guard(m_inlined_depth_mutex);
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}
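// The PC recorded alongside the depth is what lets GetCurrentInlinedDepth()
// notice that the thread has moved and drop the cached depth automatically,
// so callers don't have to clear it themselves after a resume.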
bool StackFrameList::WereAllFramesFetched() const {
  std::shared_lock<std::shared_mutex> guard(m_list_mutex);
  return GetAllFramesFetched();
}
/// A sequence of calls that comprise some portion of a backtrace. Each frame
/// is represented as a pair of a callee (Function *) and an address within the
/// callee.
struct CallDescriptor {
  Function *func;
  CallEdge::AddrType address_type = CallEdge::AddrType::Call;
  addr_t address = LLDB_INVALID_ADDRESS;
};
using CallSequence = std::vector<CallDescriptor>;
/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
/// This function doesn't currently access StackFrameLists at all, it only
/// looks at the frame set in the ExecutionContext it passes around.
static void FindInterveningFrames(Function &begin, Function &end,
                                  ExecutionContext &exe_ctx, Target &target,
                                  addr_t return_pc, CallSequence &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  if (log)
    for (const auto &edge : begin.GetCallEdges())
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge->GetReturnPCAddress(begin, target));
  CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
  if (!first_edge) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge->GetCallee(images, exe_ctx);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    CallSequence active_path = {};
    CallSequence solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;
    Target &target;
    ExecutionContext &context;

    DFS(Function *end, ModuleList &images, Target &target,
        ExecutionContext &context)
        : end(end), images(images), target(target), context(context) {}

    void search(CallEdge &first_edge, Function &first_callee,
                CallSequence &path) {
      dfs(first_edge, first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(CallEdge &current_edge, Function &callee) {
      // Found a path to the target function.
      if (&callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(&callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(CallDescriptor{&callee});
      for (const auto &edge : callee.GetTailCallingEdges()) {
        Function *next_callee = edge->GetCallee(images, context);
        if (!next_callee)
          continue;

        std::tie(active_path.back().address_type, active_path.back().address) =
            edge->GetCallerAddress(callee, target);

        dfs(*edge, *next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
}
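// Worked example (the function names are illustrative, not from this file):
// suppose the unwinder produced a frame for `leaf` and is about to append a
// frame for `caller`, whose call at the recorded return PC targets `middle`,
// and `middle` tail-calls `leaf`. `middle` never had a concrete frame of its
// own, so the DFS above recovers the unique tail-call chain caller -> middle
// -> leaf and reports `middle` in \p path. If two distinct tail-call chains
// could explain the same return PC, the search is ambiguous and \p path is
// left unchanged.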
/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
///   --------------
///   |    ...     | <- Completed frames.
///   --------------
///   | prev_frame |
///   --------------
///   |    ...     | <- Artificial frames inserted here.
///   --------------
///   | next_frame |
///   --------------
///   |    ...     | <- Not-yet-visited frames.
///   --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  // Cannot synthesize tail call frames when the stack is empty (there is no
  // "previous" frame).
  if (m_frames.empty())
    return;

  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log = GetLog(LLDBLog::Step);

  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  CallSequence path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
  exe_ctx.SetFramePtr(&next_frame);
  FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
                        path, images, log);

  // Push synthetic tail call frames.
  for (auto calleeInfo : llvm::reverse(path)) {
    Function *callee = calleeInfo.func;
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc = calleeInfo.address;
    // If the callee address refers to the call instruction, we do not want to
    // subtract 1 from this value.
    const bool behaves_like_zeroth_frame =
        calleeInfo.address_type == CallEdge::AddrType::Call;
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial,
        behaves_like_zeroth_frame, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}
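// Illustrative effect (hypothetical names): with m_frames == [leaf] and
// next_frame stopped in `caller`, a recovered path through `middle` leaves the
// list as [leaf, middle, caller], where the `middle` entry is Kind::Artificial
// with no valid CFA, and next_frame's index is bumped to account for the
// inserted entry.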
bool StackFrameList::GetFramesUpTo(uint32_t end_idx,
                                   InterruptionControl allow_interrupt) {
  // GetFramesUpTo is always called with the intent to add frames, so get the
  // writer lock:
  std::unique_lock<std::shared_mutex> guard(m_list_mutex);
  // Now that we have the lock, check to make sure someone didn't get there
  // ahead of us:
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return false;

  // Do not fetch frames for an invalid thread.
  bool was_interrupted = false;
  if (!m_thread.IsValid())
    return false;

  // We hold the writer side of m_list_mutex, since we're going to add frames
  // here:
  if (!m_show_inlined_frames) {
    if (end_idx < m_concrete_frames_fetched)
      return false;
    // We're adding concrete frames now:
    // FIXME: This should also be interruptible:
    FetchOnlyConcreteFramesUpTo(end_idx);
    return false;
  }

  // We're adding concrete and inlined frames now:
  was_interrupted = FetchFramesUpTo(end_idx, allow_interrupt);

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
  return was_interrupted;
}
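// Locking note: frame fetching writes m_frames under the unique (writer) lock
// taken above, while readers such as GetNumFrames, GetFrameAtIndex, and Dump
// take the shared side of m_list_mutex; the size/GetAllFramesFetched()
// re-check after acquiring the lock guards against another thread having
// fetched the frames first.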
void StackFrameList::FetchOnlyConcreteFramesUpTo(uint32_t end_idx) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  Unwind &unwinder = m_thread.GetUnwinder();

  if (end_idx < m_concrete_frames_fetched)
    return;

  uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}
bool StackFrameList::FetchFramesUpTo(uint32_t end_idx,
                                     InterruptionControl allow_interrupt) {
  Unwind &unwinder = m_thread.GetUnwinder();
  bool was_interrupted = false;

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif

  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch. However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it... And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX)
      end_idx += inlined_depth;
  }

  StackFrameSP unwind_frame_sp;
  Debugger &dbg = m_thread.GetProcess()->GetTarget().GetDebugger();
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    bool behaves_like_zeroth_frame = (idx == 0);
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success = unwinder.GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, lets make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, behaves_like_zeroth_frame, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      // Check for interruption when building the frames.
      // Do the check in idx > 0 so that we'll always create a 0th frame.
      if (allow_interrupt &&
          INTERRUPT_REQUESTED(dbg, "Interrupted having fetched {0} frames",
                              m_frames.size())) {
        was_interrupted = true;
        break;
      }

      const bool success =
          unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    TargetSP target_sp = m_thread.CalculateTarget();
    if (unwind_block) {
      Address curr_frame_address(
          unwind_frame_sp->GetFrameCodeAddressForSymbolication());

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        behaves_like_zeroth_frame = false;
        StackFrameSP frame_sp(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx,
            unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
            behaves_like_zeroth_frame, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);
  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif

    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n    Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }
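  // The merge above walks both lists from their oldest frames toward frame 0
  // and stops at the first StackID mismatch, so only the common suffix of the
  // old and new backtraces keeps its original StackFrame objects (and thus
  // stable pointers) across the stop.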
  // Don't report interrupted if we happen to have gotten all the frames:
  if (!GetAllFramesFetched())
    return was_interrupted;
  return false;
}
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  if (!WereAllFramesFetched() && can_create) {
    // Don't allow interrupt or we might not return the correct count.
    GetFramesUpTo(UINT32_MAX, DoNotAllowInterruption);
  }
  uint32_t frame_idx;
  {
    std::shared_lock<std::shared_mutex> guard(m_list_mutex);
    frame_idx = GetVisibleStackFrameIndex(m_frames.size());
  }
  return frame_idx;
}
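// GetVisibleStackFrameIndex (declared in the header) is presumably what maps
// the raw m_frames count to the user-visible count, accounting for the
// current inlined depth so the value returned here stays consistent with the
// index arithmetic in GetFrameAtIndex.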
void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::shared_lock<std::shared_mutex> guard(m_list_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  uint32_t original_idx = idx;

  // We're going to consult m_frames.size, but if there are already enough
  // frames for our request we don't want to block other readers, so first
  // acquire the shared lock:
  { // Scope for shared lock:
    std::shared_lock<std::shared_mutex> guard(m_list_mutex);

    uint32_t inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX)
      idx += inlined_depth;

    if (idx < m_frames.size())
      frame_sp = m_frames[idx];

    if (frame_sp)
      return frame_sp;
  } // End of reader lock scope

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't then you asked for too many frames.
  // GetFramesUpTo returns true if interrupted:
  if (GetFramesUpTo(idx, AllowInterruption)) {
    Log *log = GetLog(LLDBLog::Thread);
    LLDB_LOG(log, "GetFrameAtIndex was interrupted");
    return {};
  }

  { // Now we're accessing m_frames as a reader, so acquire the reader lock.
    std::shared_lock<std::shared_mutex> guard(m_list_mutex);
    if (idx < m_frames.size()) {
      frame_sp = m_frames[idx];
    } else if (original_idx == 0) {
      // There should ALWAYS be a frame at index 0. If something went wrong
      // with the CurrentInlinedDepth such that there weren't as many frames as
      // we thought taking that into account, then reset the current inlined
      // depth and return the real zeroth frame.
      if (m_frames.empty()) {
        // Why do we have a thread with zero frames, that should not ever
        // happen...
        assert(!m_thread.IsValid() && "A valid thread has no frames.");
      } else {
        ResetCurrentInlinedDepth();
        frame_sp = m_frames[original_idx];
      }
    }
  } // End of reader lock scope

  return frame_sp;
}
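// Note the check-then-fetch pattern above: the first shared-lock scope is the
// fast path for frames that already exist; if the frame is missing, the lock
// is dropped before GetFramesUpTo re-takes the writer lock and re-checks, and
// the result is then re-read under a fresh shared lock.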
StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. A
  // frame's index is always greater than or equal to its unwind index, so it
  // is a good place to start. If we have inlined frames we might have 5
  // concrete frames (frame unwind indexes go from 0-4), but we might have 15
  // frames after we make all the inlined frames. Most of the time the unwind
  // frame index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}
static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}
StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    uint32_t frame_idx = 0;
    {
      // First see if the frame is already realized. This is the scope for
      // the shared lock:
      std::shared_lock<std::shared_mutex> guard(m_list_mutex);
      // Do a binary search in case the stack frame is already in our cache.
      collection::const_iterator pos =
          llvm::lower_bound(m_frames, stack_id, CompareStackID);
      if (pos != m_frames.end() && (*pos)->GetStackID() == stack_id)
        return *pos;
    }
    // If we needed to add more frames, we would get to here.
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}
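// The lower_bound call above requires m_frames to be sorted by StackID, which
// should hold because frames are stored from youngest to oldest and StackID's
// ordering follows stack depth. If the frame isn't cached yet, the do/while
// loop falls back to realizing frames one at a time until the ID matches or
// the stack runs out.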
bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  std::unique_lock<std::shared_mutex> guard(m_list_mutex);
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again.
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}
void StackFrameList::SelectMostRelevantFrame() {
  // Don't call into the frame recognizers on the private state thread as
  // they can cause code to run in the target, and that can cause deadlocks
  // when fetching stop events for the expression.
  if (m_thread.GetProcess()->CurrentThreadIsPrivateStateThread())
    return;

  Log *log = GetLog(LLDBLog::Thread);

  // Only the top frame should be recognized.
  StackFrameSP frame_sp = GetFrameAtIndex(0);
  if (!frame_sp) {
    LLDB_LOG(log, "Failed to construct Frame #0");
    return;
  }

  RecognizedStackFrameSP recognized_frame_sp = frame_sp->GetRecognizedFrame();

  if (recognized_frame_sp) {
    if (StackFrameSP most_relevant_frame_sp =
            recognized_frame_sp->GetMostRelevantFrame()) {
      LLDB_LOG(log, "Found most relevant frame at index {0}",
               most_relevant_frame_sp->GetFrameIndex());
      SetSelectedFrame(most_relevant_frame_sp.get());
      return;
    }
  }
  LLDB_LOG(log, "Frame #0 not recognized");

  // If this thread has a non-trivial StopInfo, then let it suggest
  // a most relevant frame:
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  uint32_t stack_idx = 0;
  bool found_relevant = false;
  if (stop_info_sp) {
    // Here we're only asking the stop info if it wants to adjust the real
    // stack index. We have to ask about the m_inlined_stack_depth in
    // Thread::ShouldStop since the plans need to reason with that info.
    bool inlined = false;
    std::optional<uint32_t> stack_opt =
        stop_info_sp->GetSuggestedStackFrameIndex(inlined);
    if (stack_opt) {
      stack_idx = *stack_opt;
      found_relevant = true;
    }
  }

  frame_sp = GetFrameAtIndex(stack_idx);
  if (!frame_sp)
    LLDB_LOG(log, "Stop info suggested relevant frame {0} but it didn't exist",
             stack_idx);
  else if (found_relevant)
    LLDB_LOG(log, "Setting selected frame from stop info to {0}", stack_idx);
  // Note, we don't have to worry about "inlined" frames here, because we've
  // already calculated the inlined frame in Thread::ShouldStop, and
  // SetSelectedFrame will take care of that adjustment for us.
  SetSelectedFrame(frame_sp.get());

  if (!found_relevant)
    LLDB_LOG(log, "No relevant frame!");
}
uint32_t
StackFrameList::GetSelectedFrameIndex(SelectMostRelevant select_most_relevant) {
  if (!m_selected_frame_idx && select_most_relevant)
    SelectMostRelevantFrame();
  if (!m_selected_frame_idx) {
    // If we aren't selecting the most relevant frame, and the selected frame
    // isn't set, then don't force a selection here, just return 0.
    if (!select_most_relevant)
      return 0;
    // If the inlined stack frame is set, then use that:
    m_selected_frame_idx = 0;
  }
  return *m_selected_frame_idx;
}
uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::shared_lock<std::shared_mutex> guard(m_list_mutex);

  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx = *m_selected_frame_idx - inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return *m_selected_frame_idx;
}
bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}
void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(
        GetFrameAtIndex(GetSelectedFrameIndex(DoNoSelectMostRelevantFrame)));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.GetFile())
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file_sp, sc.line_entry.line);
    }
  }
}
// The thread has been run, reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
// Note: we don't actually re-use StackFrameLists; we always make a new
// StackFrameList every time we stop and then copy frame information frame by
// frame from the old to the new StackFrameList, so the comment above does not
// describe how StackFrameLists are currently used.
// Clear is currently only used to clear the list in the destructor.
void StackFrameList::Clear() {
  std::unique_lock<std::shared_mutex> guard(m_list_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
  m_selected_frame_idx.reset();
}
lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  std::shared_lock<std::shared_mutex> guard(m_list_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = *pos;
      break;
    }
  }
  return ret_sp;
}
size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique, bool show_hidden,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp =
      m_thread.GetSelectedFrame(DoNoSelectMostRelevantFrame);
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
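  // Note: the unselected marker is simply a run of spaces the same length as
  // the selected-frame marker, so frame output stays column-aligned whether
  // or not a given frame happens to be the selected one.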
  const char *marker = nullptr;
  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    // Hide uninteresting frames unless it's the selected frame.
    if (!show_hidden && frame_sp != selected_frame_sp && frame_sp->IsHidden())
      continue;

    // Check for interruption here. If we're fetching arguments, this loop
    // can go slowly:
    Debugger &dbg = m_thread.GetProcess()->GetTarget().GetDebugger();
    if (INTERRUPT_REQUESTED(
            dbg, "Interrupted dumping stack for thread {0:x} with {1} shown.",
            m_thread.GetID(), num_frames_displayed))
      break;

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}