//===-- ThreadPlanStack.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/ThreadPlanStack.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/Log.h"

using namespace lldb;
using namespace lldb_private;

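// A ThreadPlanStack owns the plan stack for a single thread: the stack of
// currently active plans, plus the plans that completed or were discarded at
// the current stop. ThreadPlanStackMap keeps one such stack per thread,
// keyed by TID.
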
static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
                             lldb::DescriptionLevel desc_level,
                             int32_t elem_idx) {
  s.IndentMore();
  s.Indent();
  s.Printf("Element %d: ", elem_idx);
  plan->GetDescription(&s, desc_level);
  s.EOL();
  s.IndentLess();
}

ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
  if (make_null) {
    // The ThreadPlanNull doesn't do anything to the Thread, so this is
    // actually still a const operation.
    m_plans.push_back(
        ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
  }
}

void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                      lldb::DescriptionLevel desc_level,
                                      bool include_internal) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  s.IndentMore();
  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
                include_internal);
  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
                include_internal);
  s.IndentLess();
}

void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
                                    const PlanStack &stack,
                                    lldb::DescriptionLevel desc_level,
                                    bool include_internal) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  // If the stack is empty, just exit:
  if (stack.empty())
    return;

  // If we're excluding internal plans, make sure this stack has at least one
  // public plan worth printing:
  bool any_public = false;
  if (!include_internal) {
    for (auto plan : stack) {
      if (!plan->GetPrivate()) {
        any_public = true;
        break;
      }
    }
  }

  if (include_internal || any_public) {
    int print_idx = 0;
    s.Indent();
    s << stack_name << ":\n";
    for (auto plan : stack) {
      if (!include_internal && plan->GetPrivate())
        continue;
      PrintPlanElement(s, plan, desc_level, print_idx++);
    }
  }
}

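// Checkpoints save a copy of the completed-plan stack under a monotonically
// increasing token, so a caller can restore or drop that state later. A
// sketch of the intended round trip (caller code assumed, not from this
// file):
//
//   size_t tok = stack.CheckpointCompletedPlans();
//   // ... do work that may disturb the completed plans ...
//   stack.RestoreCompletedPlanCheckpoint(tok);
//   // or, to throw the saved copy away: stack.DiscardCompletedPlanCheckpoint(tok);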
size_t ThreadPlanStack::CheckpointCompletedPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plan_checkpoint++;
  m_completed_plan_store.insert(
      std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
  return m_completed_plan_checkpoint;
}

void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  auto result = m_completed_plan_store.find(checkpoint);
  assert(result != m_completed_plan_store.end() &&
         "Asked for a checkpoint that didn't exist");
  m_completed_plans.swap((*result).second);
  m_completed_plan_store.erase(result);
}

void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plan_store.erase(checkpoint);
}

void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
  // Tell the plan stacks that this thread is going away:
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (ThreadPlanSP plan : m_plans)
    plan->ThreadDestroyed();

  for (ThreadPlanSP plan : m_discarded_plans)
    plan->ThreadDestroyed();

  for (ThreadPlanSP plan : m_completed_plans)
    plan->ThreadDestroyed();

  // Now clear the current plan stacks:
  m_plans.clear();
  m_discarded_plans.clear();
  m_completed_plans.clear();

  // Push a ThreadPlanNull on the plan stack. That way we can continue
  // assuming that the plan stack is never empty, but if somebody errantly
  // asks questions of a destroyed thread without first checking whether it
  // is destroyed, they won't crash.
  if (thread != nullptr) {
    lldb::ThreadPlanSP null_plan_sp(new ThreadPlanNull(*thread));
    m_plans.push_back(null_plan_sp);
  }
}

void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  // The first plan pushed on the stack has to be a base plan:
  assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
         "Zeroth plan must be a base plan");

  // If the thread plan doesn't already have a tracer, give it its parent's
  // tracer:
  if (!new_plan_sp->GetThreadPlanTracer()) {
    assert(!m_plans.empty());
    new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
  }
  m_plans.push_back(new_plan_sp);
  new_plan_sp->DidPush();
}

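// Popping a plan moves it from the active stack onto the completed-plan
// stack, where it stays available for result queries (return value,
// expression variable) until the thread resumes.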
lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't pop the base thread plan");

  // Note that moving the top element of the vector would leave it in an
  // undefined state, and break the guarantee that the stack's thread plans
  // are all valid.
  lldb::ThreadPlanSP plan_sp = m_plans.back();
  m_plans.pop_back();
  m_completed_plans.push_back(plan_sp);
  plan_sp->DidPop();
  return plan_sp;
}

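// Discarding is the same motion as popping, except the plan is recorded on
// the discarded-plan stack rather than treated as having completed.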
lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't discard the base thread plan");

  // Note that moving the top element of the vector would leave it in an
  // undefined state, and break the guarantee that the stack's thread plans
  // are all valid.
  lldb::ThreadPlanSP plan_sp = m_plans.back();
  m_plans.pop_back();
  m_discarded_plans.push_back(plan_sp);
  plan_sp->DidPop();
  return plan_sp;
}

// If the input plan is nullptr, discard all plans. Otherwise make sure this
// plan is in the stack, and if so discard up to and including it.
void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();

  if (up_to_plan_ptr == nullptr) {
    for (int i = stack_size - 1; i > 0; i--)
      DiscardPlan();
    return;
  }

  bool found_it = false;
  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i].get() == up_to_plan_ptr) {
      found_it = true;
      break;
    }
  }

  if (found_it) {
    bool last_one = false;
    for (int i = stack_size - 1; i > 0 && !last_one; i--) {
      if (GetCurrentPlan().get() == up_to_plan_ptr)
        last_one = true;
      DiscardPlan();
    }
  }
}

void ThreadPlanStack::DiscardAllPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    DiscardPlan();
  }
}

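// Repeatedly find the topmost controlling plan and ask whether it is okay to
// discard; if so, discard the dependent plans above it and then (unless it
// is the base plan) the controlling plan itself. Stop as soon as a
// controlling plan refuses; the base plan is expected to refuse, which
// terminates the loop.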
void ThreadPlanStack::DiscardConsultingControllingPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  while (true) {
    int controlling_plan_idx;
    bool discard = true;

    // Find the first controlling plan, see if it wants discarding, and if
    // yes, discard up to it.
    for (controlling_plan_idx = m_plans.size() - 1; controlling_plan_idx >= 0;
         controlling_plan_idx--) {
      if (m_plans[controlling_plan_idx]->IsControllingPlan()) {
        discard = m_plans[controlling_plan_idx]->OkayToDiscard();
        break;
      }
    }

    // If the controlling plan doesn't want to get discarded, then we're done.
    if (!discard)
      return;

    // First pop all the dependent plans:
    for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
      DiscardPlan();
    }

    // Now discard the controlling plan itself. The bottom-most plan never
    // gets discarded: "OkayToDiscard" for it means discard its dependent
    // plans, but not the plan itself.
    if (controlling_plan_idx > 0) {
      DiscardPlan();
    }
  }
}

lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() != 0 && "There will always be a base plan.");
  return m_plans.back();
}

lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  if (!skip_private)
    return m_completed_plans.back();

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ThreadPlanSP completed_plan_sp = m_completed_plans[i];
    if (!completed_plan_sp->GetPrivate())
      return completed_plan_sp;
  }
  return {};
}

lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                   bool skip_private) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  uint32_t idx = 0;

  for (lldb::ThreadPlanSP plan_sp : m_plans) {
    if (skip_private && plan_sp->GetPrivate())
      continue;
    if (idx == plan_idx)
      return plan_sp;
    idx++;
  }
  return {};
}

lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ValueObjectSP return_valobj_sp =
        m_completed_plans[i]->GetReturnValueObject();
    if (return_valobj_sp)
      return return_valobj_sp;
  }
  return {};
}

lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ExpressionVariableSP expression_variable_sp =
        m_completed_plans[i]->GetExpressionVariable();
    if (expression_variable_sp)
      return expression_variable_sp;
  }
  return {};
}

bool ThreadPlanStack::AnyPlans() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  // There is always a base plan, so "any plans" means "more than just the
  // base plan":
  return m_plans.size() > 1;
}

bool ThreadPlanStack::AnyCompletedPlans() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  return !m_completed_plans.empty();
}

bool ThreadPlanStack::AnyDiscardedPlans() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  return !m_discarded_plans.empty();
}

bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (auto plan : m_completed_plans) {
    if (plan.get() == in_plan)
      return true;
  }
  return false;
}

bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (auto plan : m_discarded_plans) {
    if (plan.get() == in_plan)
      return true;
  }
  return false;
}

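// Search the completed plans first, then the active stack. For the plan at
// the bottom of the completed stack, the "previous" plan is the current
// (topmost active) plan.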
ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (current_plan == nullptr)
    return nullptr;

  // Look first in the completed plans; if the plan is here and there is a
  // completed plan above it, return that.
  int stack_size = m_completed_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_completed_plans[i].get())
      return m_completed_plans[i - 1].get();
  }

  // If this is the first completed plan, the previous one is the current
  // plan, i.e. the top of the regular plan stack.
  if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
    return GetCurrentPlan().get();
  }

  // Otherwise look for it in the regular plans.
  stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_plans[i].get())
      return m_plans[i - 1].get();
  }
  return nullptr;
}

ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();

  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i]->GetKind() == ThreadPlan::eKindCallFunction)
      return m_plans[i].get();
  }
  return nullptr;
}

void ThreadPlanStack::ClearThreadCache() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
    thread_plan_sp->ClearThreadCache();
}

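// The completed and discarded stacks only describe the stop we are currently
// sitting at, so they are flushed when the thread resumes.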
void ThreadPlanStack::WillResume() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plans.clear();
  m_discarded_plans.clear();
}

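// Reconcile the map with the threads the process currently reports: new
// threads get a stack seeded with a base plan, and (optionally) the stacks
// of vanished TIDs are reaped. A hypothetical caller, after a stop, might
// do:
//
//   stack_map.Update(process.GetThreadList(), /*delete_missing=*/true,
//                    /*check_for_new=*/true);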
void ThreadPlanStackMap::Update(ThreadList &current_threads,
                                bool delete_missing,
                                bool check_for_new) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  // First find all the new threads and add them to the map:
  if (check_for_new) {
    for (auto thread : current_threads.Threads()) {
      lldb::tid_t cur_tid = thread->GetID();
      if (!Find(cur_tid)) {
        AddThread(*thread);
        thread->QueueBasePlan(true);
      }
    }
  }

  // If we aren't reaping missing threads at this point, we are done.
  if (!delete_missing)
    return;

  // Otherwise scan for TIDs that no longer have a thread, and remove their
  // plan stacks:
  std::vector<lldb::tid_t> missing_threads;
  for (auto &thread_plans : m_plans_list) {
    lldb::tid_t cur_tid = thread_plans.first;
    ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
    if (!thread_sp)
      missing_threads.push_back(cur_tid);
  }
  for (lldb::tid_t tid : missing_threads) {
    RemoveTID(tid);
  }
}

void ThreadPlanStackMap::DumpPlans(Stream &strm,
                                   lldb::DescriptionLevel desc_level,
                                   bool internal, bool condense_if_trivial,
                                   bool skip_unreported) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  for (auto &elem : m_plans_list) {
    lldb::tid_t tid = elem.first;
    uint32_t index_id = 0;
    ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

    if (skip_unreported) {
      if (!thread_sp)
        continue;
    }
    if (thread_sp)
      index_id = thread_sp->GetIndexID();

    if (condense_if_trivial) {
      if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
          !elem.second.AnyDiscardedPlans()) {
        strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
        strm.IndentMore();
        strm.Indent();
        strm.Printf("No active thread plans\n");
        strm.IndentLess();
        // This thread is done; don't cut the dump short for the others.
        continue;
      }
    }

    strm.Indent();
    strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

    elem.second.DumpThreadPlans(strm, desc_level, internal);
  }
}

bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
                                         lldb::DescriptionLevel desc_level,
                                         bool internal,
                                         bool condense_if_trivial,
                                         bool skip_unreported) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  uint32_t index_id = 0;
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

  if (skip_unreported) {
    if (!thread_sp) {
      strm.Format("Unknown TID: {0}\n", tid);
      return false;
    }
  }

  if (thread_sp)
    index_id = thread_sp->GetIndexID();
  ThreadPlanStack *stack = Find(tid);
  if (!stack) {
    strm.Format("Unknown TID: {0}\n", tid);
    return false;
  }

  if (condense_if_trivial) {
    if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
        !stack->AnyDiscardedPlans()) {
      strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
      strm.IndentMore();
      strm.Indent();
      strm.Printf("No active thread plans\n");
      strm.IndentLess();
      return true;
    }
  }

  strm.Indent();
  strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

  stack->DumpThreadPlans(strm, desc_level, internal);
  return true;
}

bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
  // We only remove the plans for unreported TIDs.
  std::lock_guard<std::recursive_mutex> guard(m_stack_map_mutex);
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
  if (thread_sp)
    return false;

  return RemoveTID(tid);
}