//===- llvm/Support/Parallel.cpp - Parallel algorithms --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/Parallel.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Threading.h"

#include <atomic>
#include <future>
#include <stack>
#include <thread>
#include <vector>

llvm::ThreadPoolStrategy llvm::parallel::strategy;

#if LLVM_ENABLE_THREADS

namespace llvm {
namespace parallel {
namespace detail {

namespace {

/// An abstract class that takes closures and runs them asynchronously.
class Executor {
public:
  virtual ~Executor() = default;
  virtual void add(std::function<void()> func) = 0;

  static Executor *getDefaultExecutor();
};
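
// A minimal sketch of how this interface is used: code in this file fetches
// the process-wide executor and hands it closures; any callable with the
// signature void() works. `doSomeWork` below is a hypothetical function used
// only for illustration.
//
//   Executor::getDefaultExecutor()->add([] { doSomeWork(); });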

/// An implementation of an Executor that runs closures on a thread pool
/// in FILO order.
class ThreadPoolExecutor : public Executor {
public:
  explicit ThreadPoolExecutor(ThreadPoolStrategy S = hardware_concurrency()) {
    unsigned ThreadCount = S.compute_thread_count();
    // Spawn all but one of the threads in another thread as spawning threads
    // can take a while.
    Threads.reserve(ThreadCount);
    Threads.resize(1);
    std::lock_guard<std::mutex> Lock(Mutex);
    Threads[0] = std::thread([this, ThreadCount, S] {
      for (unsigned I = 1; I < ThreadCount; ++I) {
        Threads.emplace_back([=] { work(S, I); });
        if (Stop)
          break;
      }
      ThreadsCreated.set_value();
      work(S, 0);
    });
  }

  void stop() {
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      if (Stop)
        return;
      Stop = true;
    }
    Cond.notify_all();
    ThreadsCreated.get_future().wait();
  }

  ~ThreadPoolExecutor() override {
    stop();
    std::thread::id CurrentThreadId = std::this_thread::get_id();
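    // A thread cannot join itself, so if this destructor is running on one of
    // the worker threads, detach that thread instead of joining it.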
    for (std::thread &T : Threads)
      if (T.get_id() == CurrentThreadId)
        T.detach();
      else
        T.join();
  }

  struct Creator {
    static void *call() { return new ThreadPoolExecutor(strategy); }
  };
  struct Deleter {
    static void call(void *Ptr) { ((ThreadPoolExecutor *)Ptr)->stop(); }
  };

  void add(std::function<void()> F) override {
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      WorkStack.push(F);
    }
    Cond.notify_one();
  }

private:
  void work(ThreadPoolStrategy S, unsigned ThreadID) {
    S.apply_thread_strategy(ThreadID);
    while (true) {
      std::unique_lock<std::mutex> Lock(Mutex);
      Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
      if (Stop)
        break;
      auto Task = WorkStack.top();
      WorkStack.pop();
      Lock.unlock();
      Task();
    }
  }

  std::atomic<bool> Stop{false};
  std::stack<std::function<void()>> WorkStack;
  std::mutex Mutex;
  std::condition_variable Cond;
  std::promise<void> ThreadsCreated;
  std::vector<std::thread> Threads;
};
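
// A minimal sketch of the FILO scheduling noted above: pending closures sit on
// WorkStack, so a worker that becomes free pops the most recently added task
// first. Assuming both tasks below are still pending when a worker wakes up,
// task B runs before task A.
//
//   Executor *E = Executor::getDefaultExecutor();
//   E->add([] { /* task A */ });
//   E->add([] { /* task B */ });  // B is on top of the stack, popped first.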

Executor *Executor::getDefaultExecutor() {
  // The ManagedStatic enables the ThreadPoolExecutor to be stopped via
  // llvm_shutdown() which allows a "clean" fast exit, e.g. via _exit(). This
  // stops the thread pool and waits for any worker thread creation to complete
  // but does not wait for the threads to finish. The wait for worker thread
  // creation to complete is important as it prevents intermittent crashes on
  // Windows due to a race condition between thread creation and process exit.
  //
  // The ThreadPoolExecutor will only be destroyed when the static unique_ptr to
  // it is destroyed, i.e. in a normal full exit. The ThreadPoolExecutor
  // destructor ensures it has been stopped and waits for worker threads to
  // finish. The wait is important as it prevents intermittent crashes on
  // Windows when the process is doing a full exit.
  //
  // The Windows crashes appear to only occur with the MSVC static runtimes and
  // are more frequent with the debug static runtime.
  //
  // This also prevents intermittent deadlocks on exit with the MinGW runtime.
  static ManagedStatic<ThreadPoolExecutor, ThreadPoolExecutor::Creator,
                       ThreadPoolExecutor::Deleter>
      ManagedExec;
  static std::unique_ptr<ThreadPoolExecutor> Exec(&(*ManagedExec));
  return Exec.get();
}
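
// A sketch of the fast-exit path described above (illustrative client code;
// `Status` is a hypothetical exit code):
//
//   llvm_shutdown();  // Destroys ManagedStatics; Deleter::call() stops the
//                     // pool and waits only for worker-thread creation, not
//                     // for the workers themselves to finish.
//   _exit(Status);    // The process can then exit without running a full
//                     // static-destructor pass.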
} // namespace

static std::atomic<int> TaskGroupInstances;

// Latch::sync() called by the dtor may cause one thread to block. It is a
// deadlock if all threads in the default executor are blocked. To prevent the
// deadlock, only allow the first TaskGroup to run tasks in parallel. In the
// scenario of nested parallel_for_each(), only the outermost one runs in
// parallel.
TaskGroup::TaskGroup() : Parallel(TaskGroupInstances++ == 0) {}
TaskGroup::~TaskGroup() { --TaskGroupInstances; }
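
// A minimal sketch of the nesting rule above, assuming an inner TaskGroup is
// created while an outer one is still alive (as happens with nested
// parallel_for_each() calls):
//
//   TaskGroup Outer;                  // TaskGroupInstances 0 -> 1: parallel.
//   Outer.spawn([] {
//     TaskGroup Inner;                // TaskGroupInstances 1 -> 2: serial.
//     Inner.spawn([] { /* runs inline on the current thread */ });
//   });                               // Inner never blocks on the pool, so
//                                     // the workers cannot all end up stuck
//                                     // waiting on nested groups.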

void TaskGroup::spawn(std::function<void()> F) {
  if (Parallel) {
    L.inc();
    Executor::getDefaultExecutor()->add([&, F] {
      F();
      L.dec();
    });
  } else {
    F();
  }
}

} // namespace detail
} // namespace parallel
} // namespace llvm
#endif // LLVM_ENABLE_THREADS