//===--- Threading.cpp - Abstractions for multithreading ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "support/Threading.h"
#include "support/Trace.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"
#include <optional>
#ifdef __USE_POSIX
#include <pthread.h>
#elif defined(__APPLE__)
#include <sys/resource.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace clang {
namespace clangd {

void Notification::notify() {
  {
    std::lock_guard<std::mutex> Lock(Mu);
    Notified = true;
    // Broadcast with the lock held. This ensures that it's safe to destroy
    // a Notification after wait() returns, even from another thread.
    CV.notify_all();
  }
}

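// Returns true if notify() was called before the deadline expired, and false
// if the wait timed out.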
bool Notification::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mu);
  return clangd::wait(Lock, CV, D, [&] { return Notified; });
}

Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}

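// Claims a free slot if one is available; returns false instead of blocking.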
bool Semaphore::try_lock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  if (FreeSlots > 0) {
    --FreeSlots;
    return true;
  }
  return false;
}

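// Blocks until a slot becomes free, then claims it.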
void Semaphore::lock() {
  trace::Span Span("WaitForFreeSemaphoreSlot");
  // trace::Span can also acquire locks in its ctor and dtor; make sure that
  // happens while the Semaphore's own lock is not held.
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
    --FreeSlots;
  }
}

void Semaphore::unlock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  ++FreeSlots;
  Lock.unlock();
  // Notify with the lock released, so the woken thread can acquire it at once.
  SlotsChanged.notify_one();
}

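// Destruction blocks until all tasks scheduled through runAsync() have finished.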
AsyncTaskRunner::~AsyncTaskRunner() { wait(); }

bool AsyncTaskRunner::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mutex);
  return clangd::wait(Lock, TasksReachedZero, D,
                      [&] { return InFlightTasks == 0; });
}

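// Runs Action on a detached thread named Name, bumping InFlightTasks so that
// wait() can block until every outstanding task has completed.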
void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure the function stored by Action is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ std::optional<unsigned>(8 << 20),
      std::move(Task));
  Thread.detach();
}

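// Makes a deadline from a timeout in seconds; std::nullopt means wait forever.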
Deadline timeoutSeconds(std::optional<double> Seconds) {
  using namespace std::chrono;
  if (!Seconds)
    return Deadline::infinity();
  return steady_clock::now() +
         duration_cast<steady_clock::duration>(duration<double>(*Seconds));
}

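// Waits on CV until the deadline expires: returns immediately for a zero
// deadline, and waits without a timeout for an infinite one.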
void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
          Deadline D) {
  if (D == Deadline::zero())
    return;
  if (D == Deadline::infinity())
    return CV.wait(Lock);
  CV.wait_until(Lock, D.time());
}

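// Returns true if the caller should run now. At most one caller wins per
// Period: the winner atomically pushes the next allowed run time forward.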
bool PeriodicThrottler::operator()() {
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}

} // namespace clangd
} // namespace clang