//===--- Threading.cpp - Abstractions for multithreading ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "support/Threading.h"
#include "support/Trace.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"
#include <atomic>
#include <thread>
#ifdef __USE_POSIX
#include <pthread.h>
#elif defined(__APPLE__)
#include <sys/resource.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace clang {
namespace clangd {

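// Notification is a one-shot event: once notify() runs, every current and
// future wait() returns true (or false if the deadline expires first).
// Illustrative use only; the worker body is assumed:
//
//   Notification Done;
//   std::thread Worker([&] { /* produce results */ Done.notify(); });
//   Done.wait(Deadline::infinity()); // Results are visible here.
//   Worker.join();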
void Notification::notify() {
  {
    std::lock_guard<std::mutex> Lock(Mu);
    Notified = true;
    // Broadcast with the lock held. This ensures that it's safe to destroy
    // a Notification after wait() returns, even from another thread.
    CV.notify_all();
  }
}

bool Notification::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mu);
  return clangd::wait(Lock, CV, D, [&] { return Notified; });
}

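// Semaphore is a counting semaphore: at most MaxLocks holders at a time.
// lock() blocks until a slot frees up; try_lock() never blocks. Since it
// models BasicLockable, one possible (illustrative) pattern is:
//
//   Semaphore ConcurrencyLimit(4);                    // Assumed name/limit.
//   std::lock_guard<Semaphore> Slot(ConcurrencyLimit);
//   // ... do bounded-concurrency work ...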
Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}

bool Semaphore::try_lock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  if (FreeSlots > 0) {
    --FreeSlots;
    return true;
  }
  return false;
}

void Semaphore::lock() {
  trace::Span Span("WaitForFreeSemaphoreSlot");
  // trace::Span can also acquire locks in its ctor and dtor; make sure that
  // happens while Semaphore's own lock is not held.
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
    --FreeSlots;
  }
}

void Semaphore::unlock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  ++FreeSlots;
  // Unlock before notifying so the woken waiter can grab the mutex right away.
  Lock.unlock();

  SlotsChanged.notify_one();
}

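// AsyncTaskRunner runs each task on its own thread, and lets callers block
// until every in-flight task has finished. Illustrative use only; the task
// name and body are assumed:
//
//   AsyncTaskRunner Tasks;
//   Tasks.runAsync("background-work", [] { /* ... */ });
//   Tasks.wait(Deadline::infinity()); // The destructor also waits.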
AsyncTaskRunner::~AsyncTaskRunner() { wait(); }

bool AsyncTaskRunner::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mutex);
  return clangd::wait(Lock, TasksReachedZero, D,
                      [&] { return InFlightTasks == 0; });
}

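// The in-flight count is bumped before the thread is spawned, and a scope_exit
// captured into the task lambda drops it again once the task object is
// destroyed on the worker thread, so wait() cannot return while a task (or
// anything it captured) is still alive.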
void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure function stored by ThreadFunc is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ llvm::Optional<unsigned>(8 << 20),
      std::move(Task));
  Thread.detach();
}

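// Converts an optional timeout in (possibly fractional) seconds into a
// Deadline; an unset value means "no timeout".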
Deadline timeoutSeconds(llvm::Optional<double> Seconds) {
  using namespace std::chrono;
  if (!Seconds)
    return Deadline::infinity();
  return steady_clock::now() +
         duration_cast<steady_clock::duration>(duration<double>(*Seconds));
}

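// Blocks on CV until it is notified or the deadline expires: a zero deadline
// returns immediately, an infinite deadline waits without a timeout.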
void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
          Deadline D) {
  if (D == Deadline::zero())
    return;
  if (D == Deadline::infinity())
    return CV.wait(Lock);
  CV.wait_until(Lock, D.time());
}

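// operator() returns true at most once per Period: racing callers all read
// Next, and only the thread that wins the compare-exchange gets to run.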
bool PeriodicThrottler::operator()() {
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}

} // namespace clangd
} // namespace clang