//===--- Threading.cpp - Abstractions for multithreading ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "support/Threading.h"
#include "support/Trace.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"
#include <atomic>
#include <optional>
#include <thread>
#ifdef __USE_POSIX
#include <pthread.h>
#elif defined(__APPLE__)
#include <sys/resource.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace clang {
namespace clangd {

void Notification::notify() {
  {
    std::lock_guard<std::mutex> Lock(Mu);
    Notified = true;
    // Broadcast with the lock held. This ensures that it's safe to destroy
    // a Notification after wait() returns, even from another thread.
    CV.notify_all();
  }
}

bool Notification::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mu);
  return clangd::wait(Lock, CV, D, [&] { return Notified; });
}

Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}

bool Semaphore::try_lock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  if (FreeSlots > 0) {
    --FreeSlots;
    return true;
  }
  return false;
}

void Semaphore::lock() {
  trace::Span Span("WaitForFreeSemaphoreSlot");
  // trace::Span can also acquire locks in its ctor and dtor; make sure that
  // happens while Semaphore's own lock is not held.
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
    --FreeSlots;
  }
}

void Semaphore::unlock() {
  std::unique_lock<std::mutex> Lock(Mutex);
  ++FreeSlots;
  Lock.unlock();
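  // Notify after releasing the lock; otherwise the woken thread could wake up
  // only to block again on the mutex we still hold.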
  SlotsChanged.notify_one();
}
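
// The destructor blocks until every task started via runAsync() has finished:
// worker threads are detached, so completion is tracked through InFlightTasks
// and TasksReachedZero rather than by joining.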
AsyncTaskRunner::~AsyncTaskRunner() { wait(); }

bool AsyncTaskRunner::wait(Deadline D) const {
  std::unique_lock<std::mutex> Lock(Mutex);
  return clangd::wait(Lock, TasksReachedZero, D,
                      [&] { return InFlightTasks == 0; });
}

void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
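  // Count the task before spawning the thread so that a concurrent wait()
  // cannot observe InFlightTasks == 0 and return early.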
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure the function stored in Action is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ std::optional<unsigned>(8 << 20),
      std::move(Task));
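  // The thread is detached; its completion is signalled by CleanupTask
  // decrementing InFlightTasks, not by joining.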
  Thread.detach();
}
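
// Convert an optional timeout in seconds to a Deadline; std::nullopt means
// "no timeout" (wait forever).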
Deadline timeoutSeconds(std::optional<double> Seconds) {
  using namespace std::chrono;
  if (!Seconds)
    return Deadline::infinity();
  return steady_clock::now() +
         duration_cast<steady_clock::duration>(duration<double>(*Seconds));
}

void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
          Deadline D) {
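  // A zero deadline returns immediately (pure poll); an infinite deadline
  // waits without any timeout.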
  if (D == Deadline::zero())
    return;
  if (D == Deadline::infinity())
    return CV.wait(Lock);
  CV.wait_until(Lock, D.time());
}

bool PeriodicThrottler::operator()() {
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
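  // If another thread advanced Next first, the compare-exchange fails and we
  // return false: that thread takes this turn instead.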
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}

} // namespace clangd
} // namespace clang