1 //===-- ThreadingTests.cpp --------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
#include "support/Threading.h"
#include "llvm/ADT/DenseMap.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <mutex>
#include <string>
18 class ThreadingTest
: public ::testing::Test
{};
20 TEST_F(ThreadingTest
, TaskRunner
) {
21 const int TasksCnt
= 100;
22 // This should be const, but MSVC does not allow to use const vars in lambdas
23 // without capture. On the other hand, clang gives a warning that capture of
24 // const var is not required.
25 // Making it non-const makes both compilers happy.
26 int IncrementsPerTask
= 1000;
29 int Counter(0); /* GUARDED_BY(Mutex) */
31 AsyncTaskRunner Tasks
;
32 auto ScheduleIncrements
= [&]() {
33 for (int TaskI
= 0; TaskI
< TasksCnt
; ++TaskI
) {
34 Tasks
.runAsync("task", [&Counter
, &Mutex
, IncrementsPerTask
]() {
35 for (int Increment
= 0; Increment
< IncrementsPerTask
; ++Increment
) {
36 std::lock_guard
<std::mutex
> Lock(Mutex
);
44 // Make sure runAsync is not running tasks synchronously on the same
45 // thread by locking the Mutex used for increments.
46 std::lock_guard
<std::mutex
> Lock(Mutex
);
52 std::lock_guard
<std::mutex
> Lock(Mutex
);
53 ASSERT_EQ(Counter
, TasksCnt
* IncrementsPerTask
);
57 std::lock_guard
<std::mutex
> Lock(Mutex
);
62 // Check that destructor has waited for tasks to finish.
63 std::lock_guard
<std::mutex
> Lock(Mutex
);
64 ASSERT_EQ(Counter
, TasksCnt
* IncrementsPerTask
);
67 TEST_F(ThreadingTest
, Memoize
) {
68 const unsigned NumThreads
= 5;
69 const unsigned NumKeys
= 100;
70 const unsigned NumIterations
= 100;
72 Memoize
<llvm::DenseMap
<int, int>> Cache
;
73 std::atomic
<unsigned> ComputeCount(0);
74 std::atomic
<int> ComputeResult
[NumKeys
];
75 std::fill(std::begin(ComputeResult
), std::end(ComputeResult
), -1);
77 AsyncTaskRunner Tasks
;
78 for (unsigned I
= 0; I
< NumThreads
; ++I
)
79 Tasks
.runAsync("worker" + std::to_string(I
), [&] {
80 for (unsigned J
= 0; J
< NumIterations
; J
++)
81 for (unsigned K
= 0; K
< NumKeys
; K
++) {
82 int Result
= Cache
.get(K
, [&] { return ++ComputeCount
; });
83 EXPECT_THAT(ComputeResult
[K
].exchange(Result
),
84 testing::AnyOf(-1, Result
))
85 << "Got inconsistent results from memoize";
89 EXPECT_GE(ComputeCount
, NumKeys
) << "Computed each key once";
90 EXPECT_LE(ComputeCount
, NumThreads
* NumKeys
)
91 << "Worst case, computed each key in every thread";
92 for (int Result
: ComputeResult
)
93 EXPECT_GT(Result
, 0) << "All results in expected domain";
96 TEST_F(ThreadingTest
, MemoizeDeterministic
) {
97 Memoize
<llvm::DenseMap
<int, char>> Cache
;
99 // Spawn two parallel computations, A and B.
100 // Force concurrency: neither can finish until both have started.
101 // Verify that cache returns consistent results.
102 AsyncTaskRunner Tasks
;
103 std::atomic
<char> ValueA(0), ValueB(0);
104 Notification ReleaseA
, ReleaseB
;
105 Tasks
.runAsync("A", [&] {
106 ValueA
= Cache
.get(0, [&] {
112 Tasks
.runAsync("A", [&] {
113 ValueB
= Cache
.get(0, [&] {
121 ASSERT_EQ(ValueA
, ValueB
);
122 ASSERT_THAT(ValueA
.load(), testing::AnyOf('A', 'B'));
125 // It's hard to write a real test of this class, std::chrono is awkward to mock.
126 // But test some degenerate cases at least.
127 TEST(PeriodicThrottlerTest
, Minimal
) {
128 PeriodicThrottler
Once(std::chrono::hours(24));
130 EXPECT_FALSE(Once());
131 EXPECT_FALSE(Once());
133 PeriodicThrottler
Later(std::chrono::hours(24),
134 /*Delay=*/std::chrono::hours(24));
135 EXPECT_FALSE(Later());
136 EXPECT_FALSE(Later());
137 EXPECT_FALSE(Later());
139 PeriodicThrottler
Always(std::chrono::seconds(0));
140 EXPECT_TRUE(Always());
141 EXPECT_TRUE(Always());
142 EXPECT_TRUE(Always());
145 } // namespace clangd