//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_TSD_H_
#define SCUDO_TSD_H_

#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
#include "thread_annotations.h"

#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>

// With some build setups, this might still not be defined.
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#define PTHREAD_DESTRUCTOR_ITERATIONS 4
#endif
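// (POSIX requires at least _POSIX_THREAD_DESTRUCTOR_ITERATIONS == 4 rounds of
// thread-specific data destructor invocation at thread exit, hence the
// fallback value of 4 above.)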

namespace scudo {

template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
  using ThisT = TSD<Allocator>;
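  // Remaining number of pthread destructor passes for this TSD. The TSD
  // registries are expected to decrement this in their thread-teardown
  // callback and re-arm the TLS key, so that allocations happening late in
  // thread shutdown still find a valid TSD.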
  u8 DestructorIterations = 0;

  void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(DestructorIterations, 0U);
    DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
    Instance->initCache(&Cache);
    DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
  }
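
  // On contention, stash a coarse timestamp in Precedence (the monotonic time,
  // right-shifted on 32-bit platforms so it fits in a uptr) the first time
  // tryLock() fails; a successful lock clears it. The shared TSD registry is
  // expected to use getPrecedence() to pick the least contended TSD.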
  inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
    if (Mutex.tryLock()) {
      atomic_store_relaxed(&Precedence, 0);
      return true;
    }
    if (atomic_load_relaxed(&Precedence) == 0)
      atomic_store_relaxed(&Precedence,
                           static_cast<uptr>(getMonotonicTimeFast() >>
                                             FIRST_32_SECOND_64(16, 0)));
    return false;
  }
  inline void lock() NO_THREAD_SAFETY_ANALYSIS {
    atomic_store_relaxed(&Precedence, 0);
    Mutex.lock();
  }
  inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
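
  // Returns this TSD's local resources to the allocator instance; expected to
  // run when the owning thread is being torn down.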
  void commitBack(Allocator *Instance) { Instance->commitBack(this); }

  // As noted in the comments attached to `getCache()`, the TSD doesn't always
  // need to be locked. In those cases, we only skip the check until all paths
  // acquire the TSD locks.
  void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
    if (SCUDO_DEBUG && !BypassCheck)
      Mutex.assertHeld();
  }

  // Ideally, we would assert that all operations on Cache/QuarantineCache are
  // performed with `Mutex` acquired. However, the current architecture of TSD
  // access doesn't cooperate well with the thread-safety analysis because of
  // pointer aliasing, so for now the assertion only lives on the getters of
  // Cache/QuarantineCache.
  //
  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` here, but
  // acquiring a TSD doesn't always require holding the lock. Add this
  // assertion once the lock is always acquired.
  typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
  typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
    return QuarantineCache;
  }
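
  // A sketch of typical use from the allocator side (illustrative only, not
  // actual scudo code; `TSD` here is assumed to come from a TSD registry):
  //
  //   if (!TSD->tryLock())
  //     TSD->lock();                    // fall back to a blocking lock
  //   TSD->assertLocked(/*BypassCheck=*/false);
  //   auto &Cache = TSD->getCache();    // requires `Mutex` to be held
  //   ... allocate through Cache ...
  //   TSD->unlock();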

private:
  HybridMutex Mutex;
  atomic_uptr Precedence = {};

  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};

} // namespace scudo

#endif // SCUDO_TSD_H_