/*
 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 #include "wtf/AddressSanitizer.h"
34 #include "wtf/Assertions.h"
43 #if defined(THREAD_SANITIZER)
44 #include <sanitizer/tsan_interface_atomic.h>
47 #if defined(ADDRESS_SANITIZER)
48 #include <sanitizer/asan_interface.h>
// atomicAdd returns the result of the addition.
ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment)
{
    return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment;
}
ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment)
{
    return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment;
}
ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, unsigned long long increment)
{
    return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), static_cast<long long>(increment)) + increment;
}
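
// InterlockedExchangeAdd returns the value the addend held *before* the
// addition, so the increment is added back above to give this function its
// add-and-fetch contract. A small illustration (names are hypothetical):
//
//   int counter = 0;
//   int result = atomicAdd(&counter, 5); // counter == 5, result == 5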

// atomicSubtract returns the result of the subtraction.
ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement)
{
    return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(-decrement)) - decrement;
}
ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement)
{
    return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), -static_cast<long>(decrement)) - decrement;
}
ALWAYS_INLINE unsigned long long atomicSubtract(unsigned long long volatile* addend, unsigned long long decrement)
{
    return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), -static_cast<long long>(decrement)) - decrement;
}
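
// Note the negation order in the unsigned overloads above: the decrement is
// cast to a signed type first and then negated. Negating the unsigned value
// directly would wrap modulo 2^N, and converting that wrapped value to long
// is implementation-defined (pre-C++20); negating after the cast avoids
// this. A usage sketch (names are hypothetical):
//
//   unsigned count = 10;
//   unsigned left = atomicSubtract(&count, 3u); // count == 7, left == 7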

ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }

ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); }
ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); }

ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr)
{
    int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1);
    ASSERT(!ret || ret == 1);
    return ret;
}

ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
{
    ASSERT(*ptr == 1);
    InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0);
}

#else

// atomicAdd returns the result of the addition.
ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { return __sync_add_and_fetch(addend, increment); }
ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { return __sync_add_and_fetch(addend, increment); }
ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, unsigned long increment) { return __sync_add_and_fetch(addend, increment); }
// atomicSubtract returns the result of the subtraction.
ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { return __sync_sub_and_fetch(addend, decrement); }
ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { return __sync_sub_and_fetch(addend, decrement); }
ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, unsigned long decrement) { return __sync_sub_and_fetch(addend, decrement); }

ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return atomicAdd(addend, 1); }
ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return atomicSubtract(addend, 1); }

ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); }
ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); }

ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr)
{
    int ret = __sync_lock_test_and_set(ptr, 1);
    ASSERT(!ret || ret == 1);
    return ret;
}

ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
{
    ASSERT(*ptr == 1);
    __sync_lock_release(ptr);
}
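
// atomicTestAndSetToOne and atomicSetOneToZero form an acquire/release pair
// (__sync_lock_test_and_set is an acquire barrier, __sync_lock_release a
// release barrier) and can back a simple spin lock. A minimal sketch
// (illustrative only; gLock and the helpers are hypothetical):
//
//   int gLock = 0;
//
//   void spinLock()
//   {
//       while (atomicTestAndSetToOne(&gLock)) { } // spin until 0 -> 1 succeeds
//   }
//
//   void spinUnlock()
//   {
//       atomicSetOneToZero(&gLock); // gLock must currently be 1
//   }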

#endif

#if defined(THREAD_SANITIZER)
// The definitions below assume an LP64 data model. This is fine because
// TSan is only supported on x86_64 Linux.
#if CPU(64BIT) && OS(LINUX)
ALWAYS_INLINE void releaseStore(volatile int* ptr, int value)
{
    __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value)
{
    __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr), static_cast<int>(value), __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile long* ptr, long value)
{
    __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value)
{
    __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value)
{
    __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
}
ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value)
{
    __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
}

ALWAYS_INLINE int acquireLoad(volatile const int* ptr)
{
    return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr)
{
    return static_cast<unsigned>(__tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire));
}
ALWAYS_INLINE long acquireLoad(volatile const long* ptr)
{
    return static_cast<long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire));
}
ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr)
{
    return static_cast<unsigned long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire));
}
ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr)
{
    return reinterpret_cast<void*>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire));
}

#endif

#else // defined(THREAD_SANITIZER)

#if CPU(X86) || CPU(X86_64)
// Only a compiler barrier is needed.
#if COMPILER(MSVC)
// Starting from Visual Studio 2005, the compiler guarantees acquire and
// release semantics for operations on volatile variables. See the MSDN entry
// for the MemoryBarrier macro.
#define MEMORY_BARRIER()
#else
#define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory")
#endif
#elif CPU(ARM) && (OS(LINUX) || OS(ANDROID))
// On ARM, __sync_synchronize generates a dmb, which is very expensive on
// single-core devices that don't actually need it. Avoid the cost by calling
// into the kernel's kuser_memory_barrier helper, which is mapped at a fixed
// address in every process and does the right thing for the current CPU.
inline void memoryBarrier()
{
    // Note: This is a function call, which is also an implicit compiler barrier.
    typedef void (*KernelMemoryBarrierFunc)();
    ((KernelMemoryBarrierFunc)0xffff0fa0)();
}
#define MEMORY_BARRIER() memoryBarrier()
#else
// Fall back to the compiler intrinsic on all other platforms.
#define MEMORY_BARRIER() __sync_synchronize()
#endif

ALWAYS_INLINE void releaseStore(volatile int* ptr, int value)
{
    MEMORY_BARRIER();
    *ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value)
{
    MEMORY_BARRIER();
    *ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile long* ptr, long value)
{
    MEMORY_BARRIER();
    *ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value)
{
    MEMORY_BARRIER();
    *ptr = value;
}
ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value)
{
    MEMORY_BARRIER();
    *ptr = value;
}
ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value)
{
    MEMORY_BARRIER();
    *ptr = value;
}

ALWAYS_INLINE int acquireLoad(volatile const int* ptr)
{
    int value = *ptr;
    MEMORY_BARRIER();
    return value;
}
ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr)
{
    unsigned value = *ptr;
    MEMORY_BARRIER();
    return value;
}
ALWAYS_INLINE long acquireLoad(volatile const long* ptr)
{
    long value = *ptr;
    MEMORY_BARRIER();
    return value;
}
ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr)
{
    unsigned long value = *ptr;
    MEMORY_BARRIER();
    return value;
}
ALWAYS_INLINE unsigned long long acquireLoad(volatile const unsigned long long* ptr)
{
    unsigned long long value = *ptr;
    MEMORY_BARRIER();
    return value;
}
ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr)
{
    void* value = *ptr;
    MEMORY_BARRIER();
    return value;
}
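
// releaseStore and acquireLoad are meant to be used as a matched pair for
// one-way publication: the writer fills in data, then releaseStores a flag;
// the reader acquireLoads the flag before touching the data. A minimal
// sketch (illustrative only; gData and gReady are hypothetical):
//
//   int gData = 0;
//   int gReady = 0;
//
//   // Writer thread:
//   gData = 42;
//   releaseStore(&gReady, 1); // all earlier writes are published with the flag
//
//   // Reader thread:
//   if (acquireLoad(&gReady))
//       ASSERT(gData == 42); // holds, thanks to the acquire/release pairing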

#if defined(ADDRESS_SANITIZER)

NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value)
{
    MEMORY_BARRIER();
    *ptr = value;
}

NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr)
{
    unsigned value = *ptr;
    MEMORY_BARRIER();
    return value;
}

#endif // defined(ADDRESS_SANITIZER)

#undef MEMORY_BARRIER

#endif // defined(THREAD_SANITIZER)

#if !defined(ADDRESS_SANITIZER)

ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value)
{
    releaseStore(ptr, value);
}

ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr)
{
    return acquireLoad(ptr);
}

#endif // !defined(ADDRESS_SANITIZER)

} // namespace WTF

using WTF::atomicAdd;
using WTF::atomicSubtract;
using WTF::atomicDecrement;
using WTF::atomicIncrement;
using WTF::atomicTestAndSetToOne;
using WTF::atomicSetOneToZero;
using WTF::acquireLoad;
using WTF::releaseStore;

// These functions allow loading from and storing to poisoned memory. Only
// use them if you know what you are doing, since they silence
// use-after-poison errors from ASan.
using WTF::asanUnsafeAcquireLoad;
using WTF::asanUnsafeReleaseStore;
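
// For example (illustrative only; the pointer and field are hypothetical), a
// collector that deliberately inspects poisoned heap objects might read a
// header word without tripping ASan:
//
//   unsigned flags = asanUnsafeAcquireLoad(&header->m_flags);

#endif // Atomics_h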