1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
#include <assert.h>
#include <stdlib.h>   // for calloc, malloc, realloc, free
#include <string.h>   // for memset

#include <algorithm>  // for min()
#include <new>        // for std::bad_alloc, std::set_new_handler

#include "base/atomicops.h"
#include "testing/gtest/include/gtest/gtest.h"
// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits - 1)) - 1);
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);
// Fill a buffer of the specified size with a predetermined pattern:
// byte i receives the value (i mod 256).  Paired with Valid() to check
// that allocator operations preserve buffer contents.
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}
// Check that the specified buffer has the predetermined pattern
// generated by Fill().  Returns true iff all n bytes match; an empty
// range (n == 0) is trivially valid.
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}
// Check that a buffer is completely zeroed (e.g. memory freshly returned
// by calloc/_recalloc).  An empty range is trivially zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}
56 static void CheckAlignment(void* p
, int align
) {
57 EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p
) & (align
-1));
// Return the next interesting size/delta to check.  Returns -1 if no more.
// Small sizes advance by one; past 100 the sequence visits the triple
// (power-1, power, power+1) around each power of two, and stops once the
// size reaches 100000.
static int NextSize(int size) {
  if (size < 100) {
    return size + 1;
  }
  if (size < 100000) {
    // Find next power of two
    int power = 1;
    while (power < size) {
      power <<= 1;
    }
    // Yield (power-1, power, power+1)
    if (size < power - 1) {
      return power - 1;
    }
    if (size == power - 1) {
      return power;
    }
    assert(size == power);
    return power + 1;
  }
  return -1;
}
85 template <class AtomicType
>
86 static void TestAtomicIncrement() {
87 // For now, we just test single threaded execution
89 // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
90 // outside the expected address bounds. This is in particular to
91 // test that some future change to the asm code doesn't cause the
92 // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
99 AtomicType prev_word_value
, next_word_value
;
100 memset(&prev_word_value
, 0xFF, sizeof(AtomicType
));
101 memset(&next_word_value
, 0xEE, sizeof(AtomicType
));
103 s
.prev_word
= prev_word_value
;
105 s
.next_word
= next_word_value
;
107 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, 1), 1);
108 EXPECT_EQ(s
.count
, 1);
109 EXPECT_EQ(s
.prev_word
, prev_word_value
);
110 EXPECT_EQ(s
.next_word
, next_word_value
);
112 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, 2), 3);
113 EXPECT_EQ(s
.count
, 3);
114 EXPECT_EQ(s
.prev_word
, prev_word_value
);
115 EXPECT_EQ(s
.next_word
, next_word_value
);
117 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, 3), 6);
118 EXPECT_EQ(s
.count
, 6);
119 EXPECT_EQ(s
.prev_word
, prev_word_value
);
120 EXPECT_EQ(s
.next_word
, next_word_value
);
122 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, -3), 3);
123 EXPECT_EQ(s
.count
, 3);
124 EXPECT_EQ(s
.prev_word
, prev_word_value
);
125 EXPECT_EQ(s
.next_word
, next_word_value
);
127 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, -2), 1);
128 EXPECT_EQ(s
.count
, 1);
129 EXPECT_EQ(s
.prev_word
, prev_word_value
);
130 EXPECT_EQ(s
.next_word
, next_word_value
);
132 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, -1), 0);
133 EXPECT_EQ(s
.count
, 0);
134 EXPECT_EQ(s
.prev_word
, prev_word_value
);
135 EXPECT_EQ(s
.next_word
, next_word_value
);
137 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, -1), -1);
138 EXPECT_EQ(s
.count
, -1);
139 EXPECT_EQ(s
.prev_word
, prev_word_value
);
140 EXPECT_EQ(s
.next_word
, next_word_value
);
142 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, -4), -5);
143 EXPECT_EQ(s
.count
, -5);
144 EXPECT_EQ(s
.prev_word
, prev_word_value
);
145 EXPECT_EQ(s
.next_word
, next_word_value
);
147 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s
.count
, 5), 0);
148 EXPECT_EQ(s
.count
, 0);
149 EXPECT_EQ(s
.prev_word
, prev_word_value
);
150 EXPECT_EQ(s
.next_word
, next_word_value
);
// NUM_BITS(T): number of bits in the representation of type T (assumes
// 8-bit bytes).  Used by the atomic-op tests to compute shift widths.
#define NUM_BITS(T) (sizeof(T) * 8)
157 template <class AtomicType
>
158 static void TestCompareAndSwap() {
159 AtomicType value
= 0;
160 AtomicType prev
= base::subtle::NoBarrier_CompareAndSwap(&value
, 0, 1);
164 // Use test value that has non-zero bits in both halves, more for testing
165 // 64-bit implementation on 32-bit platforms.
166 const AtomicType k_test_val
= (static_cast<uint64_t>(1) <<
167 (NUM_BITS(AtomicType
) - 2)) + 11;
169 prev
= base::subtle::NoBarrier_CompareAndSwap(&value
, 0, 5);
170 EXPECT_EQ(k_test_val
, value
);
171 EXPECT_EQ(k_test_val
, prev
);
174 prev
= base::subtle::NoBarrier_CompareAndSwap(&value
, k_test_val
, 5);
176 EXPECT_EQ(k_test_val
, prev
);
180 template <class AtomicType
>
181 static void TestAtomicExchange() {
182 AtomicType value
= 0;
183 AtomicType new_value
= base::subtle::NoBarrier_AtomicExchange(&value
, 1);
185 EXPECT_EQ(0, new_value
);
187 // Use test value that has non-zero bits in both halves, more for testing
188 // 64-bit implementation on 32-bit platforms.
189 const AtomicType k_test_val
= (static_cast<uint64_t>(1) <<
190 (NUM_BITS(AtomicType
) - 2)) + 11;
192 new_value
= base::subtle::NoBarrier_AtomicExchange(&value
, k_test_val
);
193 EXPECT_EQ(k_test_val
, value
);
194 EXPECT_EQ(k_test_val
, new_value
);
197 new_value
= base::subtle::NoBarrier_AtomicExchange(&value
, 5);
199 EXPECT_EQ(k_test_val
, new_value
);
203 template <class AtomicType
>
204 static void TestAtomicIncrementBounds() {
205 // Test increment at the half-width boundary of the atomic type.
206 // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
207 AtomicType test_val
= static_cast<uint64_t>(1) << (NUM_BITS(AtomicType
) / 2);
208 AtomicType value
= test_val
- 1;
209 AtomicType new_value
= base::subtle::NoBarrier_AtomicIncrement(&value
, 1);
210 EXPECT_EQ(test_val
, value
);
211 EXPECT_EQ(value
, new_value
);
213 base::subtle::NoBarrier_AtomicIncrement(&value
, -1);
214 EXPECT_EQ(test_val
- 1, value
);
217 // This is a simple sanity check that values are correct. Not testing
219 template <class AtomicType
>
220 static void TestStore() {
221 const AtomicType kVal1
= static_cast<AtomicType
>(0xa5a5a5a5a5a5a5a5LL
);
222 const AtomicType kVal2
= static_cast<AtomicType
>(-1);
226 base::subtle::NoBarrier_Store(&value
, kVal1
);
227 EXPECT_EQ(kVal1
, value
);
228 base::subtle::NoBarrier_Store(&value
, kVal2
);
229 EXPECT_EQ(kVal2
, value
);
231 base::subtle::Acquire_Store(&value
, kVal1
);
232 EXPECT_EQ(kVal1
, value
);
233 base::subtle::Acquire_Store(&value
, kVal2
);
234 EXPECT_EQ(kVal2
, value
);
236 base::subtle::Release_Store(&value
, kVal1
);
237 EXPECT_EQ(kVal1
, value
);
238 base::subtle::Release_Store(&value
, kVal2
);
239 EXPECT_EQ(kVal2
, value
);
242 // This is a simple sanity check that values are correct. Not testing
244 template <class AtomicType
>
245 static void TestLoad() {
246 const AtomicType kVal1
= static_cast<AtomicType
>(0xa5a5a5a5a5a5a5a5LL
);
247 const AtomicType kVal2
= static_cast<AtomicType
>(-1);
252 EXPECT_EQ(kVal1
, base::subtle::NoBarrier_Load(&value
));
254 EXPECT_EQ(kVal2
, base::subtle::NoBarrier_Load(&value
));
257 EXPECT_EQ(kVal1
, base::subtle::Acquire_Load(&value
));
259 EXPECT_EQ(kVal2
, base::subtle::Acquire_Load(&value
));
262 EXPECT_EQ(kVal1
, base::subtle::Release_Load(&value
));
264 EXPECT_EQ(kVal2
, base::subtle::Release_Load(&value
));
267 template <class AtomicType
>
268 static void TestAtomicOps() {
269 TestCompareAndSwap
<AtomicType
>();
270 TestAtomicExchange
<AtomicType
>();
271 TestAtomicIncrementBounds
<AtomicType
>();
272 TestStore
<AtomicType
>();
273 TestLoad
<AtomicType
>();
276 static void TestCalloc(size_t n
, size_t s
, bool ok
) {
277 char* p
= reinterpret_cast<char*>(calloc(n
, s
));
279 EXPECT_EQ(NULL
, p
) << "calloc(n, s) should not succeed";
281 EXPECT_NE(reinterpret_cast<void*>(NULL
), p
) <<
282 "calloc(n, s) should succeed";
283 for (int i
= 0; i
< n
*s
; i
++) {
284 EXPECT_EQ('\0', p
[i
]);
// MSVC C4530 complains about exception handler usage when exceptions are
// disabled. Temporarily disable that warning so we can test that they are, in
// fact, disabled for the allocation paths exercised below.
294 #pragma warning(push)
295 #pragma warning(disable: 4530)
// A global test counter for number of times the NewHandler is called.
static int news_handled = 0;

// Replacement new-handler installed by TestNothrowNew(): counts each
// invocation (checked via news_handled) and then reports exhaustion by
// throwing std::bad_alloc.
static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}
305 // Because we compile without exceptions, we expect these will not throw.
306 static void TestOneNewWithoutExceptions(void* (*func
)(size_t),
310 void* ptr
= (*func
)(kNotTooBig
);
311 EXPECT_NE(reinterpret_cast<void*>(NULL
), ptr
) <<
312 "allocation should not have failed.";
314 EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
319 void* rv
= (*func
)(kTooBig
);
321 EXPECT_FALSE(should_throw
) << "allocation should have thrown.";
323 EXPECT_TRUE(should_throw
) << "allocation threw unexpected exception.";
327 static void TestNothrowNew(void* (*func
)(size_t)) {
330 // test without new_handler:
331 std::new_handler saved_handler
= std::set_new_handler(0);
332 TestOneNewWithoutExceptions(func
, false);
334 // test with new_handler:
335 std::set_new_handler(TestNewHandler
);
336 TestOneNewWithoutExceptions(func
, true);
337 EXPECT_EQ(news_handled
, 1) << "nothrow new_handler was not called.";
338 std::set_new_handler(saved_handler
);
347 //-----------------------------------------------------------------------------
349 TEST(Atomics
, AtomicIncrementWord
) {
350 TestAtomicIncrement
<AtomicWord
>();
353 TEST(Atomics
, AtomicIncrement32
) {
354 TestAtomicIncrement
<Atomic32
>();
357 TEST(Atomics
, AtomicOpsWord
) {
358 TestAtomicIncrement
<AtomicWord
>();
361 TEST(Atomics
, AtomicOps32
) {
362 TestAtomicIncrement
<Atomic32
>();
365 TEST(Allocators
, Malloc
) {
366 // Try allocating data with a bunch of alignments and sizes
367 for (int size
= 1; size
< 1048576; size
*= 2) {
368 unsigned char* ptr
= reinterpret_cast<unsigned char*>(malloc(size
));
369 CheckAlignment(ptr
, 2); // Should be 2 byte aligned
371 EXPECT_TRUE(Valid(ptr
, size
));
376 TEST(Allocators
, Calloc
) {
377 TestCalloc(0, 0, true);
378 TestCalloc(0, 1, true);
379 TestCalloc(1, 1, true);
380 TestCalloc(1<<10, 0, true);
381 TestCalloc(1<<20, 0, true);
382 TestCalloc(0, 1<<10, true);
383 TestCalloc(0, 1<<20, true);
384 TestCalloc(1<<20, 2, true);
385 TestCalloc(2, 1<<20, true);
386 TestCalloc(1000, 1000, true);
388 TestCalloc(kMaxSize
, 2, false);
389 TestCalloc(2, kMaxSize
, false);
390 TestCalloc(kMaxSize
, kMaxSize
, false);
392 TestCalloc(kMaxSignedSize
, 3, false);
393 TestCalloc(3, kMaxSignedSize
, false);
394 TestCalloc(kMaxSignedSize
, kMaxSignedSize
, false);
397 TEST(Allocators
, New
) {
398 TestNothrowNew(&::operator new);
399 TestNothrowNew(&::operator new[]);
402 // This makes sure that reallocing a small number of bytes in either
403 // direction doesn't cause us to allocate new memory.
404 TEST(Allocators
, Realloc1
) {
405 int start_sizes
[] = { 100, 1000, 10000, 100000 };
406 int deltas
[] = { 1, -2, 4, -8, 16, -32, 64, -128 };
408 for (int s
= 0; s
< sizeof(start_sizes
)/sizeof(*start_sizes
); ++s
) {
409 void* p
= malloc(start_sizes
[s
]);
411 // The larger the start-size, the larger the non-reallocing delta.
412 for (int d
= 0; d
< s
*2; ++d
) {
413 void* new_p
= realloc(p
, start_sizes
[s
] + deltas
[d
]);
414 ASSERT_EQ(p
, new_p
); // realloc should not allocate new memory
416 // Test again, but this time reallocing smaller first.
417 for (int d
= 0; d
< s
*2; ++d
) {
418 void* new_p
= realloc(p
, start_sizes
[s
] - deltas
[d
]);
419 ASSERT_EQ(p
, new_p
); // realloc should not allocate new memory
425 TEST(Allocators
, Realloc2
) {
426 for (int src_size
= 0; src_size
>= 0; src_size
= NextSize(src_size
)) {
427 for (int dst_size
= 0; dst_size
>= 0; dst_size
= NextSize(dst_size
)) {
428 unsigned char* src
= reinterpret_cast<unsigned char*>(malloc(src_size
));
431 reinterpret_cast<unsigned char*>(realloc(src
, dst_size
));
432 EXPECT_TRUE(Valid(dst
, min(src_size
, dst_size
)));
434 EXPECT_TRUE(Valid(dst
, dst_size
));
435 if (dst
!= NULL
) free(dst
);
439 // Now make sure realloc works correctly even when we overflow the
440 // packed cache, so some entries are evicted from the cache.
441 // The cache has 2^12 entries, keyed by page number.
442 const int kNumEntries
= 1 << 14;
443 int** p
= reinterpret_cast<int**>(malloc(sizeof(*p
) * kNumEntries
));
445 for (int i
= 0; i
< kNumEntries
; i
++) {
446 // no page size is likely to be bigger than 8192?
447 p
[i
] = reinterpret_cast<int*>(malloc(8192));
448 p
[i
][1000] = i
; // use memory deep in the heart of p
450 for (int i
= 0; i
< kNumEntries
; i
++) {
451 p
[i
] = reinterpret_cast<int*>(realloc(p
[i
], 9000));
453 for (int i
= 0; i
< kNumEntries
; i
++) {
457 EXPECT_EQ(kNumEntries
/2 * (kNumEntries
- 1), sum
); // assume kNE is even
461 TEST(Allocators
, ReallocZero
) {
462 // Test that realloc to zero does not return NULL.
463 for (int size
= 0; size
>= 0; size
= NextSize(size
)) {
464 char* ptr
= reinterpret_cast<char*>(malloc(size
));
465 EXPECT_NE(static_cast<char*>(NULL
), ptr
);
466 ptr
= reinterpret_cast<char*>(realloc(ptr
, 0));
467 EXPECT_NE(static_cast<char*>(NULL
), ptr
);
475 TEST(Allocators
, Recalloc
) {
476 for (int src_size
= 0; src_size
>= 0; src_size
= NextSize(src_size
)) {
477 for (int dst_size
= 0; dst_size
>= 0; dst_size
= NextSize(dst_size
)) {
479 reinterpret_cast<unsigned char*>(_recalloc(NULL
, 1, src_size
));
480 EXPECT_TRUE(IsZeroed(src
, src_size
));
483 reinterpret_cast<unsigned char*>(_recalloc(src
, 1, dst_size
));
484 EXPECT_TRUE(Valid(dst
, min(src_size
, dst_size
)));
486 EXPECT_TRUE(Valid(dst
, dst_size
));
493 // Test windows specific _aligned_malloc() and _aligned_free() methods.
494 TEST(Allocators
, AlignedMalloc
) {
495 // Try allocating data with a bunch of alignments and sizes
496 static const int kTestAlignments
[] = {8, 16, 256, 4096, 8192, 16384};
497 for (int size
= 1; size
> 0; size
= NextSize(size
)) {
498 for (int i
= 0; i
< ARRAYSIZE(kTestAlignments
); ++i
) {
499 unsigned char* ptr
= static_cast<unsigned char*>(
500 _aligned_malloc(size
, kTestAlignments
[i
]));
501 CheckAlignment(ptr
, kTestAlignments
[i
]);
503 EXPECT_TRUE(Valid(ptr
, size
));
505 // Make a second allocation of the same size and alignment to prevent
506 // allocators from passing this test by accident. Per jar, tcmalloc
507 // provides allocations for new (never before seen) sizes out of a thread
508 // local heap of a given "size class." Each time the test requests a new
509 // size, it will usually get the first element of a span, which is a
510 // 4K aligned allocation.
511 unsigned char* ptr2
= static_cast<unsigned char*>(
512 _aligned_malloc(size
, kTestAlignments
[i
]));
513 CheckAlignment(ptr2
, kTestAlignments
[i
]);
515 EXPECT_TRUE(Valid(ptr2
, size
));
517 // Should never happen, but sanity check just in case.
518 ASSERT_NE(ptr
, ptr2
);
528 int main(int argc
, char** argv
) {
529 testing::InitGoogleTest(&argc
, argv
);
530 return RUN_ALL_TESTS();