// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>   // for min()
#include <new>         // for std::set_new_handler, std::bad_alloc

#include "base/atomicops.h"
#include "testing/gtest/include/gtest/gtest.h"

using std::min;

// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits - 1)) - 1);
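// (For example, on a 64-bit platform kMaxSignedSize is 2^63 - 1.)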
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);

// Fill a buffer of the specified size with a predetermined pattern
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill()
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

static void CheckAlignment(void* p, int align) {
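  // Assumes align is a power of two: p is aligned iff the low log2(align)
  // bits of its address are all zero.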
  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align - 1));
}

// Return the next interesting size/delta to check.  Returns -1 if no more.
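// Sizes proceed 1, 2, ..., 100, then the triple (power-1, power, power+1)
// around each successive power of two, and the sequence ends once it
// exceeds 100000.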
static int NextSize(int size) {
  if (size < 100) {
    return size + 1;
  } else if (size < 100000) {
    // Find next power of two
    int power = 1;
    while (power < size) {
      power <<= 1;
    }
    // Yield (power-1, power, power+1)
    if (size < power - 1) {
      return power - 1;
    } else if (size == power - 1) {
      return power;
    } else {
      assert(size == power);
      return power + 1;
    }
  } else {
    return -1;
  }
}

template <class AtomicType>
static void TestAtomicIncrement() {
  // For now, we just test single threaded execution

  // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
  // outside the expected address bounds.  This is in particular to
  // test that some future change to the asm code doesn't cause the
  // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
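  // Layout: s.count sits between the two guard words, so an increment that
  // writes outside &s.count corrupts a guard and fails the EXPECT_EQs below.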
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;

  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}

#define NUM_BITS(T) (sizeof(T) * 8)

template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
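  // e.g. for a 32-bit AtomicType this is (1 << 30) + 11 = 0x4000000B.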
  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, prev);
}

template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, new_value);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(1, new_value);  // the exchange returns the old value, which was 1

  new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, new_value);
}

template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.
  // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
  AtomicType test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
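  // e.g. for a 64-bit AtomicType, value starts at 0xFFFFFFFF and the
  // increment must carry into the upper 32 bits to reach 0x100000000.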
  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity.
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);
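  // For a 32-bit AtomicType these truncate to 0xa5a5a5a5 and 0xffffffff;
  // both patterns put non-zero bits in every byte of the word.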

  AtomicType value;

  base::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity.
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
}

template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (size_t i = 0; i < n * s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}

// A global test counter for number of times the NewHandler is called.
static int news_handled = 0;
static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}

// Because we compile without exceptions, we expect these will not throw.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // success test
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch (...) {
    EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
  }

  // failure test
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch (...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // test without new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // test with new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2 byte aligned
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1<<10, 0, true);
  TestCalloc(1<<20, 0, true);
  TestCalloc(0, 1<<10, true);
  TestCalloc(0, 1<<20, true);
  TestCalloc(1<<20, 2, true);
  TestCalloc(2, 1<<20, true);
  TestCalloc(1000, 1000, true);
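
  // Each of the following products overflows size_t, so calloc must detect
  // the overflow and fail rather than return an undersized buffer.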
  TestCalloc(kMaxSize, 2, false);
  TestCalloc(2, kMaxSize, false);
  TestCalloc(kMaxSize, kMaxSize, false);

  TestCalloc(kMaxSignedSize, 3, false);
  TestCalloc(3, kMaxSignedSize, false);
  TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
}

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
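// (tcmalloc serves requests out of size classes; a request that grows or
// shrinks by only a few bytes usually maps to the same class, so realloc
// can keep the original block.)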
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };

  for (size_t s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
    void* p = malloc(start_sizes[s]);
    // The larger the start-size, the larger the non-reallocing delta.
    for (size_t d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (size_t d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
  const int kNumEntries = 1 << 14;
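  // With 2^14 allocations of at least 8KB each, the entries span far more
  // than 2^12 distinct pages, so the cache is forced to evict.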
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // no page size is likely to be bigger than 8192?
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;  // use memory deep in the heart of p
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
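  // Each p[i][1000] still holds i, so the sum should be
  // 0 + 1 + ... + (kNumEntries-1) = kNumEntries * (kNumEntries - 1) / 2.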
  EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum);  // assume kNE is even
  free(p);
}

TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32
TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}

// Test windows specific _aligned_malloc() and _aligned_free() methods.
TEST(Allocators, AlignedMalloc) {
  // Try allocating data with a bunch of alignments and sizes
  static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384};
  for (int size = 1; size > 0; size = NextSize(size)) {
    for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) {
      unsigned char* ptr = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr, kTestAlignments[i]);
      Fill(ptr, size);
      EXPECT_TRUE(Valid(ptr, size));

      // Make a second allocation of the same size and alignment to prevent
      // allocators from passing this test by accident.  Per jar, tcmalloc
      // provides allocations for new (never before seen) sizes out of a thread
      // local heap of a given "size class."  Each time the test requests a new
      // size, it will usually get the first element of a span, which is a
      // 4K aligned allocation.
      unsigned char* ptr2 = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr2, kTestAlignments[i]);
      Fill(ptr2, size);
      EXPECT_TRUE(Valid(ptr2, size));

      // Should never happen, but sanity check just in case.
      ASSERT_NE(ptr, ptr2);
      _aligned_free(ptr);
      _aligned_free(ptr2);
    }
  }
}
#endif  // WIN32

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}