// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <stdlib.h>   // for malloc(), calloc(), realloc(), free()
#include <string.h>   // for memset()
#include <algorithm>  // for min()
#include <new>        // for std::set_new_handler()

#include "base/atomicops.h"
#include "base/basictypes.h"  // for uint64
#include "testing/gtest/include/gtest/gtest.h"

using std::min;
// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits - 1)) - 1);
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);
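
// With a 64-bit size_t these work out to kSizeBits == 64,
// kMaxSize == 0xffffffffffffffff, and kMaxSignedSize == 0x7fffffffffffffff.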

// Fill a buffer of the specified size with a predetermined pattern
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill()
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

static void CheckAlignment(void* p, int align) {
  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align - 1));
}

// Return the next interesting size/delta to check.  Returns -1 if no more.
static int NextSize(int size) {
  if (size < 100)
    return size + 1;

  if (size < 100000) {
    // Find next power of two
    int power = 1;
    while (power < size)
      power <<= 1;

    // Yield (power-1, power, power+1)
    if (size < power - 1)
      return power - 1;
    if (size == power - 1)
      return power;
    assert(size == power);
    return power + 1;
  }

  return -1;
}

#define GG_ULONGLONG(x) static_cast<uint64>(x)

template <class AtomicType>
static void TestAtomicIncrement() {
  // For now, we just test single threaded execution

  // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
  // outside the expected address bounds.  This is in particular to
  // test that some future change to the asm code doesn't cause the
  // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit
  // machines.
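  // Guard words sit on either side of the counter being incremented; the
  // layout follows the field accesses below (s.prev_word, s.count,
  // s.next_word).
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;
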
  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}

#define NUM_BITS(T) (sizeof(T) * 8)
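
// For example, NUM_BITS(Atomic32) is 32, so the k_test_val used in the
// tests below works out to (1 << 30) + 11 for a 32-bit atomic type.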

template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (GG_ULONGLONG(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);  // the swap should fail: value was not 0
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);  // the swap should succeed this time
  EXPECT_EQ(k_test_val, prev);
}

template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, new_value);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (GG_ULONGLONG(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, new_value);

  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, new_value);
}

template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.
  // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
  AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}
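
// For Atomic32 the boundary exercised above is 1 << 16; for a 64-bit atomic
// type it is 1 << 32, so the increment carries across the 32-bit halves of
// the value.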

// This is a simple sanity check that values are correct. Not testing
// atomicity.
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  base::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity.
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
}

template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (size_t i = 0; i < n * s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}

// A global test counter for number of times the NewHandler is called.
static int news_handled = 0;
static void TestNewHandler() {
  news_handled++;
  throw std::bad_alloc();
}

// Because we compile without exceptions, we expect these will not throw.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // success test
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch (...) {
    EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
  }

  // failure test
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch (...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // test without new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // test with new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2 byte aligned
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1<<10, 0, true);
  TestCalloc(1<<20, 0, true);
  TestCalloc(0, 1<<10, true);
  TestCalloc(0, 1<<20, true);
  TestCalloc(1<<20, 2, true);
  TestCalloc(2, 1<<20, true);
  TestCalloc(1000, 1000, true);

  TestCalloc(kMaxSize, 2, false);
  TestCalloc(2, kMaxSize, false);
  TestCalloc(kMaxSize, kMaxSize, false);

  TestCalloc(kMaxSignedSize, 3, false);
  TestCalloc(3, kMaxSignedSize, false);
  TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
}
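
// The overflow cases above rely on n * s wrapping in size_t arithmetic:
// e.g. kMaxSize * kMaxSize == (2^64 - 1)^2, which wraps to 1 with a 64-bit
// size_t, so a calloc() that multiplied naively would allocate one byte.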

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };

  for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
    void* p = malloc(start_sizes[s]);
    ASSERT_NE(static_cast<void*>(NULL), p);
    // The larger the start-size, the larger the non-reallocing delta.
    for (int d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (int d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
  const int kNumEntries = 1 << 14;
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // no page size is likely to be bigger than 8192?
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;  // use memory deep in the heart of p
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
  EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum);  // assume kNE is even
  free(p);
}

TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32

TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}

// Test windows specific _aligned_malloc() and _aligned_free() methods.
TEST(Allocators, AlignedMalloc) {
  // Try allocating data with a bunch of alignments and sizes
  static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384};
  for (int size = 1; size > 0; size = NextSize(size)) {
    for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) {
      unsigned char* ptr = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr, kTestAlignments[i]);
      Fill(ptr, size);
      EXPECT_TRUE(Valid(ptr, size));

      // Make a second allocation of the same size and alignment to prevent
      // allocators from passing this test by accident.  Per jar, tcmalloc
      // provides allocations for new (never before seen) sizes out of a thread
      // local heap of a given "size class."  Each time the test requests a new
      // size, it will usually get the first element of a span, which is a
      // 4K aligned allocation.
      unsigned char* ptr2 = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr2, kTestAlignments[i]);
      Fill(ptr2, size);
      EXPECT_TRUE(Valid(ptr2, size));

      // Should never happen, but sanity check just in case.
      ASSERT_NE(ptr, ptr2);
      _aligned_free(ptr);
      _aligned_free(ptr2);
    }
  }
}

#endif  // WIN32

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}