// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // for memset()

#include <algorithm>  // for min()
#include <new>        // for set_new_handler(), bad_alloc

#include "base/atomicops.h"
#include "testing/gtest/include/gtest/gtest.h"

// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits - 1)) - 1);
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);

namespace {

using std::min;

// Fill a buffer of the specified size with a predetermined pattern.
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill().
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

// Check alignment.
static void CheckAlignment(void* p, int align) {
  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align - 1));
}

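// A note on the mask trick above: for any power-of-two |align|, the low
// log2(align) bits of an aligned address are zero, so ANDing the address with
// (align - 1) isolates exactly those bits. For example, with align == 16 the
// mask is 0xF, and 0x1040 & 0xF == 0 while the misaligned 0x1044 & 0xF == 4.
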
// Return the next interesting size/delta to check.  Returns -1 if no more.
static int NextSize(int size) {
  if (size < 100)
    return size + 1;

  if (size < 100000) {
    // Find next power of two.
    int power = 1;
    while (power < size)
      power <<= 1;

    // Yield (power - 1, power, power + 1).
    if (size < power - 1)
      return power - 1;

    if (size == power - 1)
      return power;

    assert(size == power);
    return power + 1;
  } else {
    return -1;
  }
}

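// For reference, NextSize() yields 1, 2, ..., 100 one at a time, then visits
// each power of two along with its immediate neighbors (127, 128, 129,
// 255, 256, 257, ...) until the size passes 100000, probing allocator
// size-class boundaries without testing every size exhaustively.
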
template <class AtomicType>
static void TestAtomicIncrement() {
  // For now, we just test single threaded execution.

  // Use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
  // outside the expected address bounds.  This is in particular to
  // test that some future change to the asm code doesn't cause the
  // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;

  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}

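// The sequence above deliberately walks the counter down through zero into
// negative values and back, so a word-size or sign-extension bug in the
// increment path shows up either as a wrong return value or as a clobbered
// guard word.
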
#define NUM_BITS(T) (sizeof(T) * 8)

template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use a test value that has non-zero bits in both halves, mainly to
  // exercise the 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, prev);
}

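// Worked example of k_test_val: for a 64-bit AtomicType it is
// (1 << 62) + 11 = 0x400000000000000B, which has non-zero bits in both the
// high and low 32-bit halves, so a compare-and-swap that only compares or
// writes one half of the word is caught by the checks above.
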
template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, new_value);

  // Use a test value that has non-zero bits in both halves, mainly to
  // exercise the 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, new_value);

  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, new_value);
}

template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.
  // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
  AtomicType test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}

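// For a 64-bit AtomicType, test_val is 1 << 32, so the increment takes value
// from 0xFFFFFFFF to 0x100000000 and must carry out of the low 32-bit word;
// an implementation that only updates the low word would leave value at 0.
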
// This is a simple sanity check that values are correct.  Not testing
// atomicity.
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  base::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check that values are correct.  Not testing
// atomicity.
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
}

template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (size_t i = 0; i < n * s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}

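// calloc(n, s) must detect overflow in the n * s product rather than letting
// it wrap and returning an undersized block. A typical guard, sketched here
// only to illustrate what the allocator is expected to do internally, is:
//
//   if (s != 0 && n > kMaxSize / s)
//     return NULL;  // n * s would overflow size_t.
//
// This is why the kMaxSize and kMaxSignedSize cases in
// TEST(Allocators, Calloc) below are required to fail.
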
// MSVC C4530 complains about exception handler usage when exceptions are
// disabled. Temporarily disable that warning so we can test that they are, in
// fact, disabled.
#if defined(OS_WIN)
#pragma warning(push)
#pragma warning(disable: 4530)
#endif

// A global test counter for number of times the NewHandler is called.
static int news_handled = 0;
static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}

// Because we compile without exceptions, we expect these will not throw.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // Success test.
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch (...) {
    ADD_FAILURE() << "allocation threw unexpected exception.";
  }

  // Failure test.
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch (...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // Test without new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // Test with new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

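// std::set_new_handler() returns the previously installed handler, which is
// how saved_handler is captured and restored above. When an allocation fails,
// operator new invokes the current handler before giving up, so a single
// failed allocation with TestNewHandler installed should bump news_handled
// exactly once, which the EXPECT_EQ above verifies.
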
#if defined(OS_WIN)
#pragma warning(pop)
#endif

}  // namespace

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<base::subtle::AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<base::subtle::Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<base::subtle::AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<base::subtle::Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes.
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2-byte aligned.
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1 << 10, 0, true);
  TestCalloc(1 << 20, 0, true);
  TestCalloc(0, 1 << 10, true);
  TestCalloc(0, 1 << 20, true);
  TestCalloc(1 << 20, 2, true);
  TestCalloc(2, 1 << 20, true);
  TestCalloc(1000, 1000, true);

  TestCalloc(kMaxSize, 2, false);
  TestCalloc(2, kMaxSize, false);
  TestCalloc(kMaxSize, kMaxSize, false);

  TestCalloc(kMaxSignedSize, 3, false);
  TestCalloc(3, kMaxSignedSize, false);
  TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
}

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };

  for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
    void* p = malloc(start_sizes[s]);
    ASSERT_TRUE(p);
    // The larger the start-size, the larger the non-reallocing delta.
    for (int d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (int d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

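// This expectation holds for size-class-based allocators such as tcmalloc
// (referenced later in this file): each request is rounded up to a size
// class, and growing or shrinking within the same class leaves the block in
// place, so realloc can simply return the original pointer.
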
TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
  const int kNumEntries = 1 << 14;
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // No page size is likely to be bigger than 8192.
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;  // Use memory deep in the heart of p.
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
  EXPECT_EQ(kNumEntries / 2 * (kNumEntries - 1), sum);  // assumes kNumEntries is even
  free(p);
}

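// The expected sum is 0 + 1 + ... + (kNumEntries - 1)
// = kNumEntries * (kNumEntries - 1) / 2, written above as
// kNumEntries / 2 * (kNumEntries - 1), which is exact only because
// kNumEntries is even. A mismatch would mean realloc lost or corrupted data
// while cache entries were being evicted.
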
TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32
// Test recalloc.
TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}

// Test windows specific _aligned_malloc() and _aligned_free() methods.
TEST(Allocators, AlignedMalloc) {
  // Try allocating data with a bunch of alignments and sizes.
  static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384};
  for (int size = 1; size > 0; size = NextSize(size)) {
    for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) {
      unsigned char* ptr = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr, kTestAlignments[i]);
      Fill(ptr, size);
      EXPECT_TRUE(Valid(ptr, size));

      // Make a second allocation of the same size and alignment to prevent
      // allocators from passing this test by accident. Per jar, tcmalloc
      // provides allocations for new (never before seen) sizes out of a thread
      // local heap of a given "size class." Each time the test requests a new
      // size, it will usually get the first element of a span, which is a
      // 4K-aligned allocation.
      unsigned char* ptr2 = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr2, kTestAlignments[i]);
      Fill(ptr2, size);
      EXPECT_TRUE(Valid(ptr2, size));

      // Should never happen, but sanity check just in case.
      ASSERT_NE(ptr, ptr2);
      _aligned_free(ptr);
      _aligned_free(ptr2);
    }
  }
}

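// Note that memory obtained from _aligned_malloc() must be released with
// _aligned_free(); passing it to plain free() is an error in the MSVC CRT,
// which is why the test frees both pointers explicitly above.
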
#endif  // WIN32

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}