// third_party/WebKit/Source/wtf/PartitionAllocTest.cpp
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "wtf/PartitionAlloc.h"

#include "wtf/BitwiseOperations.h"
#include "wtf/CPU.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/Vector.h"
#include <gtest/gtest.h>
#include <stdlib.h>
#include <string.h>

#if OS(POSIX)
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>

// Some POSIX headers (notably OS X and older BSDs) only define MAP_ANON.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#endif // OS(POSIX)

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

namespace WTF {

namespace {

const size_t kTestMaxAllocation = 4096;
SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
PartitionAllocatorGeneric genericAllocator;

const size_t kTestAllocSize = 16;
#if !ENABLE(ASSERT)
const size_t kPointerOffset = 0;
const size_t kExtraAllocSize = 0;
#else
const size_t kPointerOffset = WTF::kCookieSize;
const size_t kExtraAllocSize = WTF::kCookieSize * 2;
#endif
const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
const size_t kTestBucketIndex = kRealAllocSize >> WTF::kBucketShift;

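// A note on the constants above: in ENABLE(ASSERT) builds each allocation is
// bracketed by a leading and a trailing cookie, so a slot holds the requested
// bytes plus two cookies, and the user pointer sits kCookieSize past the slot
// start. A sketch of the relationship (illustrative, not to scale):
//
//   slot: [cookie][user data: kTestAllocSize bytes][cookie]
//   kRealAllocSize   = kTestAllocSize + 2 * kCookieSize
//   kTestBucketIndex = kRealAllocSize >> kBucketShift
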
void TestSetup()
{
    allocator.init();
    genericAllocator.init();
}

void TestShutdown()
{
    // We expect no leaks in the general case. We have a test for leak
    // detection.
    EXPECT_TRUE(allocator.shutdown());
    EXPECT_TRUE(genericAllocator.shutdown());
}

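// The tests below all follow the same skeleton; a minimal sketch, mirroring
// what this file's tests actually do rather than adding a new one:
//
//   TEST(PartitionAllocTest, Example)
//   {
//       TestSetup(); // initializes both partitions
//       void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
//       EXPECT_TRUE(ptr);
//       partitionFree(ptr);
//       TestShutdown(); // asserts via shutdown() that nothing leaked
//   }
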
#if !CPU(64BIT) || OS(POSIX)
bool SetAddressSpaceLimit()
{
#if !CPU(64BIT)
    // 32 bits => address space is limited already.
    return true;
#elif OS(POSIX) && !OS(MACOSX)
    // Mac will accept RLIMIT_AS changes but it is not enforced.
    // See https://crbug.com/435269 and rdar://17576114.
    const size_t kAddressSpaceLimit = static_cast<size_t>(4096) * 1024 * 1024;
    struct rlimit limit;
    if (getrlimit(RLIMIT_AS, &limit) != 0)
        return false;
    if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
        limit.rlim_cur = kAddressSpaceLimit;
        if (setrlimit(RLIMIT_AS, &limit) != 0)
            return false;
    }
    return true;
#else
    return false;
#endif
}

bool ClearAddressSpaceLimit()
{
#if !CPU(64BIT)
    return true;
#elif OS(POSIX)
    struct rlimit limit;
    if (getrlimit(RLIMIT_AS, &limit) != 0)
        return false;
    limit.rlim_cur = limit.rlim_max;
    if (setrlimit(RLIMIT_AS, &limit) != 0)
        return false;
    return true;
#else
    return false;
#endif
}
#endif

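// The two helpers above are consumed by DoReturnNullTest() below: RLIMIT_AS
// is lowered so allocations exhaust the address space quickly, then restored
// once the "return null" behaviour has been exercised.
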
// Fills the current slot span for |size| with allocations and returns its
// PartitionPage.
PartitionPage* GetFullPage(size_t size)
{
    size_t realSize = size + kExtraAllocSize;
    size_t bucketIdx = realSize >> kBucketShift;
    PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    size_t numSlots = (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / realSize;
    void* first = 0;
    void* last = 0;
    size_t i;
    for (i = 0; i < numSlots; ++i) {
        void* ptr = partitionAlloc(allocator.root(), size);
        EXPECT_TRUE(ptr);
        if (!i)
            first = partitionCookieFreePointerAdjust(ptr);
        else if (i == numSlots - 1)
            last = partitionCookieFreePointerAdjust(ptr);
    }
    EXPECT_EQ(partitionPointerToPage(first), partitionPointerToPage(last));
    if (bucket->numSystemPagesPerSlotSpan == kNumSystemPagesPerPartitionPage)
        EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & kPartitionPageBaseMask);
    EXPECT_EQ(numSlots, static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots));
    EXPECT_EQ(0, bucket->activePagesHead->freelistHead);
    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage);
    return bucket->activePagesHead;
}

// Frees every slot of a page previously filled by GetFullPage().
void FreeFullPage(PartitionPage* page)
{
    size_t size = page->bucket->slotSize;
    size_t numSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / size;
    EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots)));
    char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
    size_t i;
    for (i = 0; i < numSlots; ++i) {
        partitionFree(ptr + kPointerOffset);
        ptr += size;
    }
}

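// Typical pairing of the two helpers above, as used by most tests below:
//
//   PartitionPage* page = GetFullPage(kTestAllocSize); // fill one slot span
//   FreeFullPage(page);                                // release every slot
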
void CycleFreeCache(size_t size)
{
    size_t realSize = size + kExtraAllocSize;
    size_t bucketIdx = realSize >> kBucketShift;
    PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    ASSERT(!bucket->activePagesHead->numAllocatedSlots);

    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
        void* ptr = partitionAlloc(allocator.root(), size);
        EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
        partitionFree(ptr);
        EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
        EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
    }
}

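// CycleFreeCache() above performs kMaxFreeableSpans alloc/free pairs, pushing
// that many spans through the ring of freeable empty spans; this evicts (and
// decommits) any page that was already sitting in the empty cache. The
// FreeCache test below relies on exactly this to flush a page out.
// CycleGenericFreeCache() next is the same idea against the generic partition.
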
void CycleGenericFreeCache(size_t size)
{
    for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
        void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
        PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
        PartitionBucket* bucket = page->bucket;
        EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots);
        partitionFreeGeneric(genericAllocator.root(), ptr);
        EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots);
        EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex);
    }
}

void CheckPageInCore(void* ptr, bool inCore)
{
#if OS(LINUX)
    unsigned char ret;
    EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
    EXPECT_EQ(inCore, ret);
#endif
}

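// CheckPageInCore() above leans on mincore(), which reports one status byte
// per system page of the queried range, with the low bit set if the page is
// resident in memory; since exactly one page is queried, a single unsigned
// char receives the result.
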
class MockPartitionStatsDumper : public PartitionStatsDumper {
public:
    MockPartitionStatsDumper()
        : m_totalResidentBytes(0)
        , m_totalActiveBytes(0)
        , m_totalDecommittableBytes(0)
        , m_totalDiscardableBytes(0) { }

    void partitionDumpTotals(const char* partitionName, const PartitionMemoryStats* memoryStats) override
    {
        EXPECT_GE(memoryStats->totalMmappedBytes, memoryStats->totalResidentBytes);
        EXPECT_EQ(m_totalResidentBytes, memoryStats->totalResidentBytes);
        EXPECT_EQ(m_totalActiveBytes, memoryStats->totalActiveBytes);
        EXPECT_EQ(m_totalDecommittableBytes, memoryStats->totalDecommittableBytes);
        EXPECT_EQ(m_totalDiscardableBytes, memoryStats->totalDiscardableBytes);
    }

    void partitionsDumpBucketStats(const char* partitionName, const PartitionBucketMemoryStats* memoryStats) override
    {
        (void) partitionName;
        EXPECT_TRUE(memoryStats->isValid);
        EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask);
        m_bucketStats.append(*memoryStats);
        m_totalResidentBytes += memoryStats->residentBytes;
        m_totalActiveBytes += memoryStats->activeBytes;
        m_totalDecommittableBytes += memoryStats->decommittableBytes;
        m_totalDiscardableBytes += memoryStats->discardableBytes;
    }

    bool IsMemoryAllocationRecorded()
    {
        return m_totalResidentBytes != 0 && m_totalActiveBytes != 0;
    }

    const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize)
    {
        for (size_t i = 0; i < m_bucketStats.size(); ++i) {
            if (m_bucketStats[i].bucketSlotSize == bucketSize)
                return &m_bucketStats[i];
        }
        return 0;
    }

private:
    size_t m_totalResidentBytes;
    size_t m_totalActiveBytes;
    size_t m_totalDecommittableBytes;
    size_t m_totalDiscardableBytes;

    Vector<PartitionBucketMemoryStats> m_bucketStats;
};

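// How the mock is driven (see the DumpMemoryStats test below): the dumper is
// handed to partitionDumpStats / partitionDumpStatsGeneric, which invoke
// partitionsDumpBucketStats once per bucket and then partitionDumpTotals, so
// the totals accumulated from the per-bucket callbacks must agree with what
// the allocator reports. A minimal usage sketch:
//
//   MockPartitionStatsDumper dumper;
//   partitionDumpStatsGeneric(genericAllocator.root(),
//       "mock_generic_allocator", false /* detailed dump */, &dumper);
//   EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
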
} // anonymous namespace

// Check that the most basic allocate / free pair works.
TEST(PartitionAllocTest, Basic)
{
    TestSetup();
    PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
    PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage;

    EXPECT_FALSE(bucket->emptyPagesHead);
    EXPECT_FALSE(bucket->decommittedPagesHead);
    EXPECT_EQ(seedPage, bucket->activePagesHead);
    EXPECT_EQ(0, bucket->activePagesHead->nextPage);

    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask);
    // Check that the offset appears to include a guard page.
    EXPECT_EQ(kPartitionPageSize + kPointerOffset, reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);

    partitionFree(ptr);
    // Expect that the last active page gets noticed as empty but doesn't get
    // decommitted.
    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_FALSE(bucket->decommittedPagesHead);

    TestShutdown();
}

// Check that we can detect a memory leak.
TEST(PartitionAllocTest, SimpleLeak)
{
    TestSetup();
    void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize);
    (void)leakedPtr;
    void* leakedPtr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    (void)leakedPtr2;
    EXPECT_FALSE(allocator.shutdown());
    EXPECT_FALSE(genericAllocator.shutdown());
}

// Test multiple allocations, and freelist handling.
TEST(PartitionAllocTest, MultiAlloc)
{
    TestSetup();

    char* ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    char* ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    EXPECT_TRUE(ptr2);
    ptrdiff_t diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    // Check that we re-use the just-freed slot.
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr2);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
    partitionFree(ptr1);
    ptr1 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr1);
    diff = ptr2 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);

    char* ptr3 = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_TRUE(ptr3);
    diff = ptr3 - ptr1;
    EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);

    partitionFree(ptr1);
    partitionFree(ptr2);
    partitionFree(ptr3);

    TestShutdown();
}

// Test a bucket with multiple pages.
TEST(PartitionAllocTest, MultiPages)
{
    TestSetup();
    PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    PartitionPage* page = GetFullPage(kTestAllocSize);
    FreeFullPage(page);
    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
    EXPECT_EQ(0, page->nextPage);
    EXPECT_EQ(0, page->numAllocatedSlots);

    page = GetFullPage(kTestAllocSize);
    PartitionPage* page2 = GetFullPage(kTestAllocSize);

    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->nextPage);
    EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & kSuperPageBaseMask);

    // Fully free the non-current page. This will leave us with no current
    // active page because one is empty and the other is full.
    FreeFullPage(page);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);

    // Allocate a new page, it should pull from the freelist.
    page = GetFullPage(kTestAllocSize);
    EXPECT_FALSE(bucket->emptyPagesHead);
    EXPECT_EQ(page, bucket->activePagesHead);

    FreeFullPage(page);
    FreeFullPage(page2);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page2->numAllocatedSlots);
    EXPECT_EQ(0, page2->numUnprovisionedSlots);
    EXPECT_NE(-1, page2->emptyCacheIndex);

    TestShutdown();
}

// Test some finer aspects of internal page transitions.
TEST(PartitionAllocTest, PageTransitions)
{
    TestSetup();
    PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    PartitionPage* page1 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(0, page1->nextPage);
    PartitionPage* page2 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(0, page2->nextPage);

    // Bounce page1 back into the non-full list then fill it up again.
    char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, bucket->activePagesHead->nextPage);

    // Allocating another page at this point should cause us to scan over page1
    // (which is both full and NOT our current page), and evict it from the
    // freelist. Older code had an O(n^2) condition due to failure to do this.
    PartitionPage* page3 = GetFullPage(kTestAllocSize);
    EXPECT_EQ(page3, bucket->activePagesHead);
    EXPECT_EQ(0, page3->nextPage);

    // Work out a pointer into page2 and free it.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr);
    // Trying to allocate at this time should cause us to cycle around to page2
    // and find the recently freed slot.
    char* newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page2, bucket->activePagesHead);
    EXPECT_EQ(page3, page2->nextPage);

    // Work out a pointer into page1 and free it. This should pull the page
    // back into the list of available pages.
    ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr);
    // This allocation should be satisfied by page1.
    newPtr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    EXPECT_EQ(ptr, newPtr);
    EXPECT_EQ(page1, bucket->activePagesHead);
    EXPECT_EQ(page2, page1->nextPage);

    FreeFullPage(page3);
    FreeFullPage(page2);
    FreeFullPage(page1);

    // Allocating whilst in this state exposed a bug, so keep the test.
    ptr = reinterpret_cast<char*>(partitionAlloc(allocator.root(), kTestAllocSize));
    partitionFree(ptr);

    TestShutdown();
}

// Test some corner cases relating to page transitions in the internal
// free page list metadata bucket.
TEST(PartitionAllocTest, FreePageListPageTransitions)
{
    TestSetup();
    PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    size_t numToFillFreeListPage = kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize);
    // The +1 is because we need to account for the fact that the current page
    // never gets thrown on the freelist.
    ++numToFillFreeListPage;
    OwnPtr<PartitionPage*[]> pages = adoptArrayPtr(new PartitionPage*[numToFillFreeListPage]);

    size_t i;
    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);
    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
    EXPECT_TRUE(bucket->emptyPagesHead);

    // Allocate / free in a different bucket size so we get control of a
    // different free page list. We need two pages because one will be the last
    // active page and not get freed.
    PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
    PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
    FreeFullPage(page1);
    FreeFullPage(page2);

    for (i = 0; i < numToFillFreeListPage; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
    }
    EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead);

    for (i = 0; i < numToFillFreeListPage; ++i)
        FreeFullPage(pages[i]);
    EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
    EXPECT_TRUE(bucket->emptyPagesHead);

    TestShutdown();
}

// Test a large series of allocations that cross more than one underlying
// 64KB super page allocation.
TEST(PartitionAllocTest, MultiPageAllocs)
{
    TestSetup();
    // This is guaranteed to cross a super page boundary because the first
    // partition page "slot" will be taken up by a guard page.
    size_t numPagesNeeded = kNumPartitionPagesPerSuperPage;
    // The super page should begin and end in a guard, so we need one less
    // page in order to allocate a single page in the new super page.
    --numPagesNeeded;

    EXPECT_GT(numPagesNeeded, 1u);
    OwnPtr<PartitionPage*[]> pages;
    pages = adoptArrayPtr(new PartitionPage*[numPagesNeeded]);
    uintptr_t firstSuperPageBase = 0;
    size_t i;
    for (i = 0; i < numPagesNeeded; ++i) {
        pages[i] = GetFullPage(kTestAllocSize);
        void* storagePtr = partitionPageToPointer(pages[i]);
        if (!i)
            firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
        if (i == numPagesNeeded - 1) {
            uintptr_t secondSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
            uintptr_t secondSuperPageOffset = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask;
            EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
            // Check that we allocated a guard page for the second page.
            EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset);
        }
    }
    for (i = 0; i < numPagesNeeded; ++i)
        FreeFullPage(pages[i]);

    TestShutdown();
}

// Test the generic allocation functions that can handle arbitrary sizes,
// reallocation etc.
TEST(PartitionAllocTest, GenericAlloc)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    ptr = partitionAllocGeneric(genericAllocator.root(), kGenericMaxBucketed + 1);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    ptr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_TRUE(ptr);
    void* origPtr = ptr;
    char* charPtr = static_cast<char*>(ptr);
    *charPtr = 'A';

    // Change the size of the realloc, remaining inside the same bucket.
    void* newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 2);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_EQ(ptr, newPtr);
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericSmallestBucket);
    EXPECT_EQ(ptr, newPtr);

    // Change the size of the realloc, switching buckets.
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericSmallestBucket + 1);
    EXPECT_NE(newPtr, ptr);
    // Check that the realloc copied correctly.
    char* newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'A');
#if ENABLE(ASSERT)
    // Subtle: this checks for an old bug where we copied too much from the
    // source of the realloc. The condition can be detected by a trashing of
    // the uninitialized value in the space of the upsized allocation.
    EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket)));
#endif
    *newCharPtr = 'B';
    // The realloc moved. To check that the old allocation was freed, we can
    // do an alloc of the old allocation size and check that the old allocation
    // address is at the head of the freelist and reused.
    void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1);
    EXPECT_EQ(reusedPtr, origPtr);
    partitionFreeGeneric(genericAllocator.root(), reusedPtr);

    // Downsize the realloc.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'B');
    *newCharPtr = 'C';

    // Upsize the realloc to outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed + 1);
    EXPECT_NE(newPtr, ptr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'C');
    *newCharPtr = 'D';

    // Upsize and downsize the realloc, remaining outside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed * 10);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'D');
    *newCharPtr = 'E';
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed * 2);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'E');
    *newCharPtr = 'F';

    // Downsize the realloc to inside the partition.
    ptr = newPtr;
    newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1);
    EXPECT_NE(newPtr, ptr);
    EXPECT_EQ(newPtr, origPtr);
    newCharPtr = static_cast<char*>(newPtr);
    EXPECT_EQ(*newCharPtr, 'F');

    partitionFreeGeneric(genericAllocator.root(), newPtr);
    TestShutdown();
}

// Test that the generic allocation functions can handle some specific sizes
// of interest.
TEST(PartitionAllocTest, GenericAllocSizes)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(genericAllocator.root(), 0);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // kPartitionPageSize is interesting because it results in just one
    // allocation per page, which tripped up some corner cases.
    size_t size = kPartitionPageSize - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    // Should be freeable at this point.
    PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_NE(-1, page->emptyCacheIndex);
    partitionFreeGeneric(genericAllocator.root(), ptr2);

    size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - kSystemPageSize) / 2) - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    memset(ptr, 'A', size);
    ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);
    void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr3);
    void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr4);

    page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3));
    EXPECT_NE(page, page2);

    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    partitionFreeGeneric(genericAllocator.root(), ptr2);
    // Should be freeable at this point.
    EXPECT_NE(-1, page->emptyCacheIndex);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);
    void* newPtr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr3, newPtr);
    newPtr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr2, newPtr);
#if OS(LINUX) && !ENABLE(ASSERT)
    // On Linux, we have a guarantee that freelisting a page should cause its
    // contents to be nulled out. We check for null here to detect a bug we
    // had where a large slot size was causing us to not properly free all
    // resources back to the system.
    // We only run the check when asserts are disabled because when they are
    // enabled, the allocated area is overwritten with an "uninitialized"
    // byte pattern.
    EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
#endif
    partitionFreeGeneric(genericAllocator.root(), newPtr);
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    partitionFreeGeneric(genericAllocator.root(), ptr4);

    // Can we allocate a massive (512MB) size?
    // Allocate 512MB, but +1, to test for cookie writing alignment issues.
    ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024 + 1);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Check a more reasonable, but still direct mapped, size.
    // Chop a system page and a byte off to test for rounding errors.
    size = 20 * 1024 * 1024;
    size -= kSystemPageSize;
    size -= 1;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    char* charPtr = reinterpret_cast<char*>(ptr);
    *(charPtr + (size - 1)) = 'A';
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Can we free null?
    partitionFreeGeneric(genericAllocator.root(), 0);

    // Do we correctly get a null for a failed allocation?
    EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, 3u * 1024 * 1024 * 1024));

    TestShutdown();
}

// Test that we can fetch the real allocated size after an allocation.
TEST(PartitionAllocTest, GenericAllocGetSize)
{
    TestSetup();

    void* ptr;
    size_t requestedSize, actualSize, predictedSize;

    EXPECT_TRUE(partitionAllocSupportsGetSize());

    // Allocate something small.
    requestedSize = 511 - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_LT(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate a size that should be a perfect match for a bucket, because it
    // is an exact power of 2.
    requestedSize = (256 * 1024) - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_EQ(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate a size that is a system page smaller than a bucket. GetSize()
    // should return a larger size than we asked for now.
    requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_EQ(requestedSize + kSystemPageSize, actualSize);
    // Check that we can write at the end of the reported size too.
    char* charPtr = reinterpret_cast<char*>(ptr);
    *(charPtr + (actualSize - 1)) = 'A';
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Allocate something very large, and uneven.
    requestedSize = 512 * 1024 * 1024 - 1;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize);
    EXPECT_TRUE(ptr);
    actualSize = partitionAllocGetSize(ptr);
    EXPECT_EQ(predictedSize, actualSize);
    EXPECT_LT(requestedSize, actualSize);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Too large allocation.
    requestedSize = INT_MAX;
    predictedSize = partitionAllocActualSize(genericAllocator.root(), requestedSize);
    EXPECT_EQ(requestedSize, predictedSize);

    TestShutdown();
}

// Test the realloc() contract.
TEST(PartitionAllocTest, Realloc)
{
    TestSetup();

    // realloc(0, size) should be equivalent to malloc().
    void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, kTestAllocSize);
    memset(ptr, 'A', kTestAllocSize);
    PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    // realloc(ptr, 0) should be equivalent to free().
    void* ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, 0);
    EXPECT_EQ(0, ptr2);
    EXPECT_EQ(partitionCookieFreePointerAdjust(ptr), page->freelistHead);

    // Test that growing an allocation with realloc() copies everything from the
    // old allocation.
    size_t size = kSystemPageSize - kExtraAllocSize;
    EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size));
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    memset(ptr, 'A', size);
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, size + 1);
    EXPECT_NE(ptr, ptr2);
    char* charPtr2 = static_cast<char*>(ptr2);
    EXPECT_EQ('A', charPtr2[0]);
    EXPECT_EQ('A', charPtr2[size - 1]);
#if ENABLE(ASSERT)
    EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
#endif

    // Test that shrinking an allocation with realloc() also copies everything
    // from the old allocation.
    ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1);
    EXPECT_NE(ptr2, ptr);
    char* charPtr = static_cast<char*>(ptr);
    EXPECT_EQ('A', charPtr[0]);
    EXPECT_EQ('A', charPtr[size - 2]);
#if ENABLE(ASSERT)
    EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
#endif

    partitionFreeGeneric(genericAllocator.root(), ptr);

    // Test that shrinking a direct mapped allocation happens in-place.
    size = kGenericMaxBucketed + 16 * kSystemPageSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    size_t actualSize = partitionAllocGetSize(ptr);
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kGenericMaxBucketed + 8 * kSystemPageSize);
    EXPECT_EQ(ptr, ptr2);
    EXPECT_EQ(actualSize - 8 * kSystemPageSize, partitionAllocGetSize(ptr2));

    // Test that a previously in-place shrunk direct mapped allocation can be
    // expanded up again within its original size.
    ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - kSystemPageSize);
    EXPECT_EQ(ptr2, ptr);
    EXPECT_EQ(actualSize - kSystemPageSize, partitionAllocGetSize(ptr));

    // Test that a direct mapped allocation is performed not in-place when the
    // new size is small enough.
    ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kSystemPageSize);
    EXPECT_NE(ptr, ptr2);

    partitionFreeGeneric(genericAllocator.root(), ptr2);

    TestShutdown();
}

// Tests the handing out of freelists for partial pages.
TEST(PartitionAllocTest, PartialPageFreelists)
{
    TestSetup();

    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    EXPECT_EQ(kSystemPageSize - kAllocationGranularity, bigSize + kExtraAllocSize);
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift;
    PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->emptyPagesHead);

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);

    PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    size_t totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (bigSize + kExtraAllocSize);
    EXPECT_EQ(4u, totalSlots);
    // The freelist should have one entry, because we were able to exactly fit
    // one object slot and one freelist pointer (the null that the head points
    // to) into a system page.
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_EQ(2, page->numUnprovisionedSlots);

    void* ptr2 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr2);
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(2, page->numAllocatedSlots);
    EXPECT_EQ(2, page->numUnprovisionedSlots);

    void* ptr3 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr3);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(3, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);

    void* ptr4 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr4);
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(4, page->numAllocatedSlots);
    EXPECT_EQ(0, page->numUnprovisionedSlots);

    void* ptr5 = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr5);

    PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr5));
    EXPECT_EQ(1, page2->numAllocatedSlots);

    // Churn things a little whilst there's a partial page freelist.
    partitionFree(ptr);
    ptr = partitionAlloc(allocator.root(), bigSize);
    void* ptr6 = partitionAlloc(allocator.root(), bigSize);

    partitionFree(ptr);
    partitionFree(ptr2);
    partitionFree(ptr3);
    partitionFree(ptr4);
    partitionFree(ptr5);
    partitionFree(ptr6);
    EXPECT_NE(-1, page->emptyCacheIndex);
    EXPECT_NE(-1, page2->emptyCacheIndex);
    EXPECT_TRUE(page2->freelistHead);
    EXPECT_EQ(0, page2->numAllocatedSlots);

    // And test a couple of sizes that do not cross kSystemPageSize with a single allocation.
    size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
    bucketIdx = (mediumSize + kExtraAllocSize) >> kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->emptyPagesHead);

    ptr = partitionAlloc(allocator.root(), mediumSize);
    EXPECT_TRUE(ptr);
    page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (mediumSize + kExtraAllocSize);
    size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize);
    EXPECT_EQ(2u, firstPageSlots);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);

    size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
    bucketIdx = (smallSize + kExtraAllocSize) >> kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->emptyPagesHead);

    ptr = partitionAlloc(allocator.root(), smallSize);
    EXPECT_TRUE(ptr);
    page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (smallSize + kExtraAllocSize);
    firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(0, page->numAllocatedSlots);

    size_t verySmallSize = 32 - kExtraAllocSize;
    bucketIdx = (verySmallSize + kExtraAllocSize) >> kBucketShift;
    bucket = &allocator.root()->buckets()[bucketIdx];
    EXPECT_EQ(0, bucket->emptyPagesHead);

    ptr = partitionAlloc(allocator.root(), verySmallSize);
    EXPECT_TRUE(ptr);
    page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (verySmallSize + kExtraAllocSize);
    firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots);

    partitionFree(ptr);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_EQ(0, page->numAllocatedSlots);

    // And try an allocation size (against the generic allocator) that is
    // larger than a system page.
    size_t pageAndAHalfSize = (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize);
    EXPECT_TRUE(ptr);
    page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_TRUE(page->freelistHead);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (pageAndAHalfSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    // And then make sure that exactly the page size only faults one page.
    size_t pageSize = kSystemPageSize - kExtraAllocSize;
    ptr = partitionAllocGeneric(genericAllocator.root(), pageSize);
    EXPECT_TRUE(ptr);
    page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_FALSE(page->freelistHead);
    totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / (pageSize + kExtraAllocSize);
    EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    TestShutdown();
}

// Test some of the fragmentation-resistant properties of the allocator.
TEST(PartitionAllocTest, PageRefilling)
{
    TestSetup();
    PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];

    // Grab two full pages and a non-full page.
    PartitionPage* page1 = GetFullPage(kTestAllocSize);
    PartitionPage* page2 = GetFullPage(kTestAllocSize);
    void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    EXPECT_NE(page1, bucket->activePagesHead);
    EXPECT_NE(page2, bucket->activePagesHead);
    PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(1, page->numAllocatedSlots);

    // Work out a pointer into page2 and free it; and then page1 and free it.
    char* ptr2 = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset;
    partitionFree(ptr2);
    ptr2 = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset;
    partitionFree(ptr2);

    // If we perform two allocations from the same bucket now, we expect to
    // refill both the nearly full pages.
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    (void) partitionAlloc(allocator.root(), kTestAllocSize);
    EXPECT_EQ(1, page->numAllocatedSlots);

    FreeFullPage(page2);
    FreeFullPage(page1);
    partitionFree(ptr);

    TestShutdown();
}

// Basic tests to ensure that allocations work for partial page buckets.
TEST(PartitionAllocTest, PartialPages)
{
    TestSetup();

    // Find a size that is backed by a partial partition page.
    size_t size = sizeof(void*);
    PartitionBucket* bucket = 0;
    while (size < kTestMaxAllocation) {
        bucket = &allocator.root()->buckets()[size >> kBucketShift];
        if (bucket->numSystemPagesPerSlotSpan % kNumSystemPagesPerPartitionPage)
            break;
        size += sizeof(void*);
    }
    EXPECT_LT(size, kTestMaxAllocation);

    PartitionPage* page1 = GetFullPage(size);
    PartitionPage* page2 = GetFullPage(size);
    FreeFullPage(page2);
    FreeFullPage(page1);

    TestShutdown();
}

// Test correct handling if our mapping collides with another.
TEST(PartitionAllocTest, MappingCollision)
{
    TestSetup();
    // The -2 is because the first and last partition pages in a super page are
    // guard pages.
    size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
    OwnPtr<PartitionPage*[]> firstSuperPagePages = adoptArrayPtr(new PartitionPage*[numPartitionPagesNeeded]);
    OwnPtr<PartitionPage*[]> secondSuperPagePages = adoptArrayPtr(new PartitionPage*[numPartitionPagesNeeded]);

    size_t i;
    for (i = 0; i < numPartitionPagesNeeded; ++i)
        firstSuperPagePages[i] = GetFullPage(kTestAllocSize);

    char* pageBase = reinterpret_cast<char*>(partitionPageToPointer(firstSuperPagePages[0]));
    EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
    pageBase -= kPartitionPageSize;
    // Map a single system page either side of the mapping for our allocations,
    // with the goal of tripping up alignment of the next mapping.
    void* map1 = allocPages(pageBase - kPageAllocationGranularity, kPageAllocationGranularity, kPageAllocationGranularity, PageInaccessible);
    EXPECT_TRUE(map1);
    void* map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPageAllocationGranularity, PageInaccessible);
    EXPECT_TRUE(map2);

    for (i = 0; i < numPartitionPagesNeeded; ++i)
        secondSuperPagePages[i] = GetFullPage(kTestAllocSize);

    freePages(map1, kPageAllocationGranularity);
    freePages(map2, kPageAllocationGranularity);

    pageBase = reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0]));
    EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
    pageBase -= kPartitionPageSize;
    // Map a single system page either side of the mapping for our allocations,
    // with the goal of tripping up alignment of the next mapping.
    map1 = allocPages(pageBase - kPageAllocationGranularity, kPageAllocationGranularity, kPageAllocationGranularity, PageAccessible);
    EXPECT_TRUE(map1);
    map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPageAllocationGranularity, PageAccessible);
    EXPECT_TRUE(map2);
    setSystemPagesInaccessible(map1, kPageAllocationGranularity);
    setSystemPagesInaccessible(map2, kPageAllocationGranularity);

    PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
    freePages(map1, kPageAllocationGranularity);
    freePages(map2, kPageAllocationGranularity);

    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & kPartitionPageOffsetMask);

    // And make sure we really did get a page in a new superpage.
    EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(firstSuperPagePages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & kSuperPageBaseMask);
    EXPECT_NE(reinterpret_cast<uintptr_t>(partitionPageToPointer(secondSuperPagePages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>(partitionPageToPointer(pageInThirdSuperPage)) & kSuperPageBaseMask);

    FreeFullPage(pageInThirdSuperPage);
    for (i = 0; i < numPartitionPagesNeeded; ++i) {
        FreeFullPage(firstSuperPagePages[i]);
        FreeFullPage(secondSuperPagePages[i]);
    }

    TestShutdown();
}

// Tests that pages in the free page cache do get freed as appropriate.
TEST(PartitionAllocTest, FreeCache)
{
    TestSetup();

    EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages);

    size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize;
    size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift;
    PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx];

    void* ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_TRUE(ptr);
    PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    EXPECT_EQ(0, bucket->emptyPagesHead);
    EXPECT_EQ(1, page->numAllocatedSlots);
    EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
    partitionFree(ptr);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_NE(-1, page->emptyCacheIndex);
    EXPECT_TRUE(page->freelistHead);

    CycleFreeCache(kTestAllocSize);

    // Flushing the cache should have really freed the unused page.
    EXPECT_FALSE(page->freelistHead);
    EXPECT_EQ(-1, page->emptyCacheIndex);
    EXPECT_EQ(0, page->numAllocatedSlots);
    PartitionBucket* cycleFreeCacheBucket = &allocator.root()->buckets()[kTestBucketIndex];
    EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * kSystemPageSize, allocator.root()->totalSizeOfCommittedPages);

    // Check that an allocation works ok whilst in this state (a free'd page
    // as the active pages head).
    ptr = partitionAlloc(allocator.root(), bigSize);
    EXPECT_FALSE(bucket->emptyPagesHead);
    partitionFree(ptr);

    // Also check that a page that is bouncing immediately between empty and
    // used does not get freed.
    for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
        ptr = partitionAlloc(allocator.root(), bigSize);
        EXPECT_TRUE(page->freelistHead);
        partitionFree(ptr);
        EXPECT_TRUE(page->freelistHead);
    }
    EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages);
    TestShutdown();
}

// Tests for a bug we had with losing references to free pages.
TEST(PartitionAllocTest, LostFreePagesBug)
{
    TestSetup();

    size_t size = kPartitionPageSize - kExtraAllocSize;

    void* ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr2);

    PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
    PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr2));
    PartitionBucket* bucket = page->bucket;

    EXPECT_EQ(0, bucket->emptyPagesHead);
    EXPECT_EQ(-1, page->numAllocatedSlots);
    EXPECT_EQ(1, page2->numAllocatedSlots);

    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr2);

    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_TRUE(bucket->emptyPagesHead->nextPage);
    EXPECT_EQ(0, page->numAllocatedSlots);
    EXPECT_EQ(0, page2->numAllocatedSlots);
    EXPECT_TRUE(page->freelistHead);
    EXPECT_TRUE(page2->freelistHead);

    CycleGenericFreeCache(kTestAllocSize);

    EXPECT_FALSE(page->freelistHead);
    EXPECT_FALSE(page2->freelistHead);

    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_TRUE(bucket->emptyPagesHead->nextPage);
    EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);

    // At this moment, we have two decommitted pages, on the empty list.
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead);
    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_TRUE(bucket->decommittedPagesHead);

    CycleGenericFreeCache(kTestAllocSize);

    // We're now set up to trigger a historical bug by scanning over the active
    // pages list. The current code gets into a different state, but we'll keep
    // the test as being an interesting corner case.
    ptr = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    EXPECT_TRUE(bucket->activePagesHead);
    EXPECT_TRUE(bucket->emptyPagesHead);
    EXPECT_TRUE(bucket->decommittedPagesHead);

    TestShutdown();
}

#if !CPU(64BIT) || OS(POSIX)

static void DoReturnNullTest(size_t allocSize)
{
    TestSetup();

    EXPECT_TRUE(SetAddressSpaceLimit());

    // Work out the number of allocations for 6 GB of memory.
    const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);

    void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric(genericAllocator.root(), numAllocations * sizeof(void*)));
    int i;

    for (i = 0; i < numAllocations; ++i) {
        ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, allocSize);
        if (!i)
            EXPECT_TRUE(ptrs[0]);
        if (!ptrs[i]) {
            ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, allocSize);
            EXPECT_FALSE(ptrs[i]);
            break;
        }
    }

    // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
    // we're not actually testing anything here.
    EXPECT_LT(i, numAllocations);

    // Free, reallocate and free again each block we allocated. We do this to
    // check that freeing memory also works correctly after a failed allocation.
    for (--i; i >= 0; --i) {
        partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
        ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), PartitionAllocReturnNull, allocSize);
        EXPECT_TRUE(ptrs[i]);
        partitionFreeGeneric(genericAllocator.root(), ptrs[i]);
    }

    partitionFreeGeneric(genericAllocator.root(), ptrs);

    EXPECT_TRUE(ClearAddressSpaceLimit());

    TestShutdown();
}

// Tests that if an allocation fails in "return null" mode, repeating it doesn't
// crash, and still returns null. The test tries to allocate 6 GB of memory in
// 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 4 GB
// using setrlimit() first.
#if OS(MACOSX)
#define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull
#else
#define MAYBE_RepeatedReturnNull RepeatedReturnNull
#endif
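// (gtest runs a test whose name begins with "DISABLED_" only on explicit
// request, so the MAYBE_ indirection above skips this test on OS X, where,
// as noted in SetAddressSpaceLimit(), RLIMIT_AS is not enforced.)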
TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull)
{
    // A single-slot but non-direct-mapped allocation size.
    DoReturnNullTest(512 * 1024);
}

// Another "return null" test but for larger, direct-mapped allocations.
#if OS(MACOSX)
#define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect
#else
#define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect
#endif
TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect)
{
    // A direct-mapped allocation size.
    DoReturnNullTest(256 * 1024 * 1024);
}

#endif // !CPU(64BIT) || OS(POSIX)

#if !OS(ANDROID)

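// The death tests below are compiled out on Android, where gtest's death
// test support has historically been unavailable. Each EXPECT_DEATH runs its
// statement in a child process and expects it to die with output matching
// the given regex ("" matches any output).
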
// Make sure that malloc(-1) dies.
// In the past, we had an integer overflow that would alias malloc(-1) to
// malloc(0), which is not good.
TEST(PartitionAllocDeathTest, LargeAllocs)
{
    TestSetup();
    // Largest alloc.
    EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(-1)), "");
    // And the smallest allocation we expect to die.
    EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), static_cast<size_t>(INT_MAX) + 1), "");

    TestShutdown();
}

// Check that our immediate double-free detection works.
TEST(PartitionAllocDeathTest, ImmediateDoubleFree)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr);

    EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");

    TestShutdown();
}

// Check that our refcount-based double-free detection works.
TEST(PartitionAllocDeathTest, RefcountDoubleFree)
{
    TestSetup();

    void* ptr = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kTestAllocSize);
    EXPECT_TRUE(ptr2);
    partitionFreeGeneric(genericAllocator.root(), ptr);
    partitionFreeGeneric(genericAllocator.root(), ptr2);
    // This is not an immediate double-free so our immediate detection won't
    // fire. However, it does take the "refcount" of the partition page to -1,
    // which is illegal and should be trapped.
    EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), "");

    TestShutdown();
}

// Check that guard pages are present where expected.
TEST(PartitionAllocDeathTest, GuardPages)
{
    TestSetup();

    // partitionAlloc adds kPartitionPageSize to the requested size
    // (for metadata), and then rounds that size to kPageAllocationGranularity.
    // To be able to reliably write one past a direct allocation, choose a
    // size that's
    // a) larger than kGenericMaxBucketed (to make the allocation direct), and
    // b) aligned at kPageAllocationGranularity boundaries after
    //    kPartitionPageSize has been added to it.
    // (On 32-bit, partitionAlloc adds another kSystemPageSize to the
    // allocation size before rounding, but there it marks the memory right
    // after size as inaccessible, so it's fine to write 1 past the size we
    // hand to partitionAlloc and we don't need to worry about allocation
    // granularities.)
#define ALIGN(N, A) (((N) + (A) - 1) / (A) * (A))
    const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize, kPageAllocationGranularity) - kPartitionPageSize;
#undef ALIGN
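    // Worked example of the ALIGN macro above (illustrative values, not the
    // real constants): with A = 0x1000 (a 4 kB granularity) and N = 0x1F4001,
    //   ALIGN(0x1F4001, 0x1000) = ((0x1F4001 + 0xFFF) / 0x1000) * 0x1000
    //                           = 0x1F5000,
    // i.e. N rounded up to the next multiple of A.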
    static_assert(kSize > kGenericMaxBucketed, "allocation not large enough for direct allocation");
    size_t size = kSize - kExtraAllocSize;
    void* ptr = partitionAllocGeneric(genericAllocator.root(), size);

    EXPECT_TRUE(ptr);
    char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;

    EXPECT_DEATH(*(charPtr - 1) = 'A', "");
    EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");

    partitionFreeGeneric(genericAllocator.root(), ptr);

    TestShutdown();
}

// Check that a bad free() is caught where the free() refers to an unused
// partition page of a large allocation.
TEST(PartitionAllocDeathTest, FreeWrongPartitionPage)
{
    TestSetup();

    // This large size will result in a direct mapped allocation with guard
    // pages at either end.
    void* ptr = partitionAllocGeneric(genericAllocator.root(), kPartitionPageSize * 2);
    EXPECT_TRUE(ptr);
    char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;

    EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), "");

    partitionFreeGeneric(genericAllocator.root(), ptr);

    TestShutdown();
}

#endif // !OS(ANDROID)

1374 // Tests that partitionDumpStatsGeneric and partitionDumpStats runs without
1375 // crashing and returns non zero values when memory is allocated.
1376 TEST(PartitionAllocTest, DumpMemoryStats)
1378 TestSetup();
1380 void* ptr = partitionAlloc(allocator.root(), kTestAllocSize);
1381 MockPartitionStatsDumper mockStatsDumper;
1382 partitionDumpStats(allocator.root(), "mock_allocator", false /* detailed dump */, &mockStatsDumper);
1383 EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded());
1385 partitionFree(ptr);
1388 // This series of tests checks the active -> empty -> decommitted states.
1390 void* genericPtr = partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllocSize);
1392 MockPartitionStatsDumper mockStatsDumperGeneric;
1393 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1394 EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1396 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1397 EXPECT_TRUE(stats);
1398 EXPECT_TRUE(stats->isValid);
1399 EXPECT_EQ(2048u, stats->bucketSlotSize);
1400 EXPECT_EQ(2048u, stats->activeBytes);
1401 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1402 EXPECT_EQ(0u, stats->decommittableBytes);
1403 EXPECT_EQ(0u, stats->discardableBytes);
1404 EXPECT_EQ(0u, stats->numFullPages);
1405 EXPECT_EQ(1u, stats->numActivePages);
1406 EXPECT_EQ(0u, stats->numEmptyPages);
1407 EXPECT_EQ(0u, stats->numDecommittedPages);
1410 partitionFreeGeneric(genericAllocator.root(), genericPtr);
1413 MockPartitionStatsDumper mockStatsDumperGeneric;
1414 partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
1415 EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
1417 const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
1418 EXPECT_TRUE(stats);
1419 EXPECT_TRUE(stats->isValid);
1420 EXPECT_EQ(2048u, stats->bucketSlotSize);
1421 EXPECT_EQ(0u, stats->activeBytes);
1422 EXPECT_EQ(kSystemPageSize, stats->residentBytes);
1423 EXPECT_EQ(kSystemPageSize, stats->decommittableBytes);
1424 EXPECT_EQ(0u, stats->discardableBytes);
1425 EXPECT_EQ(0u, stats->numFullPages);
1426 EXPECT_EQ(0u, stats->numActivePages);
1427 EXPECT_EQ(1u, stats->numEmptyPages);
1428 EXPECT_EQ(0u, stats->numDecommittedPages);
        CycleGenericFreeCache(kTestAllocSize);

        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(2048u, stats->bucketSlotSize);
            EXPECT_EQ(0u, stats->activeBytes);
            EXPECT_EQ(0u, stats->residentBytes);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(0u, stats->discardableBytes);
            EXPECT_EQ(0u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(0u, stats->numEmptyPages);
            EXPECT_EQ(1u, stats->numDecommittedPages);
        }
    }

    // This test checks for correct empty page list accounting.
    {
        size_t size = kPartitionPageSize - kExtraAllocSize;
        void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size);
        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
        partitionFreeGeneric(genericAllocator.root(), ptr1);
        partitionFreeGeneric(genericAllocator.root(), ptr2);

        CycleGenericFreeCache(kTestAllocSize);

        ptr1 = partitionAllocGeneric(genericAllocator.root(), size);
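
        // Both pages were decommitted by the cache cycling above; the
        // re-allocation should recommit exactly one of them, leaving one
        // full page and one decommitted page in the stats below.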
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kPartitionPageSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(kPartitionPageSize, stats->bucketSlotSize);
            EXPECT_EQ(kPartitionPageSize, stats->activeBytes);
            EXPECT_EQ(kPartitionPageSize, stats->residentBytes);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(0u, stats->discardableBytes);
            EXPECT_EQ(1u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(0u, stats->numEmptyPages);
            EXPECT_EQ(1u, stats->numDecommittedPages);
        }
        partitionFreeGeneric(genericAllocator.root(), ptr1);
    }

    // This test checks for correct direct mapped accounting.
    {
        size_t sizeSmaller = kGenericMaxBucketed + 1;
        size_t sizeBigger = (kGenericMaxBucketed * 2) + 1;
        size_t realSizeSmaller = (sizeSmaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
        size_t realSizeBigger = (sizeBigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
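
        // Direct-mapped allocations are rounded up to the next system page
        // boundary rather than to a bucket size, which is what the
        // "realSize" values above compute.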
        void* ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller);
        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger);

        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(realSizeSmaller);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_TRUE(stats->isDirectMap);
            EXPECT_EQ(realSizeSmaller, stats->bucketSlotSize);
            EXPECT_EQ(realSizeSmaller, stats->activeBytes);
            EXPECT_EQ(realSizeSmaller, stats->residentBytes);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(0u, stats->discardableBytes);
            EXPECT_EQ(1u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(0u, stats->numEmptyPages);
            EXPECT_EQ(0u, stats->numDecommittedPages);

            stats = mockStatsDumperGeneric.GetBucketStats(realSizeBigger);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_TRUE(stats->isDirectMap);
            EXPECT_EQ(realSizeBigger, stats->bucketSlotSize);
            EXPECT_EQ(realSizeBigger, stats->activeBytes);
            EXPECT_EQ(realSizeBigger, stats->residentBytes);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(0u, stats->discardableBytes);
            EXPECT_EQ(1u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(0u, stats->numEmptyPages);
            EXPECT_EQ(0u, stats->numDecommittedPages);
        }

        partitionFreeGeneric(genericAllocator.root(), ptr2);
        partitionFreeGeneric(genericAllocator.root(), ptr);

        // Whilst we're here, allocate again and free in a different order
        // to give our linked list code a workout.
        ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller);
        ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger);
        partitionFreeGeneric(genericAllocator.root(), ptr);
        partitionFreeGeneric(genericAllocator.root(), ptr2);
    }

    // This test checks large-but-not-quite-direct allocations.
    {
        void* ptr = partitionAllocGeneric(genericAllocator.root(), 65536 + 1);

        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());
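
            // A 65537-byte request doesn't fit the 65536 bucket, so it
            // lands in the next bucket up; buckets within a power-of-two
            // order are spaced 65536 / kGenericNumBucketsPerOrder bytes
            // apart, giving the slot size computed below.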
            size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(slotSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_FALSE(stats->isDirectMap);
            EXPECT_EQ(slotSize, stats->bucketSlotSize);
            EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->activeBytes);
            EXPECT_EQ(slotSize, stats->residentBytes);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(1u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(0u, stats->numEmptyPages);
            EXPECT_EQ(0u, stats->numDecommittedPages);
        }

        partitionFreeGeneric(genericAllocator.root(), ptr);

        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(slotSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_FALSE(stats->isDirectMap);
            EXPECT_EQ(slotSize, stats->bucketSlotSize);
            EXPECT_EQ(0u, stats->activeBytes);
            EXPECT_EQ(slotSize, stats->residentBytes);
            EXPECT_EQ(slotSize, stats->decommittableBytes);
            EXPECT_EQ(0u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(1u, stats->numEmptyPages);
            EXPECT_EQ(0u, stats->numDecommittedPages);
        }

        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), 65536 + kSystemPageSize + 1);
        EXPECT_EQ(ptr, ptr2);
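
        // The slightly larger request still fits the same bucket slot, and
        // since the just-emptied page hasn't been decommitted yet, the
        // allocator hands back the exact same slot (checked above).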
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder);
            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(slotSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_FALSE(stats->isDirectMap);
            EXPECT_EQ(slotSize, stats->bucketSlotSize);
            EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize, stats->activeBytes);
            EXPECT_EQ(slotSize, stats->residentBytes);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(0u, stats->discardableBytes);
            EXPECT_EQ(1u, stats->numFullPages);
            EXPECT_EQ(0u, stats->numActivePages);
            EXPECT_EQ(0u, stats->numEmptyPages);
            EXPECT_EQ(0u, stats->numDecommittedPages);
        }

        partitionFreeGeneric(genericAllocator.root(), ptr2);
    }

    TestShutdown();
}

// Tests the API to purge freeable memory.
TEST(PartitionAllocTest, Purge)
{
    TestSetup();

    char* ptr = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), 2048 - kExtraAllocSize));
    partitionFreeGeneric(genericAllocator.root(), ptr);
    {
        MockPartitionStatsDumper mockStatsDumperGeneric;
        partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
        EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

        const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
        EXPECT_TRUE(stats);
        EXPECT_TRUE(stats->isValid);
        EXPECT_EQ(kSystemPageSize, stats->decommittableBytes);
        EXPECT_EQ(kSystemPageSize, stats->residentBytes);
    }
    partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
    {
        MockPartitionStatsDumper mockStatsDumperGeneric;
        partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
        EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

        const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(2048);
        EXPECT_TRUE(stats);
        EXPECT_TRUE(stats->isValid);
        EXPECT_EQ(0u, stats->decommittableBytes);
        EXPECT_EQ(0u, stats->residentBytes);
    }
    // Calling purge again here is a good way of testing we didn't mess up the
    // state of the free cache ring.
    partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);

    char* bigPtr = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), 256 * 1024));
    partitionFreeGeneric(genericAllocator.root(), bigPtr);
    partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
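
    // CheckPageInCore() verifies whether the page backing a given address
    // is still resident (on POSIX this sort of check is typically done via
    // mincore()); here both freed regions should have been decommitted.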
    CheckPageInCore(ptr - kPointerOffset, false);
    CheckPageInCore(bigPtr - kPointerOffset, false);

    TestShutdown();
}

// Tests that we prefer to allocate into a non-empty partition page over an
// empty one. This is an important aspect of minimizing memory usage for some
// allocation sizes, particularly larger ones.
TEST(PartitionAllocTest, PreferActiveOverEmpty)
{
    TestSetup();

    size_t size = (kSystemPageSize * 2) - kExtraAllocSize;
    // Allocate 3 full slot spans worth of 8192-byte allocations.
    // Each slot span for this size is 16384 bytes, or 1 partition page and 2
    // slots.
    void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size);
    void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size);
    void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size);
    void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size);
    void* ptr5 = partitionAllocGeneric(genericAllocator.root(), size);
    void* ptr6 = partitionAllocGeneric(genericAllocator.root(), size);

    PartitionPage* page1 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
    PartitionPage* page2 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3));
    PartitionPage* page3 = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr6));
    EXPECT_NE(page1, page2);
    EXPECT_NE(page2, page3);
    PartitionBucket* bucket = page1->bucket;
    EXPECT_EQ(page3, bucket->activePagesHead);

    // Free up the 2nd slot in each slot span.
    // This leaves the active list containing 3 pages, each with 1 used and 1
    // free slot. The active page will be the one containing ptr1.
    partitionFreeGeneric(genericAllocator.root(), ptr6);
    partitionFreeGeneric(genericAllocator.root(), ptr4);
    partitionFreeGeneric(genericAllocator.root(), ptr2);
    EXPECT_EQ(page1, bucket->activePagesHead);

    // Empty the middle page in the active list.
    partitionFreeGeneric(genericAllocator.root(), ptr3);
    EXPECT_EQ(page1, bucket->activePagesHead);

    // Empty the first page in the active list -- also the current page.
    partitionFreeGeneric(genericAllocator.root(), ptr1);

    // A good choice here is to re-fill the third page, since the first two
    // are empty. We used to fail at that.
    void* ptr7 = partitionAllocGeneric(genericAllocator.root(), size);
    EXPECT_EQ(ptr6, ptr7);
    EXPECT_EQ(page3, bucket->activePagesHead);

    partitionFreeGeneric(genericAllocator.root(), ptr5);
    partitionFreeGeneric(genericAllocator.root(), ptr7);

    TestShutdown();
}

// Tests the API to purge discardable memory.
TEST(PartitionAllocTest, PurgeDiscardable)
{
    TestSetup();

    // Free the second of two 4096 byte allocations and then purge.
    {
        void* ptr1 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        char* ptr2 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize));
        partitionFreeGeneric(genericAllocator.root(), ptr2);
        PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
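        // The slot span for this bucket holds 4 slots (assuming 4096-byte
        // system pages and hence 16384-byte partition pages), so with 2
        // slots handed out, 2 remain unprovisioned.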
        EXPECT_EQ(2u, page->numUnprovisionedSlots);
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(kSystemPageSize, stats->activeBytes);
            EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes);
        }
        CheckPageInCore(ptr2 - kPointerOffset, true);
        partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
        CheckPageInCore(ptr2 - kPointerOffset, false);
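        // Discarding ptr2's system page also returns its slot to the
        // unprovisioned state, so the count rises from 2 to 3.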
        EXPECT_EQ(3u, page->numUnprovisionedSlots);

        partitionFreeGeneric(genericAllocator.root(), ptr1);
    }
    // Free the first of two 4096 byte allocations and then purge.
    {
        char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize));
        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        partitionFreeGeneric(genericAllocator.root(), ptr1);
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(kSystemPageSize, stats->activeBytes);
            EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes);
        }
        CheckPageInCore(ptr1 - kPointerOffset, true);
        partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
        CheckPageInCore(ptr1 - kPointerOffset, false);

        partitionFreeGeneric(genericAllocator.root(), ptr2);
    }
    {
        char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize));
        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize);
        void* ptr3 = partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize);
        void* ptr4 = partitionAllocGeneric(genericAllocator.root(), 9216 - kExtraAllocSize);
        memset(ptr1, 'A', 9216 - kExtraAllocSize);
        memset(ptr2, 'A', 9216 - kExtraAllocSize);
        partitionFreeGeneric(genericAllocator.root(), ptr2);
        partitionFreeGeneric(genericAllocator.root(), ptr1);
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(9216);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(9216u * 2, stats->activeBytes);
            EXPECT_EQ(9 * kSystemPageSize, stats->residentBytes);
        }
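        // Each 9216-byte slot spans 2.25 system pages (assuming 4096-byte
        // pages), so 4 slots cover 9 pages. Pages that hold a freed slot's
        // freelist entry must stay in core, which is why only 2 of the
        // freed pages are discardable, as the in-core checks below show.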
        CheckPageInCore(ptr1 - kPointerOffset, true);
        CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
        partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
        CheckPageInCore(ptr1 - kPointerOffset, true);
        CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);

        partitionFreeGeneric(genericAllocator.root(), ptr3);
        partitionFreeGeneric(genericAllocator.root(), ptr4);
    }
    {
        char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), (64 * kSystemPageSize) - kExtraAllocSize));
        memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
        partitionFreeGeneric(genericAllocator.root(), ptr1);
        ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), (61 * kSystemPageSize) - kExtraAllocSize));
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(64 * kSystemPageSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(3 * kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(61 * kSystemPageSize, stats->activeBytes);
            EXPECT_EQ(64 * kSystemPageSize, stats->residentBytes);
        }
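        // The 61-page request is satisfied from the recycled 64-page slot,
        // so the 3 trailing system pages are unused and discardable.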
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
        partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);

        partitionFreeGeneric(genericAllocator.root(), ptr1);
    }
    // This sub-test tests truncation of the provisioned slots in a trickier
    // case where the freelist is rewritten.
    partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
    {
        char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize));
        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        void* ptr3 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        void* ptr4 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        ptr1[0] = 'A';
        ptr1[kSystemPageSize] = 'A';
        ptr1[kSystemPageSize * 2] = 'A';
        ptr1[kSystemPageSize * 3] = 'A';
        PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
        partitionFreeGeneric(genericAllocator.root(), ptr2);
        partitionFreeGeneric(genericAllocator.root(), ptr4);
        partitionFreeGeneric(genericAllocator.root(), ptr1);
        EXPECT_EQ(0u, page->numUnprovisionedSlots);
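
        // All 4 slots were provisioned; the writes through ptr1 above
        // deliberately dirtied one byte on each slot's system page. With
        // slots 1, 2 and 4 now free in a scattered order, the purge below
        // must rewrite the freelist when it truncates the provisioned area.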
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(kSystemPageSize, stats->activeBytes);
            EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes);
        }
        CheckPageInCore(ptr1 - kPointerOffset, true);
        CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
        partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
        EXPECT_EQ(1u, page->numUnprovisionedSlots);
        CheckPageInCore(ptr1 - kPointerOffset, true);
        CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);

        // Let's check we didn't brick the freelist.
        void* ptr1b = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        EXPECT_EQ(ptr1, ptr1b);
        void* ptr2b = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        EXPECT_EQ(ptr2, ptr2b);
        EXPECT_FALSE(page->freelistHead);

        partitionFreeGeneric(genericAllocator.root(), ptr1);
        partitionFreeGeneric(genericAllocator.root(), ptr2);
        partitionFreeGeneric(genericAllocator.root(), ptr3);
    }
    // This sub-test is similar, but tests a double-truncation.
    partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDecommitEmptyPages);
    {
        char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize));
        void* ptr2 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        void* ptr3 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        void* ptr4 = partitionAllocGeneric(genericAllocator.root(), kSystemPageSize - kExtraAllocSize);
        ptr1[0] = 'A';
        ptr1[kSystemPageSize] = 'A';
        ptr1[kSystemPageSize * 2] = 'A';
        ptr1[kSystemPageSize * 3] = 'A';
        PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1));
        partitionFreeGeneric(genericAllocator.root(), ptr4);
        partitionFreeGeneric(genericAllocator.root(), ptr3);
        EXPECT_EQ(0u, page->numUnprovisionedSlots);
        {
            MockPartitionStatsDumper mockStatsDumperGeneric;
            partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", false /* detailed dump */, &mockStatsDumperGeneric);
            EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded());

            const PartitionBucketMemoryStats* stats = mockStatsDumperGeneric.GetBucketStats(kSystemPageSize);
            EXPECT_TRUE(stats);
            EXPECT_TRUE(stats->isValid);
            EXPECT_EQ(0u, stats->decommittableBytes);
            EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes);
            EXPECT_EQ(2 * kSystemPageSize, stats->activeBytes);
            EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes);
        }
        CheckPageInCore(ptr1 - kPointerOffset, true);
        CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
        partitionPurgeMemoryGeneric(genericAllocator.root(), PartitionPurgeDiscardUnusedSystemPages);
        EXPECT_EQ(2u, page->numUnprovisionedSlots);
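        // The two freed slots sit at the end of the provisioned area, so
        // the purge can unprovision both of them in one go.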
        CheckPageInCore(ptr1 - kPointerOffset, true);
        CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false);
        CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);

        EXPECT_FALSE(page->freelistHead);

        partitionFreeGeneric(genericAllocator.root(), ptr1);
        partitionFreeGeneric(genericAllocator.root(), ptr2);
    }

    TestShutdown();
}

// Tests that the countLeadingZeros() functions work to our satisfaction.
// It doesn't seem worth the overhead of a whole new file for these tests, so
// we'll put them here since partitionAllocGeneric will depend heavily on these
// functions working correctly.
TEST(PartitionAllocTest, CLZWorks)
{
    EXPECT_EQ(32u, countLeadingZeros32(0u));
    EXPECT_EQ(31u, countLeadingZeros32(1u));
    EXPECT_EQ(1u, countLeadingZeros32(1u << 30));
    EXPECT_EQ(0u, countLeadingZeros32(1u << 31));

#if CPU(64BIT)
    EXPECT_EQ(64u, countLeadingZerosSizet(0ull));
    EXPECT_EQ(63u, countLeadingZerosSizet(1ull));
    EXPECT_EQ(32u, countLeadingZerosSizet(1ull << 31));
    EXPECT_EQ(1u, countLeadingZerosSizet(1ull << 62));
    EXPECT_EQ(0u, countLeadingZerosSizet(1ull << 63));
#else
    EXPECT_EQ(32u, countLeadingZerosSizet(0u));
    EXPECT_EQ(31u, countLeadingZerosSizet(1u));
    EXPECT_EQ(1u, countLeadingZerosSizet(1u << 30));
    EXPECT_EQ(0u, countLeadingZerosSizet(1u << 31));
#endif
}

} // namespace WTF

#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)