// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #include "base/memory/discardable_memory_allocator_android.h"
10 #include "base/memory/discardable_memory.h"
11 #include "base/memory/scoped_ptr.h"
12 #include "base/strings/string_number_conversions.h"
13 #include "base/strings/string_split.h"
14 #include "base/strings/stringprintf.h"
15 #include "build/build_config.h"
16 #include "testing/gtest/include/gtest/gtest.h"

namespace base {
namespace internal {

const char kAllocatorName[] = "allocator-for-testing";

const size_t kAshmemRegionSizeForTesting = 32 * 1024 * 1024;
const size_t kPageSize = 4096;

const size_t kMaxAllowedAllocationSize =
    std::numeric_limits<size_t>::max() - kPageSize + 1;
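// Note that kMaxAllowedAllocationSize is the largest size that can still be
// rounded up to a page boundary without overflowing size_t (see
// TooLargeAllocationFails below).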

class DiscardableMemoryAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAllocatorTest()
      : allocator_(kAllocatorName, kAshmemRegionSizeForTesting) {
  }

  DiscardableMemoryAllocator allocator_;
};

void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) {
  // Write to the first and the last pages only to avoid paging in up to 64
  // MBytes.
  static_cast<char*>(memory->Memory())[0] = 'a';
  static_cast<char*>(memory->Memory())[size - 1] = 'a';
}

TEST_F(DiscardableMemoryAllocatorTest, Basic) {
  const size_t size = 128;
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
  ASSERT_TRUE(memory);
  WriteToDiscardableMemory(memory.get(), size);
}

TEST_F(DiscardableMemoryAllocatorTest, ZeroAllocationIsNotSupported) {
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(0));
  ASSERT_FALSE(memory);
}

TEST_F(DiscardableMemoryAllocatorTest, TooLargeAllocationFails) {
  scoped_ptr<DiscardableMemory> memory(
      allocator_.Allocate(kMaxAllowedAllocationSize + 1));
  // Page-alignment would have caused an overflow resulting in a small
  // allocation if the input size wasn't checked correctly.
  ASSERT_FALSE(memory);
}

TEST_F(DiscardableMemoryAllocatorTest,
       AshmemRegionsAreNotSmallerThanRequestedSize) {
  // The creation of the underlying ashmem region is expected to fail since
  // there should not be enough room in the address space. When ashmem creation
  // fails, the allocator repetitively retries by dividing the size by 2. This
  // size should not be smaller than the size the user requested so the
  // allocation here should just fail (and not succeed with the minimum ashmem
  // region size).
  scoped_ptr<DiscardableMemory> memory(
      allocator_.Allocate(kMaxAllowedAllocationSize));
  ASSERT_FALSE(memory);
}

TEST_F(DiscardableMemoryAllocatorTest, AshmemRegionsAreAlwaysPageAligned) {
  // Use a separate allocator here so that we can override the ashmem region
  // size.
  DiscardableMemoryAllocator allocator(
      kAllocatorName, kMaxAllowedAllocationSize);
  scoped_ptr<DiscardableMemory> memory(allocator.Allocate(kPageSize));
  ASSERT_TRUE(memory);
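  // Creating an ashmem region of kMaxAllowedAllocationSize is expected to
  // fail, so the allocator should have retried with smaller region sizes
  // until creation succeeded; the resulting region must still be page-aligned.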
  EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size());
  ASSERT_TRUE(allocator.last_ashmem_region_size() % kPageSize == 0);
}

TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
  // Note that large allocations should just use DiscardableMemoryAndroidSimple
  // instead.
  const size_t size = 64 * 1024 * 1024;
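  // 64 MBytes is larger than kAshmemRegionSizeForTesting (32 MBytes), so this
  // presumably exercises an ashmem region bigger than the default size.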
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
  ASSERT_TRUE(memory);
  WriteToDiscardableMemory(memory.get(), size);
}

TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
  WriteToDiscardableMemory(memory.get(), kPageSize);
}

TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
  // Extra allocation that prevents the region from being deleted when |memory|
  // gets deleted.
  scoped_ptr<DiscardableMemory> memory_lock(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  void* const address = memory->Memory();
  memory->Unlock();  // Tests that the reused chunk is being locked correctly.
  memory.reset();
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // The previously freed chunk should be reused.
  EXPECT_EQ(address, memory->Memory());
  WriteToDiscardableMemory(memory.get(), kPageSize);
}

TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory->Memory()) = kMagic;
  memory.reset();
  // The previous ashmem region should have been closed thus it should not be
  // reused.
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
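  // A freshly created ashmem region should be zero-filled, so the magic value
  // written above must not show through.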
  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}

TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together.
  memory3.reset();
  memory1 = allocator_.Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be reused.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableMemory(memory1.get(), kPageSize);
}

TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory3);
  scoped_ptr<DiscardableMemory> memory4(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory4);
  void* const memory1_address = memory1->Memory();
  memory1.reset();
  memory3.reset();
  // Freeing |memory2| (located between memory1 and memory3) should merge the
  // three free blocks together.
  memory2.reset();
  memory1 = allocator_.Allocate(3 * kPageSize);
  EXPECT_EQ(memory1_address, memory1->Memory());
}

TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) {
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);
  void* const memory1_address = memory1->Memory();
  memory1.reset();
  memory1 = allocator_.Allocate(2 * kPageSize);
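  // The 16-KByte chunk freed above should have been split: 8 KBytes are reused
  // by |memory1| and 8 KBytes remain free before |memory2|.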
  memory2.reset();
  // At this point, the region should be in this state:
  // 8 KBytes (used), 24 KBytes (free).
  memory2 = allocator_.Allocate(6 * kPageSize);
  EXPECT_EQ(
      static_cast<const char*>(memory2->Memory()),
      static_cast<const char*>(memory1_address) + 2 * kPageSize);
}

TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) {
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);
  void* const memory1_address = memory1->Memory();
  memory1.reset();
  memory1 = allocator_.Allocate(2 * kPageSize);
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize));
  // At this point, the region should be in this state:
  // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
  memory3.reset();
  memory2.reset();
  // At this point, the region should be in this state:
  // 8 KBytes (used), 24 KBytes (free).
  memory2 = allocator_.Allocate(6 * kPageSize);
  EXPECT_EQ(
      static_cast<const char*>(memory2->Memory()),
      static_cast<const char*>(memory1_address) + 2 * kPageSize);
}

TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) {
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);
  memory1.reset();
  memory1 = allocator_.Allocate(2 * kPageSize);
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize));
  // At this point, the region should be in this state:
  // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
  memory1.reset();
  memory3.reset();
  // At this point, the region should be in this state:
  // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free).
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory2->Memory()) = kMagic;
  memory2.reset();
  // The whole region should have been deleted.
  memory2 = allocator_.Allocate(2 * kPageSize);
  EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory()));
}

TEST_F(DiscardableMemoryAllocatorTest,
       TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
  // Keep |memory_1| below allocated so that the ashmem region doesn't get
  // closed when |memory_2| is deleted.
  scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024));
  ASSERT_TRUE(memory_1);
  scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024));
  ASSERT_TRUE(memory_2);
  void* const address = memory_2->Memory();
  memory_2.reset();
  const size_t size = 16 * 1024;
  memory_2 = allocator_.Allocate(size);
  ASSERT_TRUE(memory_2);
  EXPECT_EQ(address, memory_2->Memory());
  WriteToDiscardableMemory(memory_2.get(), size);
  scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size));
  // The unused tail (16 KBytes large) of the previously freed chunk should be
  // reused.
  EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory());
  WriteToDiscardableMemory(memory_3.get(), size);
}

TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
  // Leave one page untouched at the end of the ashmem region.
  const size_t size = kAshmemRegionSizeForTesting - kPageSize;
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(size));
  ASSERT_TRUE(memory1);
  WriteToDiscardableMemory(memory1.get(), size);
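
  // Only one page is left in the first ashmem region, so allocating a full
  // kAshmemRegionSizeForTesting below should force the allocator to create a
  // second region.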
  scoped_ptr<DiscardableMemory> memory2(
      allocator_.Allocate(kAshmemRegionSizeForTesting));
  ASSERT_TRUE(memory2);
  WriteToDiscardableMemory(memory2.get(), kAshmemRegionSizeForTesting);
  // The last page of the first ashmem region should be used for this
  // allocation.
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory3);
  WriteToDiscardableMemory(memory3.get(), kPageSize);
  EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
}

TEST_F(DiscardableMemoryAllocatorTest,
       HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) {
  // Prevents the ashmem region from getting closed when |memory2| gets freed.
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory1);

  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);

  memory2.reset();
  memory2 = allocator_.Allocate(kPageSize);
  // There should now be a free chunk of size 3 * |kPageSize| starting at offset
  // 2 * |kPageSize| and the pointer to the highest allocated chunk should have
  // also been updated to |base_| + 2 * |kPageSize|. This pointer is used to
  // maintain the container mapping a chunk address to its previous chunk and
  // this map is in turn used while merging previous contiguous chunks.

  // Allocate more than 3 * |kPageSize| so that the free chunk of size 3 *
  // |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead.
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory3);

  // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge
  // with its previous chunk which is the free chunk of size |3 * kPageSize|.
  memory3.reset();
  memory3 = allocator_.Allocate((3 + 4) * kPageSize);
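  // The merged 7 * |kPageSize| chunk should start right after |memory2|'s
  // single page, i.e. at |memory2->Memory()| + |kPageSize|.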
  EXPECT_EQ(memory3->Memory(),
            static_cast<const char*>(memory2->Memory()) + kPageSize);
}

}  // namespace internal
}  // namespace base