// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/client/mapped_memory.h"

#include <list>

#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace gpu {
using testing::Return;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::_;
class MappedMemoryTestBase : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;

  void SetUp() override {
    api_mock_.reset(new AsyncAPIMock(true));
    // Ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));
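
    // Create the transfer buffer manager that the command buffer service
    // below uses to track shared-memory transfer buffers.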
    {
      TransferBufferManager* manager = new TransferBufferManager();
      transfer_buffer_manager_ = manager;
      EXPECT_TRUE(manager->Initialize());
    }

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());
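
    // Create the helper through which the tests issue commands and insert
    // tokens.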
    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
  // Message loop required by the RunUntilIdle() calls in the TearDown methods.
  base::MessageLoop message_loop_;
};

const unsigned int MappedMemoryTestBase::kBufferSize;

namespace {
// A do-nothing poll callback for tests that don't exercise polling.
void EmptyPoll() {}
}  // namespace

// Test fixture for MemoryChunk tests - creates a MemoryChunk, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface
// (calling it directly, not through the RPC mechanism), making sure noops
// are ignored and SetToken calls are properly forwarded to the engine.
class MemoryChunkTest : public MappedMemoryTestBase {
 protected:
  static const int32 kShmId = 123;

  void SetUp() override {
    MappedMemoryTestBase::SetUp();
    scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
    shared_memory->CreateAndMapAnonymous(kBufferSize);
    buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
    chunk_.reset(new MemoryChunk(kShmId,
                                 buffer_,
                                 helper_.get(),
                                 base::Bind(&EmptyPoll)));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    MappedMemoryTestBase::TearDown();
  }

  uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }

  scoped_ptr<MemoryChunk> chunk_;
  scoped_refptr<gpu::Buffer> buffer_;
};

const int32 MemoryChunkTest::kShmId;
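
// Checks basic alloc/free bookkeeping on a single chunk: allocations must
// land inside the chunk's buffer, and the largest-free-size queries must
// shrink and recover as blocks are allocated and freed.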
TEST_F(MemoryChunkTest, Basic) {
  const unsigned int kSize = 16;
  EXPECT_EQ(kShmId, chunk_->shm_id());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());
  void* pointer = chunk_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
  EXPECT_GE(kBufferSize,
            static_cast<uint8*>(pointer) - buffer_memory() + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());

  chunk_->Free(pointer);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());

  uint8* pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_memory(), pointer_char);
  EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  chunk_->Free(pointer_char);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
}
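
// Test fixture for MappedMemoryManager - reuses the mock command-buffer
// setup above and exposes the manager so a poll callback can free memory
// through it (see the Poll test below).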
class MappedMemoryManagerTest : public MappedMemoryTestBase {
 public:
  MappedMemoryManager* manager() const {
    return manager_.get();
  }

 protected:
  void SetUp() override {
    MappedMemoryTestBase::SetUp();
    manager_.reset(new MappedMemoryManager(
        helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();
    manager_.reset();
    MappedMemoryTestBase::TearDown();
  }

  scoped_ptr<MappedMemoryManager> manager_;
};

TEST_F(MappedMemoryManagerTest, Basic) {
  const unsigned int kSize = 1024;
  // Check we can alloc.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);
  // Check that if we free and realloc the same size we get the same memory.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  manager_->Free(mem1);
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  EXPECT_EQ(mem1, mem2);
  EXPECT_EQ(id1, id2);
  EXPECT_EQ(offset1, offset2);
  // Check that if we allocate again we get different shared memory.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3 != NULL);
  EXPECT_NE(mem2, mem3);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset3);
  // Free 3 and allocate 2 half size blocks.
  manager_->Free(mem3);
  int32 id4 = -1;
  int32 id5 = -1;
  unsigned int offset4 = 0xFFFFFFFFU;
  unsigned int offset5 = 0xFFFFFFFFU;
  void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
  void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
  ASSERT_TRUE(mem4 != NULL);
  ASSERT_TRUE(mem5 != NULL);
  EXPECT_EQ(id3, id4);
  EXPECT_EQ(id4, id5);
  EXPECT_EQ(0u, offset4);
  EXPECT_EQ(kSize / 2u, offset5);
  manager_->Free(mem4);
  manager_->Free(mem2);
  manager_->Free(mem5);
}
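
// FreePendingToken() defers reuse of a block until the service has processed
// the given token, so the memory stays valid while commands that may still
// reference it are pending.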
TEST_F(MappedMemoryManagerTest, FreePendingToken) {
  const unsigned int kSize = 128;
  const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
  CHECK(kAllocCount * kSize == kBufferSize * 2);

  // Allocate several buffers across multiple chunks.
  void* pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    int32 id = -1;
    unsigned int offset = 0xFFFFFFFFu;
    pointers[i] = manager_->Alloc(kSize, &id, &offset);
    EXPECT_TRUE(pointers[i]);
    EXPECT_NE(id, -1);
    EXPECT_NE(offset, 0xFFFFFFFFu);
  }

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  manager_->FreePendingToken(pointers[0], token);

  // The way we hooked up the helper and engine, the helper won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());
  // Force it to read up to the token.
  helper_->Finish();
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // This allocation should use the spot just freed above.
  int32 new_id = -1;
  unsigned int new_offset = 0xFFFFFFFFu;
  void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
  EXPECT_TRUE(new_ptr);
  EXPECT_EQ(new_ptr, pointers[0]);
  EXPECT_NE(new_id, -1);
  EXPECT_NE(new_offset, 0xFFFFFFFFu);

  // Free up everything.
  manager_->Free(new_ptr);
  for (unsigned int i = 1; i < kAllocCount; ++i) {
    manager_->Free(pointers[i]);
  }
}
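
// FreeUnused() should release only those chunks in which every allocation
// has been freed.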
TEST_F(MappedMemoryManagerTest, FreeUnused) {
  int32 id = -1;
  unsigned int offset = 0xFFFFFFFFU;
  void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
  void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
  ASSERT_TRUE(m1 != NULL);
  ASSERT_TRUE(m2 != NULL);
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->Free(m2);
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(1u, manager_->num_chunks());
  manager_->Free(m1);
  EXPECT_EQ(1u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(0u, manager_->num_chunks());
}
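
// With a chunk size multiple of 2 * kSize, two kSize allocations share one
// chunk (offsets 0 and kSize) and a third spills into a new chunk.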
TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
  const unsigned int kSize = 1024;
  manager_->set_chunk_size_multiple(kSize * 2);
  // Check that if we allocate less than the chunk size multiple we get
  // chunks rounded up to that multiple.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem1);
  ASSERT_TRUE(mem2);
  ASSERT_TRUE(mem3);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(id1, id2);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset1);
  EXPECT_EQ(kSize, offset2);
  EXPECT_EQ(0u, offset3);

  manager_->Free(mem1);
  manager_->Free(mem2);
  manager_->Free(mem3);
}
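
// The manager's memory limit applies to unused (free) memory only; chunks
// whose memory is fully in use may push the total past the limit.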
TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
  const unsigned int kChunkSize = 2048;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate one chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate another full chunk worth of memory. The first chunk is full,
  // so a new one must be created.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  // Expect two chunks to be allocated, exceeding the limit,
  // since all memory is in use.
  EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem2);
}
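
// Once the memory limit is reached, the manager should wait on a pending
// token to reclaim freed space rather than allocate another chunk.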
TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kSize));
  const unsigned int kChunkSize = 2 * 1024;
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate half a chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate half a chunk worth of memory again.
  // The same chunk will be used.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(kSize, offset2);

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  manager_->FreePendingToken(mem2, token);

  // The way we hooked up the helper and engine, the helper won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // Since we didn't call helper_->Finish() the token did not pass.
  // We won't be able to claim the free memory without waiting, and
  // as we've already met the memory limit we'll have to wait
  // on the token.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3);
  EXPECT_NE(-1, id3);
  // It will reuse the space from the second allocation just freed.
  EXPECT_EQ(kSize, offset3);

  // Expect one chunk to be allocated.
  EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem3);
}
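
// set_max_allocated_bytes() caps the creation of new chunks, but space that
// was already allocated can still be reused.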
TEST_F(MappedMemoryManagerTest, MaxAllocationTest) {
  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kSize));

  const size_t kLimit = 512;
  manager_->set_chunk_size_multiple(kLimit);

  // Allocate twice the limit worth of memory (currently unbounded).
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kLimit, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kLimit, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  manager_->set_max_allocated_bytes(kLimit);

  // A new allocation should now fail.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kLimit, &id3, &offset3);
  ASSERT_FALSE(mem3);
  EXPECT_EQ(-1, id3);
  EXPECT_EQ(0xFFFFFFFFU, offset3);

  manager_->Free(mem2);

  // A new allocation is over the limit but should reuse already-allocated
  // space.
  int32 id4 = -1;
  unsigned int offset4 = 0xFFFFFFFFU;
  void* mem4 = manager_->Alloc(kLimit, &id4, &offset4);
  ASSERT_TRUE(mem4);
  EXPECT_EQ(id2, id4);
  EXPECT_EQ(offset2, offset4);

  manager_->Free(mem1);
  manager_->Free(mem4);
}
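
// Poll callback for the Poll test below: frees every address queued on
// |list|, standing in for unmanaged memory that becomes reclaimable when
// the manager polls.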
namespace {
void Poll(MappedMemoryManagerTest* test, std::list<void*>* list) {
  std::list<void*>::iterator it = list->begin();
  while (it != list->end()) {
    void* address = *it;
    test->manager()->Free(address);
    it = list->erase(it);
  }
}
}  // namespace

TEST_F(MappedMemoryManagerTest, Poll) {
  std::list<void*> unmanaged_memory_list;

  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(),
      base::Bind(&Poll, this, &unmanaged_memory_list),
      kSize));

  // Allocate kSize bytes. Don't add the address to
  // the unmanaged memory list, so that it won't be freed just yet.
  int32 id1;
  unsigned int offset1;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  EXPECT_EQ(manager_->bytes_in_use(), kSize);

  // Allocate kSize more bytes, and make sure we grew.
  int32 id2;
  unsigned int offset2;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);

  // Make the unmanaged buffer be released the next time FreeUnused() is
  // called in MappedMemoryManager/FencedAllocator. This happens, for example,
  // when allocating new memory.
  unmanaged_memory_list.push_back(mem1);

  // Allocate kSize more bytes. This should poll unmanaged memory, which now
  // should free the previously allocated unmanaged memory.
  int32 id3;
  unsigned int offset3;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);

  manager_->Free(mem2);
  manager_->Free(mem3);
  EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
}

}  // namespace gpu