// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/client/mapped_memory.h"

#include <list>

#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

namespace gpu {

using testing::Return;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::_;
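
// Base fixture: wires a CommandBufferHelper to a GpuScheduler that executes
// commands synchronously against a mocked AsyncAPIInterface, so tests can
// insert and observe tokens without a real GPU process.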
class MappedMemoryTestBase : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;

  void SetUp() override {
    api_mock_.reset(new AsyncAPIMock(true));
    // Ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    TransferBufferManager* manager = new TransferBufferManager();
    transfer_buffer_manager_.reset(manager);
    EXPECT_TRUE(manager->Initialize());

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }
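
  // Returns the last token the mocked service side has processed.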
  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};

const unsigned int MappedMemoryTestBase::kBufferSize;
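
namespace {
// No-op poll callback; the fixtures below reference it via
// base::Bind(&EmptyPoll) where a test doesn't exercise polling.
void EmptyPoll() {
}
}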

// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
class MemoryChunkTest : public MappedMemoryTestBase {
 protected:
  static const int32 kShmId = 123;
  void SetUp() override {
    MappedMemoryTestBase::SetUp();
    scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
    shared_memory->CreateAndMapAnonymous(kBufferSize);
    buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
    chunk_.reset(new MemoryChunk(kShmId,
                                 buffer_,
                                 helper_.get(),
                                 base::Bind(&EmptyPoll)));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    MappedMemoryTestBase::TearDown();
  }

  uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }

  scoped_ptr<MemoryChunk> chunk_;
  scoped_refptr<gpu::Buffer> buffer_;
};

const int32 MemoryChunkTest::kShmId;

TEST_F(MemoryChunkTest, Basic) {
  const unsigned int kSize = 16;
  EXPECT_EQ(kShmId, chunk_->shm_id());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());
  void* pointer = chunk_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
  EXPECT_GE(kBufferSize,
            static_cast<uint8*>(pointer) - buffer_memory() + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());

  chunk_->Free(pointer);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
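
  // Allocate again: the chunk should hand back a block from the same buffer,
  // and the free-size accounting should match the first round.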
  uint8* pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_memory(), pointer_char);
  EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  chunk_->Free(pointer_char);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
}
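
// Fixture for MappedMemoryManager tests: owns a manager bound to the shared
// CommandBufferHelper from the base fixture.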
class MappedMemoryManagerTest : public MappedMemoryTestBase {
 public:
  MappedMemoryManager* manager() const {
    return manager_.get();
  }

 protected:
  void SetUp() override {
    MappedMemoryTestBase::SetUp();
    manager_.reset(new MappedMemoryManager(
        helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();
    manager_.reset();
    MappedMemoryTestBase::TearDown();
  }

  scoped_ptr<MappedMemoryManager> manager_;
};

TEST_F(MappedMemoryManagerTest, Basic) {
  const unsigned int kSize = 1024;
  // Check we can alloc.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);
  // Check that if we free and realloc the same size we get the same memory.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  manager_->Free(mem1);
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  EXPECT_EQ(mem1, mem2);
  EXPECT_EQ(id1, id2);
  EXPECT_EQ(offset1, offset2);
  // Check that if we allocate again we get different shared memory.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3 != NULL);
  EXPECT_NE(mem2, mem3);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset3);
  // Free 3 and allocate 2 half size blocks.
  manager_->Free(mem3);
  int32 id4 = -1;
  int32 id5 = -1;
  unsigned int offset4 = 0xFFFFFFFFU;
  unsigned int offset5 = 0xFFFFFFFFU;
  void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
  void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
  ASSERT_TRUE(mem4 != NULL);
  ASSERT_TRUE(mem5 != NULL);
  EXPECT_EQ(id3, id4);
  EXPECT_EQ(id4, id5);
  EXPECT_EQ(0u, offset4);
  EXPECT_EQ(kSize / 2u, offset5);
  manager_->Free(mem4);
  manager_->Free(mem2);
  manager_->Free(mem5);
}
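
// FreePendingToken defers reuse of a block until the service has processed
// the given token, so in-flight commands can still read the memory.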
TEST_F(MappedMemoryManagerTest, FreePendingToken) {
  const unsigned int kSize = 128;
  const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
  CHECK(kAllocCount * kSize == kBufferSize * 2);

  // Allocate several buffers across multiple chunks.
  void* pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    int32 id = -1;
    unsigned int offset = 0xFFFFFFFFu;
    pointers[i] = manager_->Alloc(kSize, &id, &offset);
    EXPECT_TRUE(pointers[i]);
    EXPECT_NE(id, -1);
    EXPECT_NE(offset, 0xFFFFFFFFu);
  }

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  manager_->FreePendingToken(pointers[0], token);

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());

  // Force it to read up to the token.
  helper_->Finish();
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // This allocation should use the spot just freed above.
  int32 new_id = -1;
  unsigned int new_offset = 0xFFFFFFFFu;
  void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
  EXPECT_TRUE(new_ptr);
  EXPECT_EQ(new_ptr, pointers[0]);
  EXPECT_NE(new_id, -1);
  EXPECT_NE(new_offset, 0xFFFFFFFFu);

  // Free up everything.
  manager_->Free(new_ptr);
  for (unsigned int i = 1; i < kAllocCount; ++i) {
    manager_->Free(pointers[i]);
  }
}
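
// FreeUnused should release only chunks whose memory is completely free.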
TEST_F(MappedMemoryManagerTest, FreeUnused) {
  int32 id = -1;
  unsigned int offset = 0xFFFFFFFFU;
  void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
  void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
  ASSERT_TRUE(m1 != NULL);
  ASSERT_TRUE(m2 != NULL);
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->Free(m1);
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(1u, manager_->num_chunks());
  manager_->Free(m2);
  EXPECT_EQ(1u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(0u, manager_->num_chunks());
}

TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
  const unsigned int kSize = 1024;
  manager_->set_chunk_size_multiple(kSize * 2);
  // Check that if we allocate less than the chunk size multiple we get
  // chunks rounded up.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem1);
  ASSERT_TRUE(mem2);
  ASSERT_TRUE(mem3);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(id1, id2);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset1);
  EXPECT_EQ(kSize, offset2);
  EXPECT_EQ(0u, offset3);

  manager_->Free(mem1);
  manager_->Free(mem2);
  manager_->Free(mem3);
}
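
// In the two tests below, the third MappedMemoryManager constructor argument
// is the memory limit referred to in the "Reset the manager" comments.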
TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
  const unsigned int kChunkSize = 2048;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate one chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate one more chunk worth of memory. The first chunk is fully in
  // use, so a second chunk must be created.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  // Expect two chunks to be allocated, exceeding the limit,
  // since all memory is in use.
  EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem2);
}
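
// With a limit of one allocation's worth of memory, reusing freed space
// requires waiting on the pending token rather than growing a new chunk.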
TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(), base::Bind(&EmptyPoll), kSize));
  const unsigned int kChunkSize = 2 * 1024;
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate half a chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate half a chunk worth of memory again.
  // The same chunk will be used.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(kSize, offset2);

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  manager_->FreePendingToken(mem2, token);

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());

  // Since we didn't call helper_.finish() the token did not pass.
  // We won't be able to claim the free memory without waiting and
  // as we've already met the memory limit we'll have to wait
  // on the token.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3);
  EXPECT_NE(-1, id3);
  // It will reuse the space from the second allocation just freed.
  EXPECT_EQ(kSize, offset3);

  // Expect one chunk to be allocated.
  EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem3);
}
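
// Poll callback used by the Poll test below: frees every pointer queued on
// |list|, simulating unmanaged memory being reclaimed when the manager polls.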
namespace {
void Poll(MappedMemoryManagerTest* test, std::list<void*>* list) {
  std::list<void*>::iterator it = list->begin();
  while (it != list->end()) {
    void* address = *it;
    test->manager()->Free(address);
    it = list->erase(it);
  }
}
}

TEST_F(MappedMemoryManagerTest, Poll) {
  std::list<void*> unmanaged_memory_list;

  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(
      helper_.get(),
      base::Bind(&Poll, this, &unmanaged_memory_list),
      kSize));

  // Allocate kSize bytes. Don't add the address to
  // the unmanaged memory list, so that it won't be freed just yet.
  int32 id1;
  unsigned int offset1;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  EXPECT_EQ(manager_->bytes_in_use(), kSize);

  // Allocate kSize more bytes, and make sure we grew.
  int32 id2;
  unsigned int offset2;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);

  // Make the unmanaged buffer be released next time FreeUnused() is called
  // in MappedMemoryManager/FencedAllocator. This happens for example when
  // allocating new memory.
  unmanaged_memory_list.push_back(mem1);

  // Allocate kSize more bytes. This should poll unmanaged memory, which now
  // should free the previously allocated unmanaged memory.
  int32 id3;
  unsigned int offset3;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);

  manager_->Free(mem2);
  manager_->Free(mem3);
  EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
}

}  // namespace gpu