// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the tests for the FencedAllocator class.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/aligned_memory.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace gpu {

using testing::Return;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::_;
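
// Base fixture: builds the mock command-buffer stack shared by all the
// FencedAllocator tests. Commands are executed by a GpuScheduler driving an
// AsyncAPIMock directly (no IPC), so tokens inserted through the
// CommandBufferHelper are processed by the mock engine on demand.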
class BaseFencedAllocatorTest : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;
  static const int kAllocAlignment = 16;

  void SetUp() override {
    api_mock_.reset(new AsyncAPIMock(true));
    // ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    TransferBufferManager* manager = new TransferBufferManager();
    transfer_buffer_manager_ = manager;
    EXPECT_TRUE(manager->Initialize());

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }
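
  // Returns the last token that the service side has processed, read back
  // from the command buffer state.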
  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
  // Gives base::MessageLoop::current() something to run in TearDown.
  base::MessageLoop message_loop_;
};
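
// Out-of-class definition: the EXPECT_* macros take their arguments by const
// reference, which ODR-uses kBufferSize and therefore requires storage.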
const unsigned int BaseFencedAllocatorTest::kBufferSize;

namespace {

// A poll callback that does nothing, for tests that don't exercise polling.
void EmptyPoll() {
}

}  // namespace

// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
class FencedAllocatorTest : public BaseFencedAllocatorTest {
 protected:
  void SetUp() override {
    BaseFencedAllocatorTest::SetUp();
    allocator_.reset(new FencedAllocator(kBufferSize,
                                         helper_.get(),
                                         base::Bind(&EmptyPoll)));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocator> allocator_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorTest, TestBasic) {
  allocator_->CheckConsistency();
  EXPECT_FALSE(allocator_->InUse());

  const unsigned int kSize = 16;
  FencedAllocator::Offset offset = allocator_->Alloc(kSize);
  EXPECT_TRUE(allocator_->InUse());
  EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_GE(kBufferSize, offset + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Test alloc 0 fails.
TEST_F(FencedAllocatorTest, TestAllocZero) {
  FencedAllocator::Offset offset = allocator_->Alloc(0);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, reallocate with half the size.
  allocator_->Free(offsets[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  offsets[0] = allocator_->Alloc(kSize / 2);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}
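
// FreePendingToken defers reclaiming a block until the engine has processed
// the token obtained from CommandBufferHelper::InsertToken; Alloc may run
// the command buffer forward to reclaim such blocks when it is out of space.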
// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed.
  offsets[0] = allocator_->Alloc(kSize);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism using FreeUnused.
TEST_F(FencedAllocatorTest, FreeUnused) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_TRUE(allocator_->InUse());

  // No memory should be available.
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused block.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Free two more buffers, pending fences.
  token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[1], token);
  token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[2], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Check that nothing has changed.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused blocks.
  EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
  EXPECT_TRUE(allocator_->InUse());

  // Free up everything.
  for (unsigned int i = 3; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_FALSE(allocator_->InUse());
}

// Tests GetLargestFreeSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation. But either first-fit or best-fit would
  // ensure that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this is dependent on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());

  offset = allocator_->Alloc(kBufferSize - 3 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());

  offset1 = allocator_->Alloc(2 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  allocator_->Free(offset);
  allocator_->Free(offset1);
  allocator_->Free(offset2);
}

// Tests GetLargestFreeOrPendingSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation. But either first-fit or best-fit would
  // ensure that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this is dependent on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeOrPendingSize());

  // Free the last one, pending a token.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offset2, token);

  // Now all the buffers have been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... but one is still waiting for the token.
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeSize());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());
  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed, but it will succeed.
  offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());
  allocator_->Free(offset);

  // Everything now has been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... for real.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}
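
// Test fixture for the poll callback: the poll function is a GoogleMock
// method so the tests can verify exactly when FreeUnused invokes it and what
// the callback is allowed to do.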
class FencedAllocatorPollTest : public BaseFencedAllocatorTest {
 public:
  static const unsigned int kAllocSize = 128;

  MOCK_METHOD0(MockedPoll, void());

 protected:
  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    BaseFencedAllocatorTest::TearDown();
  }
};
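
// Checks that FreeUnused invokes the poll callback, and that the callback
// may itself free blocks through the allocator.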
TEST_F(FencedAllocatorPollTest, TestPoll) {
  scoped_ptr<FencedAllocator> allocator(
      new FencedAllocator(kBufferSize,
                          helper_.get(),
                          base::Bind(&FencedAllocatorPollTest::MockedPoll,
                                     base::Unretained(this))));

  FencedAllocator::Offset mem1 = allocator->Alloc(kAllocSize);
  FencedAllocator::Offset mem2 = allocator->Alloc(kAllocSize);
  EXPECT_NE(mem1, FencedAllocator::kInvalidOffset);
  EXPECT_NE(mem2, FencedAllocator::kInvalidOffset);
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);

  // Check that no-op Poll doesn't affect the state.
  EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);

  // Check that freeing in Poll works.
  base::Closure free_mem1_closure =
      base::Bind(&FencedAllocator::Free,
                 base::Unretained(allocator.get()),
                 mem1);
  EXPECT_CALL(*this, MockedPoll())
      .WillOnce(InvokeWithoutArgs(&free_mem1_closure, &base::Closure::Run))
      .RetiresOnSaturation();
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 1);

  // Check that freeing still works.
  EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
  allocator->Free(mem2);
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), 0u);
}

// Test fixture for FencedAllocatorWrapper test - Creates a
// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
// RPC mechanism), making sure Noops are ignored and SetToken calls are
// properly forwarded to the engine.
class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
 protected:
  void SetUp() override {
    BaseFencedAllocatorTest::SetUp();

    // Though allocating this buffer isn't strictly necessary, it makes
    // allocations point to valid addresses, so they could be used for
    // something.
    buffer_.reset(static_cast<char*>(base::AlignedAlloc(
        kBufferSize, kAllocAlignment)));
    allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
                                                helper_.get(),
                                                base::Bind(&EmptyPoll),
                                                buffer_.get()));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocatorWrapper> allocator_;
  scoped_ptr<char, base::AlignedFreeDeleter> buffer_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorWrapperTest, TestBasic) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  void *pointer = allocator_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
  EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());

  char *pointer_char = allocator_->AllocTyped<char>(kSize);
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_.get(), pointer_char);
  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
  allocator_->Free(pointer_char);
  EXPECT_TRUE(allocator_->CheckConsistency());

  unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
  ASSERT_TRUE(pointer_uint);
  EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
  EXPECT_GE(buffer_.get() + kBufferSize,
            reinterpret_cast<char *>(pointer_uint + kSize));

  // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
  // directly, except from the remaining size.
  EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
            allocator_->GetLargestFreeSize());
  allocator_->Free(pointer_uint);
}

// Test alloc 0 fails.
TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
  allocator_->CheckConsistency();

  void *pointer = allocator_->Alloc(0);
  ASSERT_FALSE(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks that allocation offsets are aligned to multiples of 16 bytes.
TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
  allocator_->CheckConsistency();

  const unsigned int kSize1 = 75;
  void *pointer1 = allocator_->Alloc(kSize1);
  ASSERT_TRUE(pointer1);
  // A pointer is kAllocAlignment-aligned iff its low-order bits are zero.
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize2 = 43;
  void *pointer2 = allocator_->Alloc(kSize2);
  ASSERT_TRUE(pointer2);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer2);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer1);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, reallocate with half the size.
  allocator_->Free(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  pointers[0] = allocator_->Alloc(kSize / 2);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(pointers[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed.
  pointers[0] = allocator_->Alloc(kSize);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

}  // namespace gpu