// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the tests for the FencedAllocator class.

#include "base/bind_helpers.h"
#include "base/memory/aligned_memory.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

namespace gpu {

using testing::Return;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::_;

class BaseFencedAllocatorTest : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;
  static const int kAllocAlignment = 16;
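
  // SetUp wires a mock async API into a real CommandBufferHelper and
  // GpuScheduler, so commands execute synchronously whenever the helper has
  // to wait (e.g. for a token or for buffer space).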
  void SetUp() override {
    api_mock_.reset(new AsyncAPIMock(true));
    // Ignore noops in the mock - we don't want to inspect the internals of
    // the helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    TransferBufferManager* manager = new TransferBufferManager();
    transfer_buffer_manager_.reset(manager);
    EXPECT_TRUE(manager->Initialize());
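
    // The command buffer service uses this manager for the shared memory
    // buffers that commands refer to.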
    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
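
    // AsyncAPIMock::SetToken needs an engine to record tokens on; use the
    // scheduler.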
    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }
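
  // Returns the last token the service side has processed, as recorded in
  // the command buffer's last known state.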
  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};

const unsigned int BaseFencedAllocatorTest::kBufferSize;

namespace {
void EmptyPoll() {
}
}  // namespace

// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
class FencedAllocatorTest : public BaseFencedAllocatorTest {
 protected:
  void SetUp() override {
    BaseFencedAllocatorTest::SetUp();
    allocator_.reset(new FencedAllocator(kBufferSize,
                                         helper_.get(),
                                         base::Bind(&EmptyPoll)));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocator> allocator_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorTest, TestBasic) {
  allocator_->CheckConsistency();
  EXPECT_FALSE(allocator_->InUse());

  const unsigned int kSize = 16;
  FencedAllocator::Offset offset = allocator_->Alloc(kSize);
  EXPECT_TRUE(allocator_->InUse());
  EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_GE(kBufferSize, offset + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Test alloc 0 fails.
TEST_F(FencedAllocatorTest, TestAllocZero) {
  FencedAllocator::Offset offset = allocator_->Alloc(0);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, then reallocate with half the size.
  allocator_->Free(offsets[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  offsets[0] = allocator_->Alloc(kSize / 2);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so it should
  // process the commands until the token is passed.
  offsets[0] = allocator_->Alloc(kSize);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism using FreeUnused.
TEST_F(FencedAllocatorTest, FreeUnused) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_TRUE(allocator_->InUse());

  // No memory should be available.
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused block.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Free two more.
  token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[1], token);
  token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[2], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Check that nothing has changed.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused blocks.
  EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
  EXPECT_TRUE(allocator_->InUse());

  // Free up everything.
  for (unsigned int i = 3; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_FALSE(allocator_->InUse());
}

// Tests GetLargestFreeSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation. But both first-fit and best-fit would
  // succeed.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this is dependent on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());

  offset = allocator_->Alloc(kBufferSize - 3 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());

  offset1 = allocator_->Alloc(2 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  allocator_->Free(offset);
  allocator_->Free(offset1);
  allocator_->Free(offset2);
}

// Tests GetLargestFreeOrPendingSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation. But both first-fit and best-fit would
  // succeed.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this is dependent on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeOrPendingSize());

  // Free the last one, pending a token.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offset2, token);

  // Now all the buffers have been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... but one is still waiting for the token.
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeSize());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());
  // This allocation will need to reclaim the space freed above, so it should
  // process the commands until the token is passed; then it will succeed.
  offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());
  allocator_->Free(offset);

  // Everything now has been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... and nothing is waiting on a token anymore.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}

class FencedAllocatorPollTest : public BaseFencedAllocatorTest {
 public:
  static const unsigned int kAllocSize = 128;

  MOCK_METHOD0(MockedPoll, void());

 protected:
  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    BaseFencedAllocatorTest::TearDown();
  }
};

TEST_F(FencedAllocatorPollTest, TestPoll) {
  scoped_ptr<FencedAllocator> allocator(
      new FencedAllocator(kBufferSize,
                          helper_.get(),
                          base::Bind(&FencedAllocatorPollTest::MockedPoll,
                                     base::Unretained(this))));
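
  // Unlike the other fixtures, the poll callback here is a mock, so the test
  // can verify exactly when FreeUnused() invokes it.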
  FencedAllocator::Offset mem1 = allocator->Alloc(kAllocSize);
  FencedAllocator::Offset mem2 = allocator->Alloc(kAllocSize);
  EXPECT_NE(mem1, FencedAllocator::kInvalidOffset);
  EXPECT_NE(mem2, FencedAllocator::kInvalidOffset);
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);

  // Check that a no-op Poll doesn't affect the state.
  EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 2);

  // Check that freeing in Poll works.
  base::Closure free_mem1_closure =
      base::Bind(&FencedAllocator::Free,
                 base::Unretained(allocator.get()),
                 mem1);
  EXPECT_CALL(*this, MockedPoll())
      .WillOnce(InvokeWithoutArgs(&free_mem1_closure, &base::Closure::Run))
      .RetiresOnSaturation();
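
  // FreeUnused() fires the poll callback, which frees mem1 and leaves only
  // mem2 in use.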
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), kAllocSize * 1);

  // Check that freeing still works.
  EXPECT_CALL(*this, MockedPoll()).RetiresOnSaturation();
  allocator->Free(mem2);
  allocator->FreeUnused();
  EXPECT_TRUE(allocator->CheckConsistency());
  EXPECT_EQ(allocator->bytes_in_use(), 0u);
}

// Test fixture for FencedAllocatorWrapper test - Creates a
// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
// RPC mechanism), making sure Noops are ignored and SetToken calls are
// properly forwarded to the engine.
class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
 protected:
  void SetUp() override {
    BaseFencedAllocatorTest::SetUp();

    // Though allocating this buffer isn't strictly necessary, it makes
    // allocations point to valid addresses, so they could be used for
    // something.
    buffer_.reset(static_cast<char*>(base::AlignedAlloc(
        kBufferSize, kAllocAlignment)));
    allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
                                                helper_.get(),
                                                base::Bind(&EmptyPoll),
                                                buffer_.get()));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocatorWrapper> allocator_;
  scoped_ptr<char, base::AlignedFreeDeleter> buffer_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorWrapperTest, TestBasic) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  void *pointer = allocator_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
  EXPECT_GE(kBufferSize,
            static_cast<char *>(pointer) - buffer_.get() + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());

  char *pointer_char = allocator_->AllocTyped<char>(kSize);
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_.get(), pointer_char);
  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
  allocator_->Free(pointer_char);
  EXPECT_TRUE(allocator_->CheckConsistency());

  unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
  ASSERT_TRUE(pointer_uint);
  EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
  EXPECT_GE(buffer_.get() + kBufferSize,
            reinterpret_cast<char *>(pointer_uint + kSize));

  // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
  // directly, except from the remaining size.
  EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
            allocator_->GetLargestFreeSize());
  allocator_->Free(pointer_uint);
}

// Test alloc 0 fails.
TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
  allocator_->CheckConsistency();

  void *pointer = allocator_->Alloc(0);
  ASSERT_FALSE(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks that allocation offsets are aligned to multiples of 16 bytes.
TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
  allocator_->CheckConsistency();

  const unsigned int kSize1 = 75;
  void *pointer1 = allocator_->Alloc(kSize1);
  ASSERT_TRUE(pointer1);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize2 = 43;
  void *pointer2 = allocator_->Alloc(kSize2);
  ASSERT_TRUE(pointer2);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer2);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer1);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks out-of-memory condition.
TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, then reallocate with half the size.
  allocator_->Free(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  pointers[0] = allocator_->Alloc(kSize / 2);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
  allocator_->CheckConsistency();

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void *pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(pointers[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, so it should
  // process the commands until the token is passed.
  pointers[0] = allocator_->Alloc(kSize);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

}  // namespace gpu