// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the tests for the FencedAllocator class.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/aligned_memory.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace gpu {

using testing::Return;
using testing::Mock;
using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::_;

class BaseFencedAllocatorTest : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;
  static const int kAllocAlignment = 16;

  void SetUp() override {
    api_mock_.reset(new AsyncAPIMock(true));
    // Ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    TransferBufferManager* manager = new TransferBufferManager(nullptr);
    transfer_buffer_manager_ = manager;
    EXPECT_TRUE(manager->Initialize());

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }
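
  // Note: throughout these tests, a token returned by
  // CommandBufferHelper::InsertToken() is considered "passed" once GetToken()
  // (the last token processed by the service) has caught up to it, which is
  // what the EXPECT_GT/EXPECT_LE comparisons below check.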

  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};

#ifndef _MSC_VER
const unsigned int BaseFencedAllocatorTest::kBufferSize;
#endif

// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
class FencedAllocatorTest : public BaseFencedAllocatorTest {
 protected:
  void SetUp() override {
    BaseFencedAllocatorTest::SetUp();
    allocator_.reset(new FencedAllocator(kBufferSize, helper_.get()));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocator> allocator_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorTest, TestBasic) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_FALSE(allocator_->InUse());

  const unsigned int kSize = 16;
  FencedAllocator::Offset offset = allocator_->Alloc(kSize);
  EXPECT_TRUE(allocator_->InUse());
  EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_GE(kBufferSize, offset + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Tests that Alloc(0) fails.
TEST_F(FencedAllocatorTest, TestAllocZero) {
  FencedAllocator::Offset offset = allocator_->Alloc(0);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset);
  EXPECT_FALSE(allocator_->InUse());
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks the out-of-memory condition.
TEST_F(FencedAllocatorTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, then reallocate with half the size.
  allocator_->Free(offsets[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  offsets[0] = allocator_->Alloc(kSize / 2);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending a fence.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way the helper and engine are hooked up, the engine won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, which should
  // process the commands until the token is passed.
  offsets[0] = allocator_->Alloc(kSize);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0] + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}
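
// For reference, a minimal sketch of the client-side pattern the test above
// exercises (illustrative only; |helper| and |allocator| stand for a
// CommandBufferHelper and a FencedAllocator owned by the client):
//
//   FencedAllocator::Offset offset = allocator->Alloc(size);
//   // ... issue commands that reference the memory at |offset| ...
//   int32 token = helper->InsertToken();
//   // The service may still be reading the memory, so don't reuse it yet;
//   // the allocator reclaims it once |token| has passed.
//   allocator->FreePendingToken(offset, token);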

// Checks the free-pending-token mechanism using FreeUnused.
TEST_F(FencedAllocatorTest, FreeUnused) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i] + kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_TRUE(allocator_->InUse());

  // No memory should be available.
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  // Free one successful allocation, pending a fence.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Force the command buffer to process the token.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused block.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Free two more.
  token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[1], token);
  token = helper_->InsertToken();
  allocator_->FreePendingToken(offsets[2], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Check that nothing has changed.
  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());

  // Force the command buffer to process the tokens.
  helper_->Finish();

  // Tell the allocator to update what's available based on the current token.
  allocator_->FreeUnused();

  // Check that the new largest free size takes into account the unused blocks.
  EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
  EXPECT_TRUE(allocator_->InUse());

  // Free up everything else.
  for (unsigned int i = 3; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
  EXPECT_FALSE(allocator_->InUse());
}
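
// Note the contrast with TestFreePendingToken above: Alloc() reclaims
// pending-token blocks lazily, only when it needs the space, whereas
// FreeUnused() lets the client eagerly reclaim every block whose token has
// already passed without performing a new allocation.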

// Tests GetLargestFreeSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation, but both first-fit and best-fit would
  // ensure that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this depends on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());
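
  // At this point the buffer layout is (assuming the first-fit behavior noted
  // above, with successive allocations placed at increasing offsets):
  //
  //   | free: 2 * kSize | offset2: kSize | free: kBufferSize - 3 * kSize |
  //
  // so the tail block is the largest free one, as just checked.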

  offset = allocator_->Alloc(kBufferSize - 3 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());

  offset1 = allocator_->Alloc(2 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  allocator_->Free(offset);
  allocator_->Free(offset1);
  allocator_->Free(offset2);
}

// Tests GetLargestFreeOrPendingSize.
TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation, but both first-fit and best-fit would
  // ensure that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this depends on the current first-fit
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeOrPendingSize());

  // Free the last one, pending a token.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offset2, token);

  // Now all the buffers have been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... but one is still waiting for the token.
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeSize());

  // The way the helper and engine are hooked up, the engine won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());
  // This allocation will need to reclaim the space freed above, which should
  // process the commands until the token is passed; it will then succeed.
  offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());
  allocator_->Free(offset);

  // Everything has now been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... for real.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}
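
// In other words, as exercised above: GetLargestFreeOrPendingSize() counts
// blocks freed with FreePendingToken() as if they were already free, since
// their space will become available once their token passes, while
// GetLargestFreeSize() only counts blocks usable without waiting on a token.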

// Test fixture for FencedAllocatorWrapper test - Creates a
// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
// RPC mechanism), making sure Noops are ignored and SetToken calls are
// properly forwarded to the engine.
class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
 protected:
  void SetUp() override {
    BaseFencedAllocatorTest::SetUp();

    // Though allocating this buffer isn't strictly necessary, it makes
    // allocations point to valid addresses, so they could be used for
    // something.
    buffer_.reset(static_cast<char*>(base::AlignedAlloc(
        kBufferSize, kAllocAlignment)));
    allocator_.reset(new FencedAllocatorWrapper(kBufferSize,
                                                helper_.get(),
                                                buffer_.get()));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    EXPECT_TRUE(allocator_->CheckConsistency());

    BaseFencedAllocatorTest::TearDown();
  }

  scoped_ptr<FencedAllocatorWrapper> allocator_;
  scoped_ptr<char, base::AlignedFreeDeleter> buffer_;
};

// Checks basic alloc and free.
TEST_F(FencedAllocatorWrapperTest, TestBasic) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  void* pointer = allocator_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_.get(), static_cast<char*>(pointer));
  EXPECT_GE(kBufferSize, static_cast<char*>(pointer) - buffer_.get() + kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());

  char* pointer_char = allocator_->AllocTyped<char>(kSize);
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_.get(), pointer_char);
  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
  allocator_->Free(pointer_char);
  EXPECT_TRUE(allocator_->CheckConsistency());

  unsigned int* pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
  ASSERT_TRUE(pointer_uint);
  EXPECT_LE(buffer_.get(), reinterpret_cast<char*>(pointer_uint));
  EXPECT_GE(buffer_.get() + kBufferSize,
            reinterpret_cast<char*>(pointer_uint + kSize));

  // Check that it did allocate kSize * sizeof(unsigned int) - AllocTyped takes
  // an element count, not a byte count. We can't tell directly, except from
  // the remaining size.
  EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
            allocator_->GetLargestFreeSize());
  allocator_->Free(pointer_uint);
}

// Tests that Alloc(0) fails.
TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  void* pointer = allocator_->Alloc(0);
  ASSERT_FALSE(pointer);
  EXPECT_TRUE(allocator_->CheckConsistency());
}

// Checks that allocations are aligned to multiples of kAllocAlignment (16)
// bytes.
TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize1 = 75;
  void* pointer1 = allocator_->Alloc(kSize1);
  ASSERT_TRUE(pointer1);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize2 = 43;
  void* pointer2 = allocator_->Alloc(kSize2);
  ASSERT_TRUE(pointer2);
  EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer2);
  EXPECT_TRUE(allocator_->CheckConsistency());

  allocator_->Free(pointer1);
  EXPECT_TRUE(allocator_->CheckConsistency());
}
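
// The alignment checks above rely on kAllocAlignment being a power of two:
// for such an alignment A, an address is A-aligned exactly when
// (address & (A - 1)) == 0, since A - 1 has only the low log2(A) bits set.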

// Checks the out-of-memory condition.
TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void* pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void* pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, then reallocate with half the size.
  allocator_->Free(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  pointers[0] = allocator_->Alloc(kSize / 2);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well.
  pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  void* pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    pointers[i] = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail.
  void* pointer_failed = allocator_->Alloc(kSize);
  EXPECT_FALSE(pointer_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending a fence.
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(pointers[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way the helper and engine are hooked up, the engine won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // This allocation will need to reclaim the space freed above, which should
  // process the commands until the token is passed.
  pointers[0] = allocator_->Alloc(kSize);
  EXPECT_TRUE(pointers[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(pointers[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}

}  // namespace gpu