// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the tests for the RingBuffer class.

#include "gpu/command_buffer/client/ring_buffer.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

namespace gpu {

using testing::Return;
using testing::Mock;
using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::_;

class BaseRingBufferTest : public testing::Test {
 protected:
  static const unsigned int kBaseOffset = 128;
  static const unsigned int kBufferSize = 1024;

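  // When delay_set_token_ is set, SetToken() below records the kSetToken
  // arguments instead of forwarding them immediately; RunPendingSetToken()
  // replays the recorded calls later. Tests use this to fill the ring buffer
  // with pending blocks before any of their tokens have been processed.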
  void RunPendingSetToken() {
    for (std::vector<const void*>::iterator it = set_token_arguments_.begin();
         it != set_token_arguments_.end();
         ++it) {
      api_mock_->SetToken(cmd::kSetToken, 1, *it);
    }
    set_token_arguments_.clear();
    delay_set_token_ = false;
  }

  void SetToken(unsigned int command,
                unsigned int arg_count,
                const void* _args) {
    EXPECT_EQ(static_cast<unsigned int>(cmd::kSetToken), command);
    EXPECT_EQ(1u, arg_count);
    if (delay_set_token_)
      set_token_arguments_.push_back(_args);
    else
      api_mock_->SetToken(cmd::kSetToken, 1, _args);
  }

  virtual void SetUp() {
    delay_set_token_ = false;
    api_mock_.reset(new AsyncAPIMock);
    // ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(this, &BaseRingBufferTest::SetToken),
                              Return(error::kNoError)));

    {
      TransferBufferManager* manager = new TransferBufferManager();
      transfer_buffer_manager_.reset(manager);
      EXPECT_TRUE(manager->Initialize());
    }
    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  int32 GetToken() {
    return command_buffer_->GetState().token;
  }

#if defined(OS_MACOSX)
  base::mac::ScopedNSAutoreleasePool autorelease_pool_;
#endif
  base::MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
  std::vector<const void*> set_token_arguments_;
  bool delay_set_token_;
};

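// Out-of-class definitions for the in-class constants above: GCC needs them
// when the constants are ODR-used, while MSVC requires that they not be
// repeated here, hence the guard.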
#ifndef _MSC_VER
const unsigned int BaseRingBufferTest::kBaseOffset;
const unsigned int BaseRingBufferTest::kBufferSize;
#endif

// Test fixture for RingBuffer test - Creates a RingBuffer, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken commands are properly forwarded to the engine.
class RingBufferTest : public BaseRingBufferTest {
 protected:
  virtual void SetUp() {
    BaseRingBufferTest::SetUp();
    allocator_.reset(new RingBuffer(kBaseOffset, kBufferSize, helper_.get()));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    BaseRingBufferTest::TearDown();
  }

  scoped_ptr<RingBuffer> allocator_;
};

// Checks basic alloc and free.
TEST_F(RingBufferTest, TestBasic) {
  const unsigned int kSize = 16;
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
  RingBuffer::Offset offset = allocator_->Alloc(kSize);
  EXPECT_GE(kBufferSize, offset - kBaseOffset + kSize);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSizeNoWaiting());
  int32 token = helper_->InsertToken();
  allocator_->FreePendingToken(offset, token);
}

// Checks the free-pending-token mechanism.
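// A block released with FreePendingToken() only becomes reusable once the
// associated token has passed; an Alloc() that needs that space has to make
// the helper process commands until the token passes before reclaiming it.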
TEST_F(RingBufferTest, TestFreePendingToken) {
  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  delay_set_token_ = true;
  // Allocate several buffers to fill in the memory.
  int32 tokens[kAllocCount];
  for (unsigned int ii = 0; ii < kAllocCount; ++ii) {
    RingBuffer::Offset offset = allocator_->Alloc(kSize);
    EXPECT_GE(kBufferSize, offset - kBaseOffset + kSize);
    tokens[ii] = helper_->InsertToken();
    allocator_->FreePendingToken(offset, tokens[ii]);
  }

  EXPECT_EQ(kBufferSize - (kSize * kAllocCount),
            allocator_->GetLargestFreeSizeNoWaiting());

  RunPendingSetToken();

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until a token is passed.
  RingBuffer::Offset offset1 = allocator_->Alloc(kSize);
  EXPECT_EQ(kBaseOffset, offset1);

  // Check that the token has indeed passed.
  EXPECT_LE(tokens[0], GetToken());

  allocator_->FreePendingToken(offset1, helper_->InsertToken());
}

// Tests GetLargestFreeSizeNoWaiting
TEST_F(RingBufferTest, TestGetLargestFreeSizeNoWaiting) {
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());

  RingBuffer::Offset offset = allocator_->Alloc(kBufferSize);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
  allocator_->FreePendingToken(offset, helper_->InsertToken());
}

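// TestFreeBug exercises the free-block bookkeeping (the specific regression it
// guards against is not documented here): after two differently sized
// allocations are freed pending tokens, allocating the entire buffer must
// still succeed and leave no free space.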
TEST_F(RingBufferTest, TestFreeBug) {
  // The first and second allocations must not match.
  const unsigned int kAlloc1 = 10;
  const unsigned int kAlloc2 = 20;
  RingBuffer::Offset offset = allocator_->Alloc(kAlloc1);
  EXPECT_EQ(kBufferSize - kAlloc1, allocator_->GetLargestFreeSizeNoWaiting());
  allocator_->FreePendingToken(offset, helper_.get()->InsertToken());
  offset = allocator_->Alloc(kAlloc2);
  EXPECT_EQ(kBufferSize - kAlloc1 - kAlloc2,
            allocator_->GetLargestFreeSizeNoWaiting());
  allocator_->FreePendingToken(offset, helper_.get()->InsertToken());
  offset = allocator_->Alloc(kBufferSize);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
  allocator_->FreePendingToken(offset, helper_.get()->InsertToken());
}

// Test fixture for RingBufferWrapper test - Creates a
// RingBufferWrapper, using a CommandBufferHelper with a mock
// AsyncAPIInterface for its interface (calling it directly, not through the
// RPC mechanism), making sure Noops are ignored and SetToken commands are
// properly forwarded to the engine.
class RingBufferWrapperTest : public BaseRingBufferTest {
 protected:
  virtual void SetUp() {
    BaseRingBufferTest::SetUp();

    // Though allocating this buffer isn't strictly necessary, it makes
    // allocations point to valid addresses, so they could be used for
    // something.
    buffer_.reset(new int8[kBufferSize + kBaseOffset]);
    buffer_start_ = buffer_.get() + kBaseOffset;
    allocator_.reset(new RingBufferWrapper(
        kBaseOffset, kBufferSize, helper_.get(), buffer_start_));
  }

  virtual void TearDown() {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    BaseRingBufferTest::TearDown();
  }

  scoped_ptr<RingBufferWrapper> allocator_;
  scoped_ptr<int8[]> buffer_;
  int8* buffer_start_;
};

// Checks basic alloc and free.
TEST_F(RingBufferWrapperTest, TestBasic) {
  const unsigned int kSize = 16;
  void* pointer = allocator_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_start_, static_cast<int8*>(pointer));
  EXPECT_GE(kBufferSize, static_cast<int8*>(pointer) - buffer_start_ + kSize);

  allocator_->FreePendingToken(pointer, helper_->InsertToken());

  int8* pointer_int8 = allocator_->AllocTyped<int8>(kSize);
  ASSERT_TRUE(pointer_int8);
  EXPECT_LE(buffer_start_, pointer_int8);
  EXPECT_GE(buffer_start_ + kBufferSize, pointer_int8 + kSize);
  allocator_->FreePendingToken(pointer_int8, helper_->InsertToken());

  unsigned int* pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
  ASSERT_TRUE(pointer_uint);
  EXPECT_LE(buffer_start_, reinterpret_cast<int8*>(pointer_uint));
  EXPECT_GE(buffer_start_ + kBufferSize,
            reinterpret_cast<int8*>(pointer_uint + kSize));

  // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
  // directly, except from the remaining size.
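  // For example, with kBufferSize = 1024 and kSize = 16, on a platform where
  // sizeof(unsigned int) == 4, the expected remainder is
  // 1024 - 16 - 16 - 64 = 928.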
  EXPECT_EQ(kBufferSize - kSize - kSize - kSize * sizeof(*pointer_uint),
            allocator_->GetLargestFreeSizeNoWaiting());
  allocator_->FreePendingToken(pointer_uint, helper_->InsertToken());
}

// Checks the free-pending-token mechanism.
TEST_F(RingBufferWrapperTest, TestFreePendingToken) {
  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  CHECK(kAllocCount * kSize == kBufferSize);

  delay_set_token_ = true;
  // Allocate several buffers to fill in the memory.
  int32 tokens[kAllocCount];
  for (unsigned int ii = 0; ii < kAllocCount; ++ii) {
    void* pointer = allocator_->Alloc(kSize);
    EXPECT_TRUE(pointer != NULL);
    tokens[ii] = helper_->InsertToken();
    allocator_->FreePendingToken(pointer, helper_->InsertToken());
  }

  EXPECT_EQ(kBufferSize - (kSize * kAllocCount),
            allocator_->GetLargestFreeSizeNoWaiting());

  RunPendingSetToken();

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed.
  void* pointer1 = allocator_->Alloc(kSize);
  EXPECT_EQ(buffer_start_, static_cast<int8*>(pointer1));

  // Check that the token has indeed passed.
  EXPECT_LE(tokens[0], GetToken());

  allocator_->FreePendingToken(pointer1, helper_->InsertToken());
  EXPECT_LE(command_buffer_->GetState().token, helper_->InsertToken());
}

}  // namespace gpu