// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/client/mapped_memory.h"

#include <list>
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace gpu {

using testing::Return;
using testing::Mock;
using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::_;

class MappedMemoryTestBase : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;

  void SetUp() override {
    api_mock_.reset(new AsyncAPIMock(true));
    // Ignore noops in the mock - we don't want to inspect the internals of the
    // helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    {
      TransferBufferManager* manager = new TransferBufferManager(nullptr);
      transfer_buffer_manager_ = manager;
      EXPECT_TRUE(manager->Initialize());
    }

    command_buffer_.reset(
        new CommandBufferService(transfer_buffer_manager_.get()));
    EXPECT_TRUE(command_buffer_->Initialize());

    gpu_scheduler_.reset(new GpuScheduler(
        command_buffer_.get(), api_mock_.get(), NULL));
    command_buffer_->SetPutOffsetChangeCallback(base::Bind(
        &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get())));
    command_buffer_->SetGetBufferChangeCallback(base::Bind(
        &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));

    api_mock_->set_engine(gpu_scheduler_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  int32 GetToken() {
    return command_buffer_->GetLastState().token;
  }

  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GpuScheduler> gpu_scheduler_;
  scoped_ptr<CommandBufferHelper> helper_;
};

#ifndef _MSC_VER
const unsigned int MappedMemoryTestBase::kBufferSize;
#endif
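
// Note on the guard above: kBufferSize is initialized in-class, but (pre
// C++17) an ODR-used static const integral member still needs an out-of-line
// definition; gtest's EXPECT_* macros bind their arguments to const
// references, which ODR-uses the constant. Older MSVC versions did not
// accept the out-of-line definition, hence the #ifndef.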

// Test fixture for MemoryChunk tests. Creates a MemoryChunk using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface
// (calling it directly, not through the RPC mechanism), making sure Noops
// are ignored and SetToken calls are properly forwarded to the engine.
class MemoryChunkTest : public MappedMemoryTestBase {
 protected:
  static const int32 kShmId = 123;
  void SetUp() override {
    MappedMemoryTestBase::SetUp();
    scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
    shared_memory->CreateAndMapAnonymous(kBufferSize);
    buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
    chunk_.reset(new MemoryChunk(kShmId, buffer_, helper_.get()));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();

    MappedMemoryTestBase::TearDown();
  }

  uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }

  scoped_ptr<MemoryChunk> chunk_;
  scoped_refptr<gpu::Buffer> buffer_;
};

#ifndef _MSC_VER
const int32 MemoryChunkTest::kShmId;
#endif

TEST_F(MemoryChunkTest, Basic) {
  const unsigned int kSize = 16;
  EXPECT_EQ(kShmId, chunk_->shm_id());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());
  void* pointer = chunk_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer));
  EXPECT_GE(kBufferSize,
            static_cast<uint8*>(pointer) - buffer_memory() + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());

  chunk_->Free(pointer);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());

  uint8* pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_memory(), pointer_char);
  EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  chunk_->Free(pointer_char);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
}
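
// The MappedMemoryManager tests below exercise the deferred-reclamation
// pattern used by clients: memory freed "pending a token" only becomes
// reusable once the service has executed past that token. A minimal sketch
// of the flow, using only calls that appear in this file:
//
//   void* ptr = manager->Alloc(size, &id, &offset);
//   int32 token = helper->InsertToken();     // queue a SetToken command
//   manager->FreePendingToken(ptr, token);   // reclaimable once the token
//                                            // passes on the service side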

class MappedMemoryManagerTest : public MappedMemoryTestBase {
 public:
  MappedMemoryManager* manager() const {
    return manager_.get();
  }

 protected:
  void SetUp() override {
    MappedMemoryTestBase::SetUp();
    manager_.reset(
        new MappedMemoryManager(helper_.get(), MappedMemoryManager::kNoLimit));
  }

  void TearDown() override {
    // If the GpuScheduler posts any tasks, this forces them to run.
    base::MessageLoop::current()->RunUntilIdle();
    manager_.reset();
    MappedMemoryTestBase::TearDown();
  }

  scoped_ptr<MappedMemoryManager> manager_;
};

TEST_F(MappedMemoryManagerTest, Basic) {
  const unsigned int kSize = 1024;
  // Check that we can alloc.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);
  // Check that if we free and realloc the same size we get the same memory.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  manager_->Free(mem1);
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  EXPECT_EQ(mem1, mem2);
  EXPECT_EQ(id1, id2);
  EXPECT_EQ(offset1, offset2);
  // Check that if we allocate again we get different shared memory.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3 != NULL);
  EXPECT_NE(mem2, mem3);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset3);
  // Free 3 and allocate 2 half-size blocks.
  manager_->Free(mem3);
  int32 id4 = -1;
  int32 id5 = -1;
  unsigned int offset4 = 0xFFFFFFFFU;
  unsigned int offset5 = 0xFFFFFFFFU;
  void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
  void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
  ASSERT_TRUE(mem4 != NULL);
  ASSERT_TRUE(mem5 != NULL);
  EXPECT_EQ(id3, id4);
  EXPECT_EQ(id4, id5);
  EXPECT_EQ(0u, offset4);
  EXPECT_EQ(kSize / 2u, offset5);
  manager_->Free(mem4);
  manager_->Free(mem2);
  manager_->Free(mem5);
}

TEST_F(MappedMemoryManagerTest, FreePendingToken) {
  const unsigned int kSize = 128;
  const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
  CHECK(kAllocCount * kSize == kBufferSize * 2);

  // Allocate several buffers across multiple chunks.
  void* pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    int32 id = -1;
    unsigned int offset = 0xFFFFFFFFu;
    pointers[i] = manager_->Alloc(kSize, &id, &offset);
    EXPECT_TRUE(pointers[i]);
    EXPECT_NE(id, -1);
    EXPECT_NE(offset, 0xFFFFFFFFu);
  }

  // Free one successful allocation, pending the token.
  int32 token = helper_->InsertToken();
  manager_->FreePendingToken(pointers[0], token);

  // The way we hooked up the helper and engine, the engine won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());
  // Force it to read up to the token.
  helper_->Finish();
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // This allocation should use the spot just freed above.
  int32 new_id = -1;
  unsigned int new_offset = 0xFFFFFFFFu;
  void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
  EXPECT_TRUE(new_ptr);
  EXPECT_EQ(new_ptr, pointers[0]);
  EXPECT_NE(new_id, -1);
  EXPECT_NE(new_offset, 0xFFFFFFFFu);

  // Free up everything.
  manager_->Free(new_ptr);
  for (unsigned int i = 1; i < kAllocCount; ++i) {
    manager_->Free(pointers[i]);
  }
}
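
// Finish() above blocks until every queued command has executed, which is a
// stronger guarantee than the test strictly needs: it only cares that one
// token has passed. CommandBufferHelper also offers WaitForToken(token) for
// that narrower wait (not exercised in this file).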

TEST_F(MappedMemoryManagerTest, FreeUnused) {
  int32 id = -1;
  unsigned int offset = 0xFFFFFFFFU;
  void* m1 = manager_->Alloc(kBufferSize, &id, &offset);
  void* m2 = manager_->Alloc(kBufferSize, &id, &offset);
  ASSERT_TRUE(m1 != NULL);
  ASSERT_TRUE(m2 != NULL);
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->Free(m2);
  EXPECT_EQ(2u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(1u, manager_->num_chunks());
  manager_->Free(m1);
  EXPECT_EQ(1u, manager_->num_chunks());
  manager_->FreeUnused();
  EXPECT_EQ(0u, manager_->num_chunks());
}

TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
  const unsigned int kSize = 1024;
  manager_->set_chunk_size_multiple(kSize * 2);
  // Check that if we allocate less than the chunk size multiple, we get
  // chunks rounded up to that multiple.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem1);
  ASSERT_TRUE(mem2);
  ASSERT_TRUE(mem3);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(id1, id2);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset1);
  EXPECT_EQ(kSize, offset2);
  EXPECT_EQ(0u, offset3);

  manager_->Free(mem1);
  manager_->Free(mem2);
  manager_->Free(mem3);
}

TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
  const unsigned int kChunkSize = 2048;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate one chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate a full chunk worth of memory again. Since the first chunk is
  // entirely in use, a second chunk must be created.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  // Expect two chunks to be allocated, exceeding the limit,
  // since all memory is in use.
  EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem2);
}

TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
  const unsigned int kChunkSize = 2 * 1024;
  manager_->set_chunk_size_multiple(kChunkSize);

  // Allocate half a chunk worth of memory.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  // Allocate half a chunk worth of memory again.
  // The same chunk will be used.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(kSize, offset2);

  // Free one successful allocation, pending the token.
  int32 token = helper_->InsertToken();
  manager_->FreePendingToken(mem2, token);

  // The way we hooked up the helper and engine, the engine won't process
  // commands until it has to wait for something, which means the token
  // shouldn't have passed yet at this point.
  EXPECT_GT(token, GetToken());

  // Since we didn't call helper_->Finish(), the token has not passed. We
  // won't be able to claim the free memory without waiting, and as we've
  // already hit the memory limit, we'll have to wait on the token.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3);
  EXPECT_NE(-1, id3);
  // It will reuse the space from the second allocation just freed.
  EXPECT_EQ(kSize, offset3);

  // Expect one chunk to be allocated.
  EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());

  manager_->Free(mem1);
  manager_->Free(mem3);
}
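
// Note that in the test above no explicit Finish() is needed: hitting the
// memory limit inside the third Alloc() is what forces the wait on the
// pending token, and waiting lets the scheduler process the queued commands,
// after which the freed block becomes reusable.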

TEST_F(MappedMemoryManagerTest, MaxAllocationTest) {
  const unsigned int kSize = 1024;
  // Reset the manager with a memory limit.
  manager_.reset(new MappedMemoryManager(helper_.get(), kSize));

  const size_t kLimit = 512;
  manager_->set_chunk_size_multiple(kLimit);

  // Allocate twice the limit worth of memory (the allocation cap has not
  // been set yet, so this is unbounded).
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kLimit, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);

  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  void* mem2 = manager_->Alloc(kLimit, &id2, &offset2);
  ASSERT_TRUE(mem2);
  EXPECT_NE(-1, id2);
  EXPECT_EQ(0u, offset2);

  manager_->set_max_allocated_bytes(kLimit);

  // A new allocation should now fail.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kLimit, &id3, &offset3);
  ASSERT_FALSE(mem3);
  EXPECT_EQ(-1, id3);
  EXPECT_EQ(0xFFFFFFFFU, offset3);

  manager_->Free(mem2);

  // A new allocation is over the limit but should reuse the space just freed.
  int32 id4 = -1;
  unsigned int offset4 = 0xFFFFFFFFU;
  void* mem4 = manager_->Alloc(kLimit, &id4, &offset4);
  ASSERT_TRUE(mem4);
  EXPECT_EQ(id2, id4);
  EXPECT_EQ(offset2, offset4);

  manager_->Free(mem1);
  manager_->Free(mem4);
}
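
// The two limits exercised above differ in kind: the constructor argument
// (see UnusedMemoryLimit and MemoryLimitWithReuse) is a soft target that
// allocation may exceed while all memory is in use, whereas
// set_max_allocated_bytes() is a hard cap that makes Alloc() fail outright
// until existing space is freed for reuse.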

}  // namespace gpu