// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/client/mapped_memory.h"

#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {
MemoryChunk::MemoryChunk(
    int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper)
    : shm_id_(shm_id),
      shm_(shm),
      allocator_(shm.size, helper, shm.ptr) {
}
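
// New chunks are sized up to a multiple of chunk_size_multiple_ (1 by
// default). unused_memory_reclaim_limit caps how much allocated-but-unused
// mapped memory may accumulate before Alloc waits on pending work instead of
// creating yet another chunk; kNoLimit disables that check.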
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         size_t unused_memory_reclaim_limit)
    : chunk_size_multiple_(1),
      helper_(helper),
      allocated_memory_(0),
      max_free_bytes_(unused_memory_reclaim_limit) {
}

MappedMemoryManager::~MappedMemoryManager() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  for (MemoryChunkVector::iterator iter = chunks_.begin();
       iter != chunks_.end(); ++iter) {
    MemoryChunk* chunk = *iter;
    cmd_buf->DestroyTransferBuffer(chunk->shm_id());
  }
}
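
// Alloc tries, in order: an existing chunk that can satisfy the request
// without waiting, then (only once the free-memory limit has been reached)
// an existing chunk that can satisfy it after waiting for pending tokens,
// and finally a brand-new transfer buffer rounded up to chunk_size_multiple_.
//
// Illustrative client usage (a sketch; the real call sites live in the GLES2
// client code and the manager/helper setup is elided):
//
//   int32 shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* mem = manager->Alloc(size, &shm_id, &shm_offset);
//   // ... fill |mem|, issue a command referencing shm_id/shm_offset ...
//   manager->FreePendingToken(mem, helper->InsertToken());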
void* MappedMemoryManager::Alloc(
    unsigned int size, int32* shm_id, unsigned int* shm_offset) {
  DCHECK(shm_id);
  DCHECK(shm_offset);
  if (size <= allocated_memory_) {
    size_t total_bytes_in_use = 0;
    // See if any of the chunks can satisfy this request.
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      chunk->FreeUnused();
      total_bytes_in_use += chunk->bytes_in_use();
      if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
        void* mem = chunk->Alloc(size);
        DCHECK(mem);
        *shm_id = chunk->shm_id();
        *shm_offset = chunk->GetOffset(mem);
        return mem;
      }
    }

    // If there is a memory limit being enforced and total free
    // memory (allocated_memory_ - total_bytes_in_use) is larger than
    // the limit, try waiting.
    if (max_free_bytes_ != kNoLimit &&
        (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
      TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
      for (size_t ii = 0; ii < chunks_.size(); ++ii) {
        MemoryChunk* chunk = chunks_[ii];
        if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
          void* mem = chunk->Alloc(size);
          DCHECK(mem);
          *shm_id = chunk->shm_id();
          *shm_offset = chunk->GetOffset(mem);
          return mem;
        }
      }
    }
  }

  // Make a new chunk to satisfy the request.
  CommandBuffer* cmd_buf = helper_->command_buffer();
  unsigned int chunk_size =
      ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
      chunk_size_multiple_;
  int32 id = -1;
  gpu::Buffer shm = cmd_buf->CreateTransferBuffer(chunk_size, &id);
  if (id < 0)
    return NULL;
  MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
  allocated_memory_ += mc->GetSize();
  chunks_.push_back(mc);
  void* mem = mc->Alloc(size);
  DCHECK(mem);
  *shm_id = mc->shm_id();
  *shm_offset = mc->GetOffset(mem);
  return mem;
}
void MappedMemoryManager::Free(void* pointer) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->Free(pointer);
      return;
    }
  }
  NOTREACHED();
}
void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->FreePendingToken(pointer, token);
      return;
    }
  }
  NOTREACHED();
}
void MappedMemoryManager::FreeUnused() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  MemoryChunkVector::iterator iter = chunks_.begin();
  while (iter != chunks_.end()) {
    MemoryChunk* chunk = *iter;
    chunk->FreeUnused();
    if (!chunk->InUse()) {
      cmd_buf->DestroyTransferBuffer(chunk->shm_id());
      allocated_memory_ -= chunk->GetSize();
      iter = chunks_.erase(iter);
    } else {
      ++iter;
    }
  }
}

}  // namespace gpu