// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/command_buffer/client/mapped_memory.h"

#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {
MemoryChunk::MemoryChunk(int32 shm_id,
                         scoped_refptr<gpu::Buffer> shm,
                         CommandBufferHelper* helper,
                         const base::Closure& poll_callback)
    : shm_id_(shm_id),
      shm_(shm),
      allocator_(shm->size(), helper, poll_callback, shm->memory()) {}

MemoryChunk::~MemoryChunk() {}
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         const base::Closure& poll_callback,
                                         size_t unused_memory_reclaim_limit)
    : chunk_size_multiple_(1),
      helper_(helper),
      poll_callback_(poll_callback),
      allocated_memory_(0),
      max_free_bytes_(unused_memory_reclaim_limit) {
}
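// Releases every chunk's shared memory transfer buffer back to the command
// buffer on teardown.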
MappedMemoryManager::~MappedMemoryManager() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  for (MemoryChunkVector::iterator iter = chunks_.begin();
       iter != chunks_.end(); ++iter) {
    MemoryChunk* chunk = *iter;
    cmd_buf->DestroyTransferBuffer(chunk->shm_id());
  }
}
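// Allocation strategy, in order of preference:
//   1. Carve the request out of an existing chunk without waiting.
//   2. If a free-memory limit is set and enough memory is sitting idle,
//      retry while waiting on pending tokens.
//   3. Otherwise create a new transfer-buffer chunk, rounded up to a
//      multiple of |chunk_size_multiple_|.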
void* MappedMemoryManager::Alloc(
    unsigned int size, int32* shm_id, unsigned int* shm_offset) {
  DCHECK(shm_id);
  DCHECK(shm_offset);
  if (size <= allocated_memory_) {
    size_t total_bytes_in_use = 0;
    // See if any of the chunks can satisfy this request.
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      chunk->FreeUnused();
      total_bytes_in_use += chunk->bytes_in_use();
      if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
        void* mem = chunk->Alloc(size);
        DCHECK(mem);
        *shm_id = chunk->shm_id();
        *shm_offset = chunk->GetOffset(mem);
        return mem;
      }
    }

    // If there is a memory limit being enforced and total free
    // memory (allocated_memory_ - total_bytes_in_use) is larger than
    // the limit, try waiting.
    if (max_free_bytes_ != kNoLimit &&
        (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
      TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
      for (size_t ii = 0; ii < chunks_.size(); ++ii) {
        MemoryChunk* chunk = chunks_[ii];
        if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
          void* mem = chunk->Alloc(size);
          DCHECK(mem);
          *shm_id = chunk->shm_id();
          *shm_offset = chunk->GetOffset(mem);
          return mem;
        }
      }
    }
  }

  // Make a new chunk to satisfy the request.
  CommandBuffer* cmd_buf = helper_->command_buffer();
  unsigned int chunk_size =
      ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
      chunk_size_multiple_;
  int32 id = -1;
  scoped_refptr<gpu::Buffer> shm =
      cmd_buf->CreateTransferBuffer(chunk_size, &id);
  if (id < 0)
    return NULL;
  MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
  allocated_memory_ += mc->GetSize();
  chunks_.push_back(mc);
  void* mem = mc->Alloc(size);
  DCHECK(mem);
  *shm_id = mc->shm_id();
  *shm_offset = mc->GetOffset(mem);
  return mem;
}
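// Usage sketch (illustrative only): |helper| is assumed to be an initialized
// CommandBufferHelper and |MyPollCallback| a caller-supplied idle callback;
// neither is defined in this file.
//
//   MappedMemoryManager manager(helper, base::Bind(&MyPollCallback),
//                               MappedMemoryManager::kNoLimit);
//   int32 shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* data = manager.Alloc(64 * 1024, &shm_id, &shm_offset);
//   // ... write into |data| and issue commands that reference
//   // shm_id / shm_offset ...
//   manager.FreePendingToken(data, helper->InsertToken());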
void MappedMemoryManager::Free(void* pointer) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->Free(pointer);
      return;
    }
  }
  NOTREACHED();
}
void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->FreePendingToken(pointer, token);
      return;
    }
  }
  NOTREACHED();
}
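// Retires any pending-token frees that have completed and destroys chunks
// that no longer have allocations in use.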
void MappedMemoryManager::FreeUnused() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  MemoryChunkVector::iterator iter = chunks_.begin();
  while (iter != chunks_.end()) {
    MemoryChunk* chunk = *iter;
    chunk->FreeUnused();
    if (!chunk->InUse()) {
      cmd_buf->DestroyTransferBuffer(chunk->shm_id());
      allocated_memory_ -= chunk->GetSize();
      iter = chunks_.erase(iter);
    } else {
      ++iter;
    }
  }
}

}  // namespace gpu