// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #include "gpu/command_buffer/client/mapped_memory.h"
10 #include "base/debug/trace_event.h"
11 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
15 MemoryChunk::MemoryChunk(
16 int32 shm_id
, gpu::Buffer shm
, CommandBufferHelper
* helper
)
19 allocator_(shm
.size
, helper
, shm
.ptr
) {
22 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper
* helper
,
23 size_t unused_memory_reclaim_limit
)
24 : chunk_size_multiple_(1),
27 max_free_bytes_(unused_memory_reclaim_limit
) {
30 MappedMemoryManager::~MappedMemoryManager() {
31 CommandBuffer
* cmd_buf
= helper_
->command_buffer();
32 for (MemoryChunkVector::iterator iter
= chunks_
.begin();
33 iter
!= chunks_
.end(); ++iter
) {
34 MemoryChunk
* chunk
= *iter
;
35 cmd_buf
->DestroyTransferBuffer(chunk
->shm_id());
39 void* MappedMemoryManager::Alloc(
40 unsigned int size
, int32
* shm_id
, unsigned int* shm_offset
) {
42 GPU_DCHECK(shm_offset
);
43 if (size
<= allocated_memory_
) {
44 size_t total_bytes_in_use
= 0;
45 // See if any of the chunks can satisfy this request.
46 for (size_t ii
= 0; ii
< chunks_
.size(); ++ii
) {
47 MemoryChunk
* chunk
= chunks_
[ii
];
49 total_bytes_in_use
+= chunk
->bytes_in_use();
50 if (chunk
->GetLargestFreeSizeWithoutWaiting() >= size
) {
51 void* mem
= chunk
->Alloc(size
);
53 *shm_id
= chunk
->shm_id();
54 *shm_offset
= chunk
->GetOffset(mem
);
59 // If there is a memory limit being enforced and total free
60 // memory (allocated_memory_ - total_bytes_in_use) is larger than
61 // the limit try waiting.
62 if (max_free_bytes_
!= kNoLimit
&&
63 (allocated_memory_
- total_bytes_in_use
) >= max_free_bytes_
) {
64 TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
65 for (size_t ii
= 0; ii
< chunks_
.size(); ++ii
) {
66 MemoryChunk
* chunk
= chunks_
[ii
];
67 if (chunk
->GetLargestFreeSizeWithWaiting() >= size
) {
68 void* mem
= chunk
->Alloc(size
);
70 *shm_id
= chunk
->shm_id();
71 *shm_offset
= chunk
->GetOffset(mem
);
78 // Make a new chunk to satisfy the request.
79 CommandBuffer
* cmd_buf
= helper_
->command_buffer();
80 unsigned int chunk_size
=
81 ((size
+ chunk_size_multiple_
- 1) / chunk_size_multiple_
) *
84 gpu::Buffer shm
= cmd_buf
->CreateTransferBuffer(chunk_size
, &id
);
87 MemoryChunk
* mc
= new MemoryChunk(id
, shm
, helper_
);
88 allocated_memory_
+= mc
->GetSize();
89 chunks_
.push_back(mc
);
90 void* mem
= mc
->Alloc(size
);
92 *shm_id
= mc
->shm_id();
93 *shm_offset
= mc
->GetOffset(mem
);
97 void MappedMemoryManager::Free(void* pointer
) {
98 for (size_t ii
= 0; ii
< chunks_
.size(); ++ii
) {
99 MemoryChunk
* chunk
= chunks_
[ii
];
100 if (chunk
->IsInChunk(pointer
)) {
101 chunk
->Free(pointer
);
108 void MappedMemoryManager::FreePendingToken(void* pointer
, int32 token
) {
109 for (size_t ii
= 0; ii
< chunks_
.size(); ++ii
) {
110 MemoryChunk
* chunk
= chunks_
[ii
];
111 if (chunk
->IsInChunk(pointer
)) {
112 chunk
->FreePendingToken(pointer
, token
);
119 void MappedMemoryManager::FreeUnused() {
120 CommandBuffer
* cmd_buf
= helper_
->command_buffer();
121 MemoryChunkVector::iterator iter
= chunks_
.begin();
122 while (iter
!= chunks_
.end()) {
123 MemoryChunk
* chunk
= *iter
;
125 if (!chunk
->InUse()) {
126 cmd_buf
->DestroyTransferBuffer(chunk
->shm_id());
127 allocated_memory_
-= chunk
->GetSize();
128 iter
= chunks_
.erase(iter
);