// gpu/command_buffer/client/mapped_memory.cc
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #include "gpu/command_buffer/client/mapped_memory.h"
7 #include <algorithm>
8 #include <functional>
10 #include "base/atomic_sequence_num.h"
11 #include "base/logging.h"
12 #include "base/strings/stringprintf.h"
13 #include "base/thread_task_runner_handle.h"
14 #include "base/trace_event/memory_dump_manager.h"
15 #include "base/trace_event/trace_event.h"
16 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
17 #include "gpu/command_buffer/common/buffer.h"
19 namespace gpu {
20 namespace {
22 // Generates process-unique IDs to use for tracing a MappedMemoryManager's
23 // chunks.
24 base::StaticAtomicSequenceNumber g_next_mapped_memory_manager_tracing_id;
26 } // namespace
28 MemoryChunk::MemoryChunk(int32 shm_id,
29 scoped_refptr<gpu::Buffer> shm,
30 CommandBufferHelper* helper)
31 : shm_id_(shm_id),
32 shm_(shm),
33 allocator_(shm->size(), helper, shm->memory()) {}
35 MemoryChunk::~MemoryChunk() {}
37 MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
38 size_t unused_memory_reclaim_limit)
39 : chunk_size_multiple_(FencedAllocator::kAllocAlignment),
40 helper_(helper),
41 allocated_memory_(0),
42 max_free_bytes_(unused_memory_reclaim_limit),
43 max_allocated_bytes_(kNoLimit),
44 tracing_id_(g_next_mapped_memory_manager_tracing_id.GetNext()) {
45 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
46 // Don't register a dump provider in these cases.
47 // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
48 if (base::ThreadTaskRunnerHandle::IsSet()) {
49 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
50 this, base::ThreadTaskRunnerHandle::Get());
54 MappedMemoryManager::~MappedMemoryManager() {
55 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
56 this);
58 CommandBuffer* cmd_buf = helper_->command_buffer();
59 for (MemoryChunkVector::iterator iter = chunks_.begin();
60 iter != chunks_.end(); ++iter) {
61 MemoryChunk* chunk = *iter;
62 cmd_buf->DestroyTransferBuffer(chunk->shm_id());
66 void* MappedMemoryManager::Alloc(
67 unsigned int size, int32* shm_id, unsigned int* shm_offset) {
68 DCHECK(shm_id);
69 DCHECK(shm_offset);
70 if (size <= allocated_memory_) {
71 size_t total_bytes_in_use = 0;
72 // See if any of the chunks can satisfy this request.
73 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
74 MemoryChunk* chunk = chunks_[ii];
75 chunk->FreeUnused();
76 total_bytes_in_use += chunk->bytes_in_use();
77 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
78 void* mem = chunk->Alloc(size);
79 DCHECK(mem);
80 *shm_id = chunk->shm_id();
81 *shm_offset = chunk->GetOffset(mem);
82 return mem;
86 // If there is a memory limit being enforced and total free
87 // memory (allocated_memory_ - total_bytes_in_use) is larger than
88 // the limit try waiting.
89 if (max_free_bytes_ != kNoLimit &&
90 (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
91 TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
92 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
93 MemoryChunk* chunk = chunks_[ii];
94 if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
95 void* mem = chunk->Alloc(size);
96 DCHECK(mem);
97 *shm_id = chunk->shm_id();
98 *shm_offset = chunk->GetOffset(mem);
99 return mem;
105 if (max_allocated_bytes_ != kNoLimit &&
106 (allocated_memory_ + size) > max_allocated_bytes_) {
107 return nullptr;
110 // Make a new chunk to satisfy the request.
111 CommandBuffer* cmd_buf = helper_->command_buffer();
112 unsigned int chunk_size =
113 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
114 chunk_size_multiple_;
115 int32 id = -1;
116 scoped_refptr<gpu::Buffer> shm =
117 cmd_buf->CreateTransferBuffer(chunk_size, &id);
118 if (id < 0)
119 return NULL;
120 DCHECK(shm.get());
121 MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
122 allocated_memory_ += mc->GetSize();
123 chunks_.push_back(mc);
124 void* mem = mc->Alloc(size);
125 DCHECK(mem);
126 *shm_id = mc->shm_id();
127 *shm_offset = mc->GetOffset(mem);
128 return mem;
131 void MappedMemoryManager::Free(void* pointer) {
132 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
133 MemoryChunk* chunk = chunks_[ii];
134 if (chunk->IsInChunk(pointer)) {
135 chunk->Free(pointer);
136 return;
139 NOTREACHED();
142 void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
143 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
144 MemoryChunk* chunk = chunks_[ii];
145 if (chunk->IsInChunk(pointer)) {
146 chunk->FreePendingToken(pointer, token);
147 return;
150 NOTREACHED();
153 void MappedMemoryManager::FreeUnused() {
154 CommandBuffer* cmd_buf = helper_->command_buffer();
155 MemoryChunkVector::iterator iter = chunks_.begin();
156 while (iter != chunks_.end()) {
157 MemoryChunk* chunk = *iter;
158 chunk->FreeUnused();
159 if (!chunk->InUse()) {
160 cmd_buf->DestroyTransferBuffer(chunk->shm_id());
161 allocated_memory_ -= chunk->GetSize();
162 iter = chunks_.erase(iter);
163 } else {
164 ++iter;
169 bool MappedMemoryManager::OnMemoryDump(
170 const base::trace_event::MemoryDumpArgs& args,
171 base::trace_event::ProcessMemoryDump* pmd) {
172 const uint64 tracing_process_id =
173 base::trace_event::MemoryDumpManager::GetInstance()
174 ->GetTracingProcessId();
176 for (const auto& chunk : chunks_) {
177 std::string dump_name = base::StringPrintf(
178 "gpu/mapped_memory/manager_%d/chunk_%d", tracing_id_, chunk->shm_id());
179 base::trace_event::MemoryAllocatorDump* dump =
180 pmd->CreateAllocatorDump(dump_name);
182 dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
183 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
184 chunk->GetSize());
185 dump->AddScalar("free_size",
186 base::trace_event::MemoryAllocatorDump::kUnitsBytes,
187 chunk->GetFreeSize());
189 auto guid = GetBufferGUIDForTracing(tracing_process_id, chunk->shm_id());
191 const int kImportance = 2;
192 pmd->CreateSharedGlobalAllocatorDump(guid);
193 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
196 return true;
199 void ScopedMappedMemoryPtr::Release() {
200 if (buffer_) {
201 mapped_memory_manager_->FreePendingToken(buffer_, helper_->InsertToken());
202 buffer_ = nullptr;
203 size_ = 0;
204 shm_id_ = 0;
205 shm_offset_ = 0;
207 if (flush_after_release_)
208 helper_->CommandBufferHelper::Flush();
212 void ScopedMappedMemoryPtr::Reset(uint32_t new_size) {
213 Release();
215 if (new_size) {
216 buffer_ = mapped_memory_manager_->Alloc(new_size, &shm_id_, &shm_offset_);
217 size_ = buffer_ ? new_size : 0;
221 } // namespace gpu