gpu: Add memory tracing of GPU transfer buffers.
[chromium-blink-merge.git] / gpu / command_buffer / client / mapped_memory.cc
blob34a3c1ac51070a3bba7075e2b1d62f8764ff833b
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "gpu/command_buffer/client/mapped_memory.h"
7 #include <algorithm>
8 #include <functional>
10 #include "base/atomic_sequence_num.h"
11 #include "base/logging.h"
12 #include "base/strings/stringprintf.h"
13 #include "base/thread_task_runner_handle.h"
14 #include "base/trace_event/memory_dump_manager.h"
15 #include "base/trace_event/trace_event.h"
16 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
17 #include "gpu/command_buffer/common/buffer.h"
namespace gpu {
namespace {

// Generates process-unique IDs to use for tracing a MappedMemoryManager's
// chunks. Read once per manager in the MappedMemoryManager constructor.
base::StaticAtomicSequenceNumber g_next_mapped_memory_manager_tracing_id;

}  // namespace
// A MemoryChunk wraps one shared-memory transfer buffer (|shm|) and serves
// sub-allocations out of it through a FencedAllocator covering the whole
// buffer.
MemoryChunk::MemoryChunk(int32 shm_id,
                         scoped_refptr<gpu::Buffer> shm,
                         CommandBufferHelper* helper,
                         const base::Closure& poll_callback)
    : shm_id_(shm_id),
      shm_(shm),
      // The allocator spans the entire buffer, starting at its base address.
      allocator_(shm->size(), helper, poll_callback, shm->memory()) {}

MemoryChunk::~MemoryChunk() {}
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         const base::Closure& poll_callback,
                                         size_t unused_memory_reclaim_limit)
    : chunk_size_multiple_(FencedAllocator::kAllocAlignment),
      helper_(helper),
      poll_callback_(poll_callback),
      allocated_memory_(0),
      max_free_bytes_(unused_memory_reclaim_limit),
      max_allocated_bytes_(kNoLimit),
      // Process-unique id so dumps from different managers don't collide.
      tracing_id_(g_next_mapped_memory_manager_tracing_id.GetNext()) {
  // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
  // Don't register a dump provider in these cases.
  // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
  if (base::ThreadTaskRunnerHandle::IsSet()) {
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, base::ThreadTaskRunnerHandle::Get());
  }
}
57 MappedMemoryManager::~MappedMemoryManager() {
58 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
59 this);
61 CommandBuffer* cmd_buf = helper_->command_buffer();
62 for (MemoryChunkVector::iterator iter = chunks_.begin();
63 iter != chunks_.end(); ++iter) {
64 MemoryChunk* chunk = *iter;
65 cmd_buf->DestroyTransferBuffer(chunk->shm_id());
69 void* MappedMemoryManager::Alloc(
70 unsigned int size, int32* shm_id, unsigned int* shm_offset) {
71 DCHECK(shm_id);
72 DCHECK(shm_offset);
73 if (size <= allocated_memory_) {
74 size_t total_bytes_in_use = 0;
75 // See if any of the chunks can satisfy this request.
76 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
77 MemoryChunk* chunk = chunks_[ii];
78 chunk->FreeUnused();
79 total_bytes_in_use += chunk->bytes_in_use();
80 if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
81 void* mem = chunk->Alloc(size);
82 DCHECK(mem);
83 *shm_id = chunk->shm_id();
84 *shm_offset = chunk->GetOffset(mem);
85 return mem;
89 // If there is a memory limit being enforced and total free
90 // memory (allocated_memory_ - total_bytes_in_use) is larger than
91 // the limit try waiting.
92 if (max_free_bytes_ != kNoLimit &&
93 (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
94 TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
95 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
96 MemoryChunk* chunk = chunks_[ii];
97 if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
98 void* mem = chunk->Alloc(size);
99 DCHECK(mem);
100 *shm_id = chunk->shm_id();
101 *shm_offset = chunk->GetOffset(mem);
102 return mem;
108 if (max_allocated_bytes_ != kNoLimit &&
109 (allocated_memory_ + size) > max_allocated_bytes_) {
110 return nullptr;
113 // Make a new chunk to satisfy the request.
114 CommandBuffer* cmd_buf = helper_->command_buffer();
115 unsigned int chunk_size =
116 ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
117 chunk_size_multiple_;
118 int32 id = -1;
119 scoped_refptr<gpu::Buffer> shm =
120 cmd_buf->CreateTransferBuffer(chunk_size, &id);
121 if (id < 0)
122 return NULL;
123 DCHECK(shm.get());
124 MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
125 allocated_memory_ += mc->GetSize();
126 chunks_.push_back(mc);
127 void* mem = mc->Alloc(size);
128 DCHECK(mem);
129 *shm_id = mc->shm_id();
130 *shm_offset = mc->GetOffset(mem);
131 return mem;
134 void MappedMemoryManager::Free(void* pointer) {
135 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
136 MemoryChunk* chunk = chunks_[ii];
137 if (chunk->IsInChunk(pointer)) {
138 chunk->Free(pointer);
139 return;
142 NOTREACHED();
145 void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
146 for (size_t ii = 0; ii < chunks_.size(); ++ii) {
147 MemoryChunk* chunk = chunks_[ii];
148 if (chunk->IsInChunk(pointer)) {
149 chunk->FreePendingToken(pointer, token);
150 return;
153 NOTREACHED();
156 void MappedMemoryManager::FreeUnused() {
157 CommandBuffer* cmd_buf = helper_->command_buffer();
158 MemoryChunkVector::iterator iter = chunks_.begin();
159 while (iter != chunks_.end()) {
160 MemoryChunk* chunk = *iter;
161 chunk->FreeUnused();
162 if (!chunk->InUse()) {
163 cmd_buf->DestroyTransferBuffer(chunk->shm_id());
164 allocated_memory_ -= chunk->GetSize();
165 iter = chunks_.erase(iter);
166 } else {
167 ++iter;
// memory-infra dump provider hook: emits one allocator dump per chunk under
// gpu/mapped_memory/manager_<tracing_id_>/, recording total and free bytes,
// and links each dump to the shared transfer buffer's global GUID.
bool MappedMemoryManager::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  const uint64 tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  for (const auto& chunk : chunks_) {
    std::string dump_name = base::StringPrintf(
        "gpu/mapped_memory/manager_%d/chunk_%d", tracing_id_, chunk->shm_id());
    base::trace_event::MemoryAllocatorDump* dump =
        pmd->CreateAllocatorDump(dump_name);

    dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                    base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                    chunk->GetSize());
    dump->AddScalar("free_size",
                    base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                    chunk->GetFreeSize());

    // Cross-process GUID of the underlying shared-memory buffer; the edge
    // below attributes this chunk's memory to that buffer.
    auto guid = GetBufferGUIDForTracing(tracing_process_id, chunk->shm_id());

    // NOTE(review): importance 2 presumably outranks other claimants of the
    // shared buffer so this manager gets the memory attributed to it —
    // confirm against the memory-infra ownership-edge documentation.
    const int kImportance = 2;
    pmd->CreateSharedGlobalAllocatorDump(guid);
    pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
  }

  // Always report success; a dump is emitted for every chunk.
  return true;
}
202 void ScopedMappedMemoryPtr::Release() {
203 if (buffer_) {
204 mapped_memory_manager_->FreePendingToken(buffer_, helper_->InsertToken());
205 buffer_ = nullptr;
206 size_ = 0;
207 shm_id_ = 0;
208 shm_offset_ = 0;
210 if (flush_after_release_)
211 helper_->CommandBufferHelper::Flush();
215 void ScopedMappedMemoryPtr::Reset(uint32_t new_size) {
216 Release();
218 if (new_size) {
219 buffer_ = mapped_memory_manager_->Alloc(new_size, &shm_id_, &shm_offset_);
220 size_ = buffer_ ? new_size : 0;
224 } // namespace gpu