// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/client/mapped_memory.h"

#include <algorithm>
#include <functional>

#include "base/logging.h"
#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

MemoryChunk::MemoryChunk(int32 shm_id,
                         scoped_refptr<gpu::Buffer> shm,
                         CommandBufferHelper* helper,
                         const base::Closure& poll_callback)
    : shm_id_(shm_id),
      shm_(shm),
      allocator_(shm->size(), helper, poll_callback, shm->memory()) {}

MemoryChunk::~MemoryChunk() {}

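// |unused_memory_reclaim_limit| becomes max_free_bytes_: once at least that
// many bytes sit free across all chunks, Alloc() waits on tokens instead of
// growing the pool. kNoLimit disables the check.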
MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
                                         const base::Closure& poll_callback,
                                         size_t unused_memory_reclaim_limit)
    : chunk_size_multiple_(FencedAllocator::kAllocAlignment),
      helper_(helper),
      poll_callback_(poll_callback),
      allocated_memory_(0),
      max_free_bytes_(unused_memory_reclaim_limit),
      max_allocated_bytes_(kNoLimit) {
}

MappedMemoryManager::~MappedMemoryManager() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  for (MemoryChunkVector::iterator iter = chunks_.begin();
       iter != chunks_.end(); ++iter) {
    MemoryChunk* chunk = *iter;
    cmd_buf->DestroyTransferBuffer(chunk->shm_id());
  }
}

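// Allocation proceeds in three steps: first try to reuse free space in an
// existing chunk without waiting; if enough memory is already sitting idle
// (see max_free_bytes_), retry while waiting on tokens; otherwise create a
// new chunk, subject to max_allocated_bytes_.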
void* MappedMemoryManager::Alloc(
    unsigned int size, int32* shm_id, unsigned int* shm_offset) {
  DCHECK(shm_id);
  DCHECK(shm_offset);
  if (size <= allocated_memory_) {
    size_t total_bytes_in_use = 0;
    // See if any of the chunks can satisfy this request.
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      chunk->FreeUnused();
      total_bytes_in_use += chunk->bytes_in_use();
      if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
        void* mem = chunk->Alloc(size);
        DCHECK(mem);
        *shm_id = chunk->shm_id();
        *shm_offset = chunk->GetOffset(mem);
        return mem;
      }
    }

    // If there is a memory limit being enforced and the total free
    // memory (allocated_memory_ - total_bytes_in_use) is larger than
    // the limit, try waiting.
    if (max_free_bytes_ != kNoLimit &&
        (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
      TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
      for (size_t ii = 0; ii < chunks_.size(); ++ii) {
        MemoryChunk* chunk = chunks_[ii];
        if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
          void* mem = chunk->Alloc(size);
          DCHECK(mem);
          *shm_id = chunk->shm_id();
          *shm_offset = chunk->GetOffset(mem);
          return mem;
        }
      }
    }
  }

  if (max_allocated_bytes_ != kNoLimit &&
      (allocated_memory_ + size) > max_allocated_bytes_) {
    return nullptr;
  }

  // Make a new chunk to satisfy the request.
  CommandBuffer* cmd_buf = helper_->command_buffer();
  unsigned int chunk_size =
      ((size + chunk_size_multiple_ - 1) / chunk_size_multiple_) *
      chunk_size_multiple_;
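  // The request is rounded up to the next multiple of chunk_size_multiple_;
  // for example, if the multiple were 16 bytes, a 100-byte request would
  // yield a 112-byte chunk.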
  int32 id = -1;
  scoped_refptr<gpu::Buffer> shm =
      cmd_buf->CreateTransferBuffer(chunk_size, &id);
  if (id < 0)
    return nullptr;
  DCHECK(shm.get());
  MemoryChunk* mc = new MemoryChunk(id, shm, helper_, poll_callback_);
  allocated_memory_ += mc->GetSize();
  chunks_.push_back(mc);
  void* mem = mc->Alloc(size);
  DCHECK(mem);
  *shm_id = mc->shm_id();
  *shm_offset = mc->GetOffset(mem);
  return mem;
}

void MappedMemoryManager::Free(void* pointer) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->Free(pointer);
      return;
    }
  }
  NOTREACHED();
}

void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
    MemoryChunk* chunk = chunks_[ii];
    if (chunk->IsInChunk(pointer)) {
      chunk->FreePendingToken(pointer, token);
      return;
    }
  }
  NOTREACHED();
}

void MappedMemoryManager::FreeUnused() {
  CommandBuffer* cmd_buf = helper_->command_buffer();
  MemoryChunkVector::iterator iter = chunks_.begin();
  while (iter != chunks_.end()) {
    MemoryChunk* chunk = *iter;
    chunk->FreeUnused();
    if (!chunk->InUse()) {
      cmd_buf->DestroyTransferBuffer(chunk->shm_id());
      allocated_memory_ -= chunk->GetSize();
      iter = chunks_.erase(iter);
    } else {
      ++iter;
    }
  }
}

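// Illustrative sketch (not part of the original file): ScopedMappedMemoryPtr
// automates the manual pattern of pairing Alloc() with FreePendingToken().
// |manager|, |helper| and kDataSize are hypothetical placeholders.
//
//   int32 shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* mem = manager->Alloc(kDataSize, &shm_id, &shm_offset);
//   if (mem) {
//     // ... write into |mem| and issue commands that reference
//     // shm_id / shm_offset ...
//     manager->FreePendingToken(mem, helper->InsertToken());
//   }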
void ScopedMappedMemoryPtr::Release() {
  if (buffer_) {
    mapped_memory_manager_->FreePendingToken(buffer_, helper_->InsertToken());
    buffer_ = nullptr;
    size_ = 0;
    shm_id_ = 0;
    shm_offset_ = 0;
  }
  if (flush_after_release_)
    helper_->CommandBufferHelper::Flush();
}

void ScopedMappedMemoryPtr::Reset(uint32_t new_size) {
  Release();

  if (new_size) {
    buffer_ = mapped_memory_manager_->Alloc(new_size, &shm_id_, &shm_offset_);
    size_ = buffer_ ? new_size : 0;
  }
}

}  // namespace gpu