// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include <stdint.h>

#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/gpu_export.h"

namespace gpu {

class CommandBufferHelper;

// Manages a shared memory segment.
class GPU_EXPORT MemoryChunk {
 public:
  MemoryChunk(int32_t shm_id,
              scoped_refptr<gpu::Buffer> shm,
              CommandBufferHelper* helper,
              const base::Closure& poll_callback);
  ~MemoryChunk();

  // Gets the size of the largest free block that is available without waiting.
  unsigned int GetLargestFreeSizeWithoutWaiting() {
    return allocator_.GetLargestFreeSize();
  }

  // Gets the size of the largest free block that can be allocated if the
  // caller can wait.
  unsigned int GetLargestFreeSizeWithWaiting() {
    return allocator_.GetLargestFreeOrPendingSize();
  }

  // Gets the size of the chunk.
  unsigned int GetSize() const {
    return static_cast<unsigned int>(shm_->size());
  }

  // The shared memory id for this chunk.
  int32_t shm_id() const {
    return shm_id_;
  }

  // Allocates a block of memory. If the buffer is out of directly available
  // memory, this function may wait until memory that was freed "pending a
  // token" can be re-used.
  //
  // Parameters:
  //   size: the size of the memory block to allocate.
  //
  // Returns:
  //   the pointer to the allocated memory block, or NULL if out of
  //   memory.
  void* Alloc(unsigned int size) {
    return allocator_.Alloc(size);
  }

  // Gets the offset to a memory block given the base memory and the address.
  // It translates NULL to FencedAllocator::kInvalidOffset.
  unsigned int GetOffset(void* pointer) {
    return allocator_.GetOffset(pointer);
  }

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer) {
    allocator_.Free(pointer);
  }

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Returns true if pointer is in the range of this block.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_->memory() &&
           pointer <
               reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
  }

  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

  size_t bytes_in_use() const {
    return allocator_.bytes_in_use();
  }

 private:
  int32_t shm_id_;
  scoped_refptr<gpu::Buffer> shm_;
  FencedAllocatorWrapper allocator_;

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
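
// Usage sketch (illustrative only, not part of the original header): a
// MemoryChunk hands out sub-allocations of its shared memory buffer and
// defers reuse of a block until a token has passed through the command
// stream. The |shm_id|, |buffer|, |helper| and |poll_callback| names below
// are assumed to come from the surrounding client code.
//
//   MemoryChunk chunk(shm_id, buffer, helper, poll_callback);
//   void* data = chunk.Alloc(1024);
//   unsigned int offset = chunk.GetOffset(data);
//   // ... issue commands that reference (chunk.shm_id(), offset) ...
//   chunk.FreePendingToken(data, helper->InsertToken());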

// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
 public:
  enum MemoryLimit {
    kNoLimit = 0,
  };

  // |unused_memory_reclaim_limit|: When exceeded this causes pending memory
  // to be reclaimed before allocating more memory.
  MappedMemoryManager(CommandBufferHelper* helper,
                      const base::Closure& poll_callback,
                      size_t unused_memory_reclaim_limit);

  ~MappedMemoryManager();

  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    DCHECK(multiple % FencedAllocator::kAllocAlignment == 0);
    chunk_size_multiple_ = multiple;
  }

  size_t max_allocated_bytes() const {
    return max_allocated_bytes_;
  }

  void set_max_allocated_bytes(size_t max_allocated_bytes) {
    max_allocated_bytes_ = max_allocated_bytes;
  }

  // Allocates a block of memory.
  // Parameters:
  //   size: size of memory to allocate.
  //   shm_id: pointer to variable to receive the shared memory id.
  //   shm_offset: pointer to variable to receive the shared memory offset.
  // Returns:
  //   pointer to allocated block of memory. NULL if failure.
  void* Alloc(
      unsigned int size, int32_t* shm_id, unsigned int* shm_offset);

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer);

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, int32_t token);

  // Frees any shared memory that is not in use.
  void FreeUnused();

  // Used for testing.
  size_t num_chunks() const {
    return chunks_.size();
  }

  size_t bytes_in_use() const {
    size_t bytes_in_use = 0;
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      bytes_in_use += chunk->bytes_in_use();
    }
    return bytes_in_use;
  }

  // Used for testing.
  size_t allocated_memory() const {
    return allocated_memory_;
  }

 private:
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // Size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;
  base::Closure poll_callback_;
  MemoryChunkVector chunks_;
  size_t allocated_memory_;
  size_t max_free_bytes_;
  size_t max_allocated_bytes_;

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
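
// Usage sketch (illustrative only, not part of the original header): the
// manager grows and shrinks a set of MemoryChunks on demand. |helper| and
// |poll_callback| are assumed to exist in the calling code, and the transfer
// size is arbitrary.
//
//   MappedMemoryManager manager(helper, poll_callback,
//                               MappedMemoryManager::kNoLimit);
//   int32_t shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* mem = manager.Alloc(64 * 1024, &shm_id, &shm_offset);
//   if (mem) {
//     // Fill |mem|, reference it in commands via (shm_id, shm_offset), then
//     // release it once the commands using it have been issued.
//     manager.FreePendingToken(mem, helper->InsertToken());
//   }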

// A class that will manage the lifetime of a mapped memory allocation
class GPU_EXPORT ScopedMappedMemoryPtr {
 public:
  ScopedMappedMemoryPtr(
      uint32_t size,
      CommandBufferHelper* helper,
      MappedMemoryManager* mapped_memory_manager)
      : buffer_(NULL),
        size_(0),
        shm_id_(0),
        shm_offset_(0),
        flush_after_release_(false),
        helper_(helper),
        mapped_memory_manager_(mapped_memory_manager) {
    Reset(size);
  }

  ~ScopedMappedMemoryPtr() {
    Release();
  }

  bool valid() const {
    return buffer_ != NULL;
  }

  void SetFlushAfterRelease(bool flush_after_release) {
    flush_after_release_ = flush_after_release;
  }

  uint32_t size() const {
    return size_;
  }

  int32_t shm_id() const {
    return shm_id_;
  }

  uint32_t offset() const {
    return shm_offset_;
  }

  void* address() const {
    return buffer_;
  }

  void Release();

  void Reset(uint32_t new_size);

 private:
  void* buffer_;
  uint32_t size_;
  int32_t shm_id_;
  uint32_t shm_offset_;
  bool flush_after_release_;
  CommandBufferHelper* helper_;
  MappedMemoryManager* mapped_memory_manager_;
  DISALLOW_COPY_AND_ASSIGN(ScopedMappedMemoryPtr);
};
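
// Usage sketch (illustrative only, not part of the original header):
// ScopedMappedMemoryPtr ties an allocation's lifetime to a C++ scope,
// returning it to the manager in its destructor via Release(). |helper|,
// |manager| and |src| are assumed to exist in the calling code.
//
//   {
//     ScopedMappedMemoryPtr scoped_mem(256, helper, manager);
//     if (scoped_mem.valid()) {
//       memcpy(scoped_mem.address(), src, scoped_mem.size());
//       // Commands may reference (scoped_mem.shm_id(), scoped_mem.offset()).
//     }
//   }  // Released back to the manager when |scoped_mem| goes out of scope.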

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_