We started redesigning the GpuMemoryBuffer interface to handle multiple buffers [0].
[chromium-blink-merge.git] gpu/command_buffer/client/mapped_memory.h (blob 10ac639929f3664a15d425c9b8fb82f46d331e1a)
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include <stdint.h>

#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/scoped_vector.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/gpu_export.h"

namespace gpu {

class CommandBufferHelper;

// Manages a shared memory segment.
class GPU_EXPORT MemoryChunk {
 public:
  MemoryChunk(int32_t shm_id,
              scoped_refptr<gpu::Buffer> shm,
              CommandBufferHelper* helper,
              const base::Closure& poll_callback);
  ~MemoryChunk();

  // Gets the size of the largest free block that is available without waiting.
  unsigned int GetLargestFreeSizeWithoutWaiting() {
    return allocator_.GetLargestFreeSize();
  }

  // Gets the size of the largest free block that can be allocated if the
  // caller can wait.
  unsigned int GetLargestFreeSizeWithWaiting() {
    return allocator_.GetLargestFreeOrPendingSize();
  }

  // Gets the size of the chunk.
  unsigned int GetSize() const {
    return static_cast<unsigned int>(shm_->size());
  }

  // The shared memory id for this chunk.
  int32_t shm_id() const {
    return shm_id_;
  }

  // Allocates a block of memory. If the buffer is out of directly available
  // memory, this function may wait until memory that was freed "pending a
  // token" can be re-used.
  //
  // Parameters:
  //   size: the size of the memory block to allocate.
  //
  // Returns:
  //   the pointer to the allocated memory block, or NULL if out of
  //   memory.
  void* Alloc(unsigned int size) {
    return allocator_.Alloc(size);
  }

  // Gets the offset to a memory block given the base memory and the address.
  // It translates NULL to FencedAllocator::kInvalidOffset.
  unsigned int GetOffset(void* pointer) {
    return allocator_.GetOffset(pointer);
  }

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer) {
    allocator_.Free(pointer);
  }

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Returns true if pointer is in the range of this block.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_->memory() &&
           pointer <
               reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
  }

  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

  size_t bytes_in_use() const {
    return allocator_.bytes_in_use();
  }

 private:
  int32_t shm_id_;
  scoped_refptr<gpu::Buffer> shm_;
  FencedAllocatorWrapper allocator_;

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
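
// NOTE (illustrative comment added here; not part of the original header):
// a minimal sketch of how a MemoryChunk is driven, assuming a
// CommandBufferHelper* named |helper| that can insert tokens into the
// command stream:
//
//   void* ptr = chunk->Alloc(64);                  // may wait on old tokens
//   unsigned int offset = chunk->GetOffset(ptr);   // offset within the shm
//   // ... issue commands that read from (chunk->shm_id(), offset) ...
//   chunk->FreePendingToken(ptr, helper->InsertToken());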

// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
 public:
  enum MemoryLimit {
    kNoLimit = 0,
  };

  // |unused_memory_reclaim_limit|: When exceeded this causes pending memory
  // to be reclaimed before allocating more memory.
  MappedMemoryManager(CommandBufferHelper* helper,
                      const base::Closure& poll_callback,
                      size_t unused_memory_reclaim_limit);

  ~MappedMemoryManager();

  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    DCHECK(multiple % FencedAllocator::kAllocAlignment == 0);
    chunk_size_multiple_ = multiple;
  }

  // Allocates a block of memory.
  // Parameters:
  //   size: size of memory to allocate.
  //   shm_id: pointer to variable to receive the shared memory id.
  //   shm_offset: pointer to variable to receive the shared memory offset.
  // Returns:
  //   pointer to allocated block of memory. NULL if failure.
  void* Alloc(
      unsigned int size, int32_t* shm_id, unsigned int* shm_offset);

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer);

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, int32_t token);

  // Frees any shared memory that is not in use.
  void FreeUnused();

  // Used for testing.
  size_t num_chunks() const {
    return chunks_.size();
  }

  size_t bytes_in_use() const {
    size_t bytes_in_use = 0;
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      bytes_in_use += chunk->bytes_in_use();
    }
    return bytes_in_use;
  }

  // Used for testing.
  size_t allocated_memory() const {
    return allocated_memory_;
  }

 private:
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // Size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;
  base::Closure poll_callback_;
  MemoryChunkVector chunks_;
  size_t allocated_memory_;
  size_t max_free_bytes_;

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};

}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
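
For readers who haven't touched the command-buffer client code, here is a minimal, illustrative sketch of how the MappedMemoryManager API declared above is meant to be used. It is not taken from Chromium: it assumes a MappedMemoryManager* named mapped_memory and a CommandBufferHelper* named helper that can insert tokens into the command stream, and kSize/data are placeholders.

  int32_t shm_id = 0;
  unsigned int shm_offset = 0;
  // Ask the manager for a mapped block; it picks (or creates) a chunk and
  // reports which shared memory segment and offset the block lives at.
  void* ptr = mapped_memory->Alloc(kSize, &shm_id, &shm_offset);
  if (ptr) {
    memcpy(ptr, data, kSize);
    // Issue a command that reads kSize bytes at (shm_id, shm_offset), then
    // release the block pending a token. It will not be handed out again
    // until that token has passed through the command stream.
    mapped_memory->FreePendingToken(ptr, helper->InsertToken());
  }

FreePendingToken is what lets a client recycle mapped memory without synchronizing with the GPU on every release: the FencedAllocator inside each chunk simply defers reuse of the block until the associated token has passed.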