// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_

#include <stdint.h>

#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/scoped_vector.h"
#include "base/trace_event/memory_dump_provider.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/gpu_export.h"

namespace gpu {

class CommandBufferHelper;

// Manages a shared memory segment.
class GPU_EXPORT MemoryChunk {
 public:
  MemoryChunk(int32_t shm_id,
              scoped_refptr<gpu::Buffer> shm,
              CommandBufferHelper* helper);
  ~MemoryChunk();

  // Gets the size of the largest free block that is available without waiting.
  unsigned int GetLargestFreeSizeWithoutWaiting() {
    return allocator_.GetLargestFreeSize();
  }

  // Gets the size of the largest free block that can be allocated if the
  // caller can wait.
  unsigned int GetLargestFreeSizeWithWaiting() {
    return allocator_.GetLargestFreeOrPendingSize();
  }

  // Gets the size of the chunk.
  unsigned int GetSize() const {
    return static_cast<unsigned int>(shm_->size());
  }

  // The shared memory id for this chunk.
  int32_t shm_id() const {
    return shm_id_;
  }

  // Allocates a block of memory. If the buffer is out of directly available
  // memory, this function may wait until memory that was freed "pending a
  // token" can be re-used.
  //
  // Parameters:
  //   size: the size of the memory block to allocate.
  //
  // Returns:
  //   the pointer to the allocated memory block, or NULL if out of
  //   memory.
  void* Alloc(unsigned int size) {
    return allocator_.Alloc(size);
  }

  // Gets the offset to a memory block given the base memory and the address.
  // It translates NULL to FencedAllocator::kInvalidOffset.
  unsigned int GetOffset(void* pointer) {
    return allocator_.GetOffset(pointer);
  }

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer) {
    allocator_.Free(pointer);
  }

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, unsigned int token) {
    allocator_.FreePendingToken(pointer, token);
  }

  // Frees any blocks whose tokens have passed.
  void FreeUnused() {
    allocator_.FreeUnused();
  }

  // Gets the free size of the chunk.
  unsigned int GetFreeSize() { return allocator_.GetFreeSize(); }

  // Returns true if pointer is in the range of this block.
  bool IsInChunk(void* pointer) const {
    return pointer >= shm_->memory() &&
           pointer <
               reinterpret_cast<const int8_t*>(shm_->memory()) + shm_->size();
  }
  // Returns true if any memory in this chunk is in use.
  bool InUse() {
    return allocator_.InUse();
  }

  size_t bytes_in_use() const {
    return allocator_.bytes_in_use();
  }

 private:
  int32_t shm_id_;
  scoped_refptr<gpu::Buffer> shm_;
  FencedAllocatorWrapper allocator_;

  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
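
// Illustrative sketch (not part of the original header): how a MemoryChunk is
// typically driven. The shm id/buffer, the helper pointer, and the block size
// are assumptions for the example only.
//
//   MemoryChunk chunk(shm_id, shm_buffer, helper);
//   void* block = chunk.Alloc(1024);
//   if (block) {
//     // ... issue commands that reference chunk.shm_id() and
//     //     chunk.GetOffset(block) ...
//     chunk.FreePendingToken(block, helper->InsertToken());
//   }
//   chunk.FreeUnused();  // Reclaims blocks whose tokens have already passed.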
// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager
    : public base::trace_event::MemoryDumpProvider {
 public:
  enum MemoryLimit {
    kNoLimit = 0,
  };

  // |unused_memory_reclaim_limit|: When exceeded this causes pending memory
  // to be reclaimed before allocating more memory.
  MappedMemoryManager(CommandBufferHelper* helper,
                      size_t unused_memory_reclaim_limit);

  ~MappedMemoryManager() override;

  unsigned int chunk_size_multiple() const {
    return chunk_size_multiple_;
  }

  void set_chunk_size_multiple(unsigned int multiple) {
    DCHECK(multiple % FencedAllocator::kAllocAlignment == 0);
    chunk_size_multiple_ = multiple;
  }

  size_t max_allocated_bytes() const {
    return max_allocated_bytes_;
  }

  void set_max_allocated_bytes(size_t max_allocated_bytes) {
    max_allocated_bytes_ = max_allocated_bytes;
  }

  // Allocates a block of memory.
  // Parameters:
  //   size: size of memory to allocate.
  //   shm_id: pointer to variable to receive the shared memory id.
  //   shm_offset: pointer to variable to receive the shared memory offset.
  // Returns:
  //   pointer to allocated block of memory. NULL if failure.
  void* Alloc(
      unsigned int size, int32_t* shm_id, unsigned int* shm_offset);

  // Frees a block of memory.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  void Free(void* pointer);

  // Frees a block of memory, pending the passage of a token. That memory won't
  // be re-allocated until the token has passed through the command stream.
  //
  // Parameters:
  //   pointer: the pointer to the memory block to free.
  //   token: the token value to wait for before re-using the memory.
  void FreePendingToken(void* pointer, int32_t token);
  // Frees any shared memory that is not in use.
  void FreeUnused();

  // Overridden from base::trace_event::MemoryDumpProvider:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;

  // Used for testing.
  size_t num_chunks() const {
    return chunks_.size();
  }

  size_t bytes_in_use() const {
    size_t bytes_in_use = 0;
    for (size_t ii = 0; ii < chunks_.size(); ++ii) {
      MemoryChunk* chunk = chunks_[ii];
      bytes_in_use += chunk->bytes_in_use();
    }
    return bytes_in_use;
  }

  // Used for testing.
  size_t allocated_memory() const {
    return allocated_memory_;
  }

 private:
  typedef ScopedVector<MemoryChunk> MemoryChunkVector;

  // Size a chunk is rounded up to.
  unsigned int chunk_size_multiple_;
  CommandBufferHelper* helper_;
  MemoryChunkVector chunks_;
  size_t allocated_memory_;
  size_t max_free_bytes_;
  size_t max_allocated_bytes_;
  // A process-unique ID used for disambiguating memory dumps from different
  // mapped memory managers.
  int tracing_id_;

  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
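
// Illustrative sketch (not part of the original header): typical use of
// MappedMemoryManager as a transfer-buffer allocator. The helper pointer, the
// source data, and the sizes are assumptions for the example only.
//
//   MappedMemoryManager manager(helper, MappedMemoryManager::kNoLimit);
//   int32_t shm_id = 0;
//   unsigned int shm_offset = 0;
//   void* mem = manager.Alloc(1024, &shm_id, &shm_offset);
//   if (mem) {
//     memcpy(mem, data, 1024);  // Fill the shared memory block.
//     // ... issue a command that reads from (shm_id, shm_offset) ...
//     manager.FreePendingToken(mem, helper->InsertToken());
//   }
//   manager.FreeUnused();  // Optionally reclaim memory whose tokens passed.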
// A class that will manage the lifetime of a mapped memory allocation.
class GPU_EXPORT ScopedMappedMemoryPtr {
 public:
  ScopedMappedMemoryPtr(
      uint32_t size,
      CommandBufferHelper* helper,
      MappedMemoryManager* mapped_memory_manager)
      : buffer_(NULL),
        size_(0),
        shm_id_(0),
        shm_offset_(0),
        flush_after_release_(false),
        helper_(helper),
        mapped_memory_manager_(mapped_memory_manager) {
    Reset(size);
  }

  ~ScopedMappedMemoryPtr() {
    Release();
  }

  bool valid() const {
    return buffer_ != NULL;
  }

  void SetFlushAfterRelease(bool flush_after_release) {
    flush_after_release_ = flush_after_release;
  }

  uint32_t size() const {
    return size_;
  }

  int32_t shm_id() const {
    return shm_id_;
  }

  uint32_t offset() const {
    return shm_offset_;
  }

  void* address() const {
    return buffer_;
  }

  void Release();

  void Reset(uint32_t new_size);

 private:
  void* buffer_;
  uint32_t size_;
  int32_t shm_id_;
  uint32_t shm_offset_;
  bool flush_after_release_;
  CommandBufferHelper* helper_;
  MappedMemoryManager* mapped_memory_manager_;

  DISALLOW_COPY_AND_ASSIGN(ScopedMappedMemoryPtr);
};
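
// Illustrative sketch (not part of the original header): RAII-style use of
// ScopedMappedMemoryPtr. The helper and manager pointers and the size are
// assumptions for the example only.
//
//   {
//     ScopedMappedMemoryPtr scoped_mem(512, helper, &manager);
//     if (scoped_mem.valid()) {
//       // ... write into scoped_mem.address(), and reference
//       //     scoped_mem.shm_id() / scoped_mem.offset() in the command ...
//     }
//   }  // Destructor calls Release(), returning the block to the manager.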
}  // namespace gpu

#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_