// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
6 #define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
#include <stddef.h>
#include <stdint.h>

#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/scoped_vector.h"
#include "base/trace_event/memory_dump_provider.h"
#include "gpu/command_buffer/client/fenced_allocator.h"
#include "gpu/command_buffer/common/buffer.h"
#include "gpu/gpu_export.h"
// Forward declaration; the full definition lives in
// gpu/command_buffer/client/cmd_buffer_helper.h and is only needed by
// pointer here.
class CommandBufferHelper;
22 // Manages a shared memory segment.
23 class GPU_EXPORT MemoryChunk
{
25 MemoryChunk(int32_t shm_id
,
26 scoped_refptr
<gpu::Buffer
> shm
,
27 CommandBufferHelper
* helper
);
30 // Gets the size of the largest free block that is available without waiting.
31 unsigned int GetLargestFreeSizeWithoutWaiting() {
32 return allocator_
.GetLargestFreeSize();
35 // Gets the size of the largest free block that can be allocated if the
37 unsigned int GetLargestFreeSizeWithWaiting() {
38 return allocator_
.GetLargestFreeOrPendingSize();
41 // Gets the size of the chunk.
42 unsigned int GetSize() const {
43 return static_cast<unsigned int>(shm_
->size());
46 // The shared memory id for this chunk.
47 int32_t shm_id() const {
51 // Allocates a block of memory. If the buffer is out of directly available
52 // memory, this function may wait until memory that was freed "pending a
53 // token" can be re-used.
56 // size: the size of the memory block to allocate.
59 // the pointer to the allocated memory block, or NULL if out of
61 void* Alloc(unsigned int size
) {
62 return allocator_
.Alloc(size
);
65 // Gets the offset to a memory block given the base memory and the address.
66 // It translates NULL to FencedAllocator::kInvalidOffset.
67 unsigned int GetOffset(void* pointer
) {
68 return allocator_
.GetOffset(pointer
);
71 // Frees a block of memory.
74 // pointer: the pointer to the memory block to free.
75 void Free(void* pointer
) {
76 allocator_
.Free(pointer
);
79 // Frees a block of memory, pending the passage of a token. That memory won't
80 // be re-allocated until the token has passed through the command stream.
83 // pointer: the pointer to the memory block to free.
84 // token: the token value to wait for before re-using the memory.
85 void FreePendingToken(void* pointer
, unsigned int token
) {
86 allocator_
.FreePendingToken(pointer
, token
);
89 // Frees any blocks whose tokens have passed.
91 allocator_
.FreeUnused();
94 // Gets the free size of the chunk.
95 unsigned int GetFreeSize() { return allocator_
.GetFreeSize(); }
97 // Returns true if pointer is in the range of this block.
98 bool IsInChunk(void* pointer
) const {
99 return pointer
>= shm_
->memory() &&
101 reinterpret_cast<const int8_t*>(shm_
->memory()) + shm_
->size();
104 // Returns true of any memory in this chunk is in use.
106 return allocator_
.InUse();
109 size_t bytes_in_use() const {
110 return allocator_
.bytes_in_use();
115 scoped_refptr
<gpu::Buffer
> shm_
;
116 FencedAllocatorWrapper allocator_
;
118 DISALLOW_COPY_AND_ASSIGN(MemoryChunk
);
121 // Manages MemoryChunks.
122 class GPU_EXPORT MappedMemoryManager
123 : public base::trace_event::MemoryDumpProvider
{
129 // |unused_memory_reclaim_limit|: When exceeded this causes pending memory
130 // to be reclaimed before allocating more memory.
131 MappedMemoryManager(CommandBufferHelper
* helper
,
132 size_t unused_memory_reclaim_limit
);
134 ~MappedMemoryManager() override
;
136 unsigned int chunk_size_multiple() const {
137 return chunk_size_multiple_
;
140 void set_chunk_size_multiple(unsigned int multiple
) {
141 DCHECK(multiple
% FencedAllocator::kAllocAlignment
== 0);
142 chunk_size_multiple_
= multiple
;
145 size_t max_allocated_bytes() const {
146 return max_allocated_bytes_
;
149 void set_max_allocated_bytes(size_t max_allocated_bytes
) {
150 max_allocated_bytes_
= max_allocated_bytes
;
153 // Allocates a block of memory
155 // size: size of memory to allocate.
156 // shm_id: pointer to variable to receive the shared memory id.
157 // shm_offset: pointer to variable to receive the shared memory offset.
159 // pointer to allocated block of memory. NULL if failure.
161 unsigned int size
, int32_t* shm_id
, unsigned int* shm_offset
);
163 // Frees a block of memory.
166 // pointer: the pointer to the memory block to free.
167 void Free(void* pointer
);
169 // Frees a block of memory, pending the passage of a token. That memory won't
170 // be re-allocated until the token has passed through the command stream.
173 // pointer: the pointer to the memory block to free.
174 // token: the token value to wait for before re-using the memory.
175 void FreePendingToken(void* pointer
, int32_t token
);
177 // Free Any Shared memory that is not in use.
180 // Overridden from base::trace_event::MemoryDumpProvider:
181 bool OnMemoryDump(const base::trace_event::MemoryDumpArgs
& args
,
182 base::trace_event::ProcessMemoryDump
* pmd
) override
;
185 size_t num_chunks() const {
186 return chunks_
.size();
189 size_t bytes_in_use() const {
190 size_t bytes_in_use
= 0;
191 for (size_t ii
= 0; ii
< chunks_
.size(); ++ii
) {
192 MemoryChunk
* chunk
= chunks_
[ii
];
193 bytes_in_use
+= chunk
->bytes_in_use();
199 size_t allocated_memory() const {
200 return allocated_memory_
;
204 typedef ScopedVector
<MemoryChunk
> MemoryChunkVector
;
206 // size a chunk is rounded up to.
207 unsigned int chunk_size_multiple_
;
208 CommandBufferHelper
* helper_
;
209 MemoryChunkVector chunks_
;
210 size_t allocated_memory_
;
211 size_t max_free_bytes_
;
212 size_t max_allocated_bytes_
;
213 // A process-unique ID used for disambiguating memory dumps from different
214 // mapped memory manager.
217 DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager
);
220 // A class that will manage the lifetime of a mapped memory allocation
221 class GPU_EXPORT ScopedMappedMemoryPtr
{
223 ScopedMappedMemoryPtr(
225 CommandBufferHelper
* helper
,
226 MappedMemoryManager
* mapped_memory_manager
)
231 flush_after_release_(false),
233 mapped_memory_manager_(mapped_memory_manager
) {
237 ~ScopedMappedMemoryPtr() {
242 return buffer_
!= NULL
;
245 void SetFlushAfterRelease(bool flush_after_release
) {
246 flush_after_release_
= flush_after_release
;
249 uint32_t size() const {
253 int32_t shm_id() const {
257 uint32_t offset() const {
261 void* address() const {
267 void Reset(uint32_t new_size
);
273 uint32_t shm_offset_
;
274 bool flush_after_release_
;
275 CommandBufferHelper
* helper_
;
276 MappedMemoryManager
* mapped_memory_manager_
;
277 DISALLOW_COPY_AND_ASSIGN(ScopedMappedMemoryPtr
);
282 #endif // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_