Only grant permissions to new extensions from sync if they have the expected version
[chromium-blink-merge.git] / gpu / command_buffer / client / fenced_allocator.cc
blob726fe7dafd3a311ba9e45bcad554ca43ed676aa2
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // This file contains the implementation of the FencedAllocator class.
7 #include "gpu/command_buffer/client/fenced_allocator.h"
9 #include <algorithm>
11 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
13 namespace gpu {
15 namespace {
17 // Round down to the largest multiple of kAllocAlignment no greater than |size|.
18 unsigned int RoundDown(unsigned int size) {
19 return size & ~(FencedAllocator::kAllocAlignment - 1);
22 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
23 unsigned int RoundUp(unsigned int size) {
24 return (size + (FencedAllocator::kAllocAlignment - 1)) &
25 ~(FencedAllocator::kAllocAlignment - 1);
28 } // namespace
#ifndef _MSC_VER
// Out-of-line definitions for the in-class static constants. The C++
// standard requires these when the constants are ODR-used (e.g. bound to a
// const reference); MSVC chokes on them, hence the guard.
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
const unsigned int FencedAllocator::kAllocAlignment;
#endif
35 FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper)
36 : helper_(helper), bytes_in_use_(0) {
37 Block block = { FREE, 0, RoundDown(size), kUnusedToken };
38 blocks_.push_back(block);
FencedAllocator::~FencedAllocator() {
  // Free blocks pending tokens. WaitForTokenAndFreeBlock() frees the block
  // and collapses it with its free neighbors, returning the index of the
  // merged block (which may be smaller than |i|), so |i| is reassigned from
  // its return value rather than advanced blindly.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }
  // Once every pending block has been reclaimed, everything must have
  // collapsed back into one single FREE block.
  DCHECK_EQ(blocks_.size(), 1u);
  DCHECK_EQ(blocks_[0].state, FREE);
}
53 // Looks for a non-allocated block that is big enough. Search in the FREE
54 // blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN
55 // blocks, waiting for them. The current implementation isn't smart about
56 // optimizing what to wait for, just looks inside the block in order (first-fit
57 // as well).
58 FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
59 // size of 0 is not allowed because it would be inconsistent to only sometimes
60 // have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
61 if (size == 0) {
62 return kInvalidOffset;
65 // Round up the allocation size to ensure alignment.
66 size = RoundUp(size);
68 // Try first to allocate in a free block.
69 for (unsigned int i = 0; i < blocks_.size(); ++i) {
70 Block &block = blocks_[i];
71 if (block.state == FREE && block.size >= size) {
72 return AllocInBlock(i, size);
76 // No free block is available. Look for blocks pending tokens, and wait for
77 // them to be re-usable.
78 for (unsigned int i = 0; i < blocks_.size(); ++i) {
79 if (blocks_[i].state != FREE_PENDING_TOKEN)
80 continue;
81 i = WaitForTokenAndFreeBlock(i);
82 if (blocks_[i].size >= size)
83 return AllocInBlock(i, size);
85 return kInvalidOffset;
88 // Looks for the corresponding block, mark it FREE, and collapse it if
89 // necessary.
90 void FencedAllocator::Free(FencedAllocator::Offset offset) {
91 BlockIndex index = GetBlockByOffset(offset);
92 DCHECK_NE(blocks_[index].state, FREE);
93 Block &block = blocks_[index];
95 if (block.state == IN_USE)
96 bytes_in_use_ -= block.size;
98 block.state = FREE;
99 CollapseFreeBlock(index);
102 // Looks for the corresponding block, mark it FREE_PENDING_TOKEN.
103 void FencedAllocator::FreePendingToken(
104 FencedAllocator::Offset offset, int32 token) {
105 BlockIndex index = GetBlockByOffset(offset);
106 Block &block = blocks_[index];
107 if (block.state == IN_USE)
108 bytes_in_use_ -= block.size;
109 block.state = FREE_PENDING_TOKEN;
110 block.token = token;
113 // Gets the max of the size of the blocks marked as free.
114 unsigned int FencedAllocator::GetLargestFreeSize() {
115 FreeUnused();
116 unsigned int max_size = 0;
117 for (unsigned int i = 0; i < blocks_.size(); ++i) {
118 Block &block = blocks_[i];
119 if (block.state == FREE)
120 max_size = std::max(max_size, block.size);
122 return max_size;
125 // Gets the size of the largest segment of blocks that are either FREE or
126 // FREE_PENDING_TOKEN.
127 unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
128 unsigned int max_size = 0;
129 unsigned int current_size = 0;
130 for (unsigned int i = 0; i < blocks_.size(); ++i) {
131 Block &block = blocks_[i];
132 if (block.state == IN_USE) {
133 max_size = std::max(max_size, current_size);
134 current_size = 0;
135 } else {
136 DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
137 current_size += block.size;
140 return std::max(max_size, current_size);
143 // Gets the total size of all blocks marked as free.
144 unsigned int FencedAllocator::GetFreeSize() {
145 FreeUnused();
146 unsigned int size = 0;
147 for (unsigned int i = 0; i < blocks_.size(); ++i) {
148 Block& block = blocks_[i];
149 if (block.state == FREE)
150 size += block.size;
152 return size;
155 // Makes sure that:
156 // - there is at least one block.
157 // - there are no contiguous FREE blocks (they should have been collapsed).
158 // - the successive offsets match the block sizes, and they are in order.
159 bool FencedAllocator::CheckConsistency() {
160 if (blocks_.size() < 1) return false;
161 for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
162 Block &current = blocks_[i];
163 Block &next = blocks_[i + 1];
164 // This test is NOT included in the next one, because offset is unsigned.
165 if (next.offset <= current.offset)
166 return false;
167 if (next.offset != current.offset + current.size)
168 return false;
169 if (current.state == FREE && next.state == FREE)
170 return false;
172 return true;
175 // Returns false if all blocks are actually FREE, in which
176 // case they would be coalesced into one block, true otherwise.
177 bool FencedAllocator::InUse() {
178 return blocks_.size() != 1 || blocks_[0].state != FREE;
// Collapse the block to the next one, then to the previous one. Provided the
// structure is consistent, those are the only blocks eligible for collapse.
// Returns the index of the surviving merged block (may be |index| - 1).
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  // Merge the following block into this one first: erasing index+1 does not
  // shift the position of |index|.
  if (index + 1 < blocks_.size()) {
    Block &next = blocks_[index + 1];
    if (next.state == FREE) {
      blocks_[index].size += next.size;
      blocks_.erase(blocks_.begin() + index + 1);
    }
  }
  // Then merge this block into the preceding one; the predecessor survives,
  // so the returned index shifts down by one.
  if (index > 0) {
    Block &prev = blocks_[index - 1];
    if (prev.state == FREE) {
      prev.size += blocks_[index].size;
      blocks_.erase(blocks_.begin() + index);
      --index;
    }
  }
  return index;
}
203 // Waits for the block's token, then mark the block as free, then collapse it.
204 FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
205 BlockIndex index) {
206 Block &block = blocks_[index];
207 DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
208 helper_->WaitForToken(block.token);
209 block.state = FREE;
210 return CollapseFreeBlock(index);
213 // Frees any blocks pending a token for which the token has been read.
214 void FencedAllocator::FreeUnused() {
215 for (unsigned int i = 0; i < blocks_.size();) {
216 Block& block = blocks_[i];
217 if (block.state == FREE_PENDING_TOKEN &&
218 helper_->HasTokenPassed(block.token)) {
219 block.state = FREE;
220 i = CollapseFreeBlock(i);
221 } else {
222 ++i;
227 // If the block is exactly the requested size, simply mark it IN_USE, otherwise
228 // split it and mark the first one (of the requested size) IN_USE.
229 FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
230 unsigned int size) {
231 Block &block = blocks_[index];
232 DCHECK_GE(block.size, size);
233 DCHECK_EQ(block.state, FREE);
234 Offset offset = block.offset;
235 bytes_in_use_ += size;
236 if (block.size == size) {
237 block.state = IN_USE;
238 return offset;
240 Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
241 block.state = IN_USE;
242 block.size = size;
243 // this is the last thing being done because it may invalidate block;
244 blocks_.insert(blocks_.begin() + index + 1, newblock);
245 return offset;
248 // The blocks are in offset order, so we can do a binary search.
249 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
250 Block templ = { IN_USE, offset, 0, kUnusedToken };
251 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
252 templ, OffsetCmp());
253 DCHECK(it != blocks_.end() && it->offset == offset);
254 return it-blocks_.begin();
257 } // namespace gpu