1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // This file contains the implementation of the FencedAllocator class.
7 #include "gpu/command_buffer/client/fenced_allocator.h"
11 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
// Allocation alignment, must be a power of two.
const unsigned int kAllocAlignment = 16;

// Round down to the largest multiple of kAllocAlignment no greater than
// |size|. Relies on kAllocAlignment being a power of two, so that
// ~(kAllocAlignment - 1) is a mask clearing the low-order bits.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}

// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}
33 const FencedAllocator::Offset
FencedAllocator::kInvalidOffset
;
36 FencedAllocator::FencedAllocator(unsigned int size
,
37 CommandBufferHelper
* helper
,
38 const base::Closure
& poll_callback
)
40 poll_callback_(poll_callback
),
42 Block block
= { FREE
, 0, RoundDown(size
), kUnusedToken
};
43 blocks_
.push_back(block
);
46 FencedAllocator::~FencedAllocator() {
47 // Free blocks pending tokens.
48 for (unsigned int i
= 0; i
< blocks_
.size(); ++i
) {
49 if (blocks_
[i
].state
== FREE_PENDING_TOKEN
) {
50 i
= WaitForTokenAndFreeBlock(i
);
54 DCHECK_EQ(blocks_
.size(), 1u);
55 DCHECK_EQ(blocks_
[0].state
, FREE
);
58 // Looks for a non-allocated block that is big enough. Search in the FREE
59 // blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN
60 // blocks, waiting for them. The current implementation isn't smart about
61 // optimizing what to wait for, just looks inside the block in order (first-fit
63 FencedAllocator::Offset
FencedAllocator::Alloc(unsigned int size
) {
64 // size of 0 is not allowed because it would be inconsistent to only sometimes
65 // have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
67 return kInvalidOffset
;
70 // Round up the allocation size to ensure alignment.
73 // Try first to allocate in a free block.
74 for (unsigned int i
= 0; i
< blocks_
.size(); ++i
) {
75 Block
&block
= blocks_
[i
];
76 if (block
.state
== FREE
&& block
.size
>= size
) {
77 return AllocInBlock(i
, size
);
81 // No free block is available. Look for blocks pending tokens, and wait for
82 // them to be re-usable.
83 for (unsigned int i
= 0; i
< blocks_
.size(); ++i
) {
84 if (blocks_
[i
].state
!= FREE_PENDING_TOKEN
)
86 i
= WaitForTokenAndFreeBlock(i
);
87 if (blocks_
[i
].size
>= size
)
88 return AllocInBlock(i
, size
);
90 return kInvalidOffset
;
93 // Looks for the corresponding block, mark it FREE, and collapse it if
95 void FencedAllocator::Free(FencedAllocator::Offset offset
) {
96 BlockIndex index
= GetBlockByOffset(offset
);
97 DCHECK_NE(blocks_
[index
].state
, FREE
);
98 Block
&block
= blocks_
[index
];
100 if (block
.state
== IN_USE
)
101 bytes_in_use_
-= block
.size
;
104 CollapseFreeBlock(index
);
107 // Looks for the corresponding block, mark it FREE_PENDING_TOKEN.
108 void FencedAllocator::FreePendingToken(
109 FencedAllocator::Offset offset
, int32 token
) {
110 BlockIndex index
= GetBlockByOffset(offset
);
111 Block
&block
= blocks_
[index
];
112 if (block
.state
== IN_USE
)
113 bytes_in_use_
-= block
.size
;
114 block
.state
= FREE_PENDING_TOKEN
;
118 // Gets the max of the size of the blocks marked as free.
119 unsigned int FencedAllocator::GetLargestFreeSize() {
121 unsigned int max_size
= 0;
122 for (unsigned int i
= 0; i
< blocks_
.size(); ++i
) {
123 Block
&block
= blocks_
[i
];
124 if (block
.state
== FREE
)
125 max_size
= std::max(max_size
, block
.size
);
130 // Gets the size of the largest segment of blocks that are either FREE or
131 // FREE_PENDING_TOKEN.
132 unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
133 unsigned int max_size
= 0;
134 unsigned int current_size
= 0;
135 for (unsigned int i
= 0; i
< blocks_
.size(); ++i
) {
136 Block
&block
= blocks_
[i
];
137 if (block
.state
== IN_USE
) {
138 max_size
= std::max(max_size
, current_size
);
141 DCHECK(block
.state
== FREE
|| block
.state
== FREE_PENDING_TOKEN
);
142 current_size
+= block
.size
;
145 return std::max(max_size
, current_size
);
149 // - there is at least one block.
150 // - there are no contiguous FREE blocks (they should have been collapsed).
151 // - the successive offsets match the block sizes, and they are in order.
152 bool FencedAllocator::CheckConsistency() {
153 if (blocks_
.size() < 1) return false;
154 for (unsigned int i
= 0; i
< blocks_
.size() - 1; ++i
) {
155 Block
¤t
= blocks_
[i
];
156 Block
&next
= blocks_
[i
+ 1];
157 // This test is NOT included in the next one, because offset is unsigned.
158 if (next
.offset
<= current
.offset
)
160 if (next
.offset
!= current
.offset
+ current
.size
)
162 if (current
.state
== FREE
&& next
.state
== FREE
)
168 // Returns false if all blocks are actually FREE, in which
169 // case they would be coalesced into one block, true otherwise.
170 bool FencedAllocator::InUse() {
171 return blocks_
.size() != 1 || blocks_
[0].state
!= FREE
;
174 // Collapse the block to the next one, then to the previous one. Provided the
175 // structure is consistent, those are the only blocks eligible for collapse.
176 FencedAllocator::BlockIndex
FencedAllocator::CollapseFreeBlock(
178 if (index
+ 1 < blocks_
.size()) {
179 Block
&next
= blocks_
[index
+ 1];
180 if (next
.state
== FREE
) {
181 blocks_
[index
].size
+= next
.size
;
182 blocks_
.erase(blocks_
.begin() + index
+ 1);
186 Block
&prev
= blocks_
[index
- 1];
187 if (prev
.state
== FREE
) {
188 prev
.size
+= blocks_
[index
].size
;
189 blocks_
.erase(blocks_
.begin() + index
);
196 // Waits for the block's token, then mark the block as free, then collapse it.
197 FencedAllocator::BlockIndex
FencedAllocator::WaitForTokenAndFreeBlock(
199 Block
&block
= blocks_
[index
];
200 DCHECK_EQ(block
.state
, FREE_PENDING_TOKEN
);
201 helper_
->WaitForToken(block
.token
);
203 return CollapseFreeBlock(index
);
206 // Frees any blocks pending a token for which the token has been read.
207 void FencedAllocator::FreeUnused() {
208 // Free any potential blocks that has its lifetime handled outside.
209 poll_callback_
.Run();
211 for (unsigned int i
= 0; i
< blocks_
.size();) {
212 Block
& block
= blocks_
[i
];
213 if (block
.state
== FREE_PENDING_TOKEN
&&
214 helper_
->HasTokenPassed(block
.token
)) {
216 i
= CollapseFreeBlock(i
);
223 // If the block is exactly the requested size, simply mark it IN_USE, otherwise
224 // split it and mark the first one (of the requested size) IN_USE.
225 FencedAllocator::Offset
FencedAllocator::AllocInBlock(BlockIndex index
,
227 Block
&block
= blocks_
[index
];
228 DCHECK_GE(block
.size
, size
);
229 DCHECK_EQ(block
.state
, FREE
);
230 Offset offset
= block
.offset
;
231 bytes_in_use_
+= size
;
232 if (block
.size
== size
) {
233 block
.state
= IN_USE
;
236 Block newblock
= { FREE
, offset
+ size
, block
.size
- size
, kUnusedToken
};
237 block
.state
= IN_USE
;
239 // this is the last thing being done because it may invalidate block;
240 blocks_
.insert(blocks_
.begin() + index
+ 1, newblock
);
244 // The blocks are in offset order, so we can do a binary search.
245 FencedAllocator::BlockIndex
FencedAllocator::GetBlockByOffset(Offset offset
) {
246 Block templ
= { IN_USE
, offset
, 0, kUnusedToken
};
247 Container::iterator it
= std::lower_bound(blocks_
.begin(), blocks_
.end(),
249 DCHECK(it
!= blocks_
.end() && it
->offset
== offset
);
250 return it
-blocks_
.begin();