// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the FencedAllocator class.

#include "gpu/command_buffer/client/fenced_allocator.h"

#include <algorithm>

#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

namespace {

// Allocation alignment, must be a power of two.
const unsigned int kAllocAlignment = 16;

// Round down to the largest multiple of kAllocAlignment no greater than
// |size|.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}

// Round up to the smallest multiple of kAllocAlignment no smaller than
// |size|.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}

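// Worked example: with kAllocAlignment == 16, ~(kAllocAlignment - 1) is ~0xF,
// which clears the low four bits, so:
//   RoundDown(20) == 20 & ~0xF        == 16
//   RoundUp(20)   == (20 + 15) & ~0xF == 32
//   RoundUp(32)   == (32 + 15) & ~0xF == 32  (already aligned, unchanged)
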
}  // namespace

#ifndef _MSC_VER
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif

FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper *helper)
    : helper_(helper),
      bytes_in_use_(0) {
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}

FencedAllocator::~FencedAllocator() {
  // Free blocks pending tokens.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }
  // These checks are not valid if the service has crashed or lost the
  // context.
  // GPU_DCHECK_EQ(blocks_.size(), 1u);
  // GPU_DCHECK_EQ(blocks_[0].state, FREE);
}

// Looks for a non-allocated block that is big enough. Search in the FREE
// blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN
// blocks, waiting for them. The current implementation isn't smart about
// optimizing what to wait for, it just looks at the blocks in order
// (first-fit as well).
FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
  // A size of 0 is not allowed because it would be inconsistent to have it
  // succeed only sometimes. Example: Alloc(SizeOfBuffer), Alloc(0).
  if (size == 0) {
    return kInvalidOffset;
  }

  // Round up the allocation size to ensure alignment.
  size = RoundUp(size);

  // Try first to allocate in a free block.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE && block.size >= size) {
      return AllocInBlock(i, size);
    }
  }

  // No free block is available. Look for blocks pending tokens, and wait for
  // them to be re-usable.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state != FREE_PENDING_TOKEN)
      continue;
    i = WaitForTokenAndFreeBlock(i);
    if (blocks_[i].size >= size)
      return AllocInBlock(i, size);
  }
  return kInvalidOffset;
}

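// A minimal usage sketch, assuming |helper| points at an initialized
// CommandBufferHelper and |buffer_size| is the size of the managed region:
//   FencedAllocator allocator(buffer_size, helper);
//   FencedAllocator::Offset offset = allocator.Alloc(256);
//   if (offset != FencedAllocator::kInvalidOffset) {
//     // ... use the 256 aligned bytes starting at |offset| ...
//   }
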
// Looks up the corresponding block, marks it FREE, and collapses it if
// necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
  BlockIndex index = GetBlockByOffset(offset);
  GPU_DCHECK_NE(blocks_[index].state, FREE);
  Block &block = blocks_[index];

  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;

  block.state = FREE;
  CollapseFreeBlock(index);
}

// Looks up the corresponding block and marks it FREE_PENDING_TOKEN.
void FencedAllocator::FreePendingToken(
    FencedAllocator::Offset offset, int32 token) {
  BlockIndex index = GetBlockByOffset(offset);
  Block &block = blocks_[index];
  if (block.state == IN_USE)
    bytes_in_use_ -= block.size;
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}

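// The typical fenced-free pattern: rather than blocking until the GPU has
// consumed the data, insert a token into the command stream and release the
// block against that token. Sketch, assuming |helper| and |allocator| as
// above:
//   int32 token = helper->InsertToken();
//   allocator.FreePendingToken(offset, token);
// The block becomes reusable once the service has processed the token.
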
// Gets the size of the largest block marked as FREE.
unsigned int FencedAllocator::GetLargestFreeSize() {
  FreeUnused();
  unsigned int max_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == FREE)
      max_size = std::max(max_size, block.size);
  }
  return max_size;
}

// Gets the size of the largest segment of blocks that are either FREE or
// FREE_PENDING_TOKEN.
unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
  unsigned int max_size = 0;
  unsigned int current_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
    if (block.state == IN_USE) {
      max_size = std::max(max_size, current_size);
      current_size = 0;
    } else {
      GPU_DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
      current_size += block.size;
    }
  }
  return std::max(max_size, current_size);
}

// Checks for consistency inside the book-keeping structures. Used for
// testing. The invariants are:
// - there is at least one block.
// - there are no contiguous FREE blocks (they should have been collapsed).
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
  if (blocks_.size() < 1) return false;
  for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
    Block &current = blocks_[i];
    Block &next = blocks_[i + 1];
    // This test is NOT included in the next one, because offset is unsigned.
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}

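// Example of the invariants, with blocks written as {state, offset, size}:
//   [{FREE, 0, 16}, {IN_USE, 16, 32}, {FREE, 48, 16}]
// is consistent: offsets are strictly increasing, each offset equals the
// previous offset plus the previous size, and no two FREE blocks are
// adjacent. By contrast, [{FREE, 0, 16}, {FREE, 16, 48}] fails the last
// check, because adjacent FREE blocks should have been collapsed.
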
// Returns false if all blocks are actually FREE, in which
// case they would be coalesced into one block, true otherwise.
bool FencedAllocator::InUse() {
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}

// Collapse the block to the next one, then to the previous one. Provided the
// structure is consistent, those are the only blocks eligible for collapse.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  if (index + 1 < blocks_.size()) {
    Block &next = blocks_[index + 1];
    if (next.state == FREE) {
      blocks_[index].size += next.size;
      blocks_.erase(blocks_.begin() + index + 1);
    }
  }
  if (index > 0) {
    Block &prev = blocks_[index - 1];
    if (prev.state == FREE) {
      prev.size += blocks_[index].size;
      blocks_.erase(blocks_.begin() + index);
      index--;
    }
  }
  return index;
}

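// Example: right after the middle block of
//   [{FREE, 0, 16}, {FREE, 16, 32}, {FREE, 48, 16}]
// has been marked FREE, CollapseFreeBlock(1) first merges it with the next
// block, then with the previous one, leaving [{FREE, 0, 64}] and returning
// index 0, the surviving merged block.
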
// Waits for the block's token, then marks the block as FREE and collapses it.
FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
    BlockIndex index) {
  Block &block = blocks_[index];
  GPU_DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
  helper_->WaitForToken(block.token);
  block.state = FREE;
  return CollapseFreeBlock(index);
}

// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
  int32 last_token_read = helper_->last_token_read();
  for (unsigned int i = 0; i < blocks_.size();) {
    Block& block = blocks_[i];
    if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
      block.state = FREE;
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}

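// Example: if three blocks are pending tokens 3, 5 and 9 and
// helper_->last_token_read() returns 5, the first two are reclaimed without
// blocking, while the block waiting on token 9 stays FREE_PENDING_TOKEN
// until the service catches up.
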
// If the block is exactly the requested size, simply mark it IN_USE, otherwise
// split it and mark the first one (of the requested size) IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                      unsigned int size) {
  Block &block = blocks_[index];
  GPU_DCHECK_GE(block.size, size);
  GPU_DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
  bytes_in_use_ += size;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken };
  block.state = IN_USE;
  block.size = size;
  // The insertion is done last because it may invalidate |block|.
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}

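// Example: allocating 16 bytes from [{FREE, 0, 64}] splits it into
//   [{IN_USE, 0, 16}, {FREE, 16, 48}]
// and returns offset 0; a request for exactly 64 bytes would simply flip the
// block to IN_USE without splitting.
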
// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  GPU_DCHECK(it != blocks_.end() && it->offset == offset);
  return it - blocks_.begin();
}

}  // namespace gpu