gpu/command_buffer/client/transfer_buffer.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// A class to manage a growing transfer buffer.

#include "gpu/command_buffer/client/transfer_buffer.h"

#include "base/bits.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

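// Overview: TransferBuffer owns a block of shared memory obtained from the
// command buffer. The first result_size_ bytes are set aside for values
// written back by the service; the remainder is handed out through a
// RingBuffer whose blocks are reclaimed once their associated tokens have
// passed. The buffer is re-created on demand at sizes clamped between
// min_buffer_size_ and max_buffer_size_.
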
namespace gpu {

TransferBuffer::TransferBuffer(
    CommandBufferHelper* helper)
    : helper_(helper),
      result_size_(0),
      default_buffer_size_(0),
      min_buffer_size_(0),
      max_buffer_size_(0),
      alignment_(0),
      size_to_flush_(0),
      bytes_since_last_flush_(0),
      buffer_id_(-1),
      result_buffer_(NULL),
      result_shm_offset_(0),
      usable_(true) {
}

TransferBuffer::~TransferBuffer() {
  Free();
}

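// Records the sizing policy and allocates the initial buffer. Returns false
// if no buffer could be created.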
bool TransferBuffer::Initialize(
    unsigned int default_buffer_size,
    unsigned int result_size,
    unsigned int min_buffer_size,
    unsigned int max_buffer_size,
    unsigned int alignment,
    unsigned int size_to_flush) {
  result_size_ = result_size;
  default_buffer_size_ = default_buffer_size;
  min_buffer_size_ = min_buffer_size;
  max_buffer_size_ = max_buffer_size;
  alignment_ = alignment;
  size_to_flush_ = size_to_flush;
  ReallocateRingBuffer(default_buffer_size_ - result_size);
  return HaveBuffer();
}

void TransferBuffer::Free() {
  if (HaveBuffer()) {
    TRACE_EVENT0("gpu", "TransferBuffer::Free");
    helper_->Finish();
    helper_->command_buffer()->DestroyTransferBuffer(buffer_id_);
    buffer_id_ = -1;
    buffer_ = NULL;
    result_buffer_ = NULL;
    result_shm_offset_ = 0;
    ring_buffer_.reset();
    bytes_since_last_flush_ = 0;
  }
}

bool TransferBuffer::HaveBuffer() const {
  DCHECK(buffer_id_ == -1 || buffer_.get());
  return buffer_id_ != -1;
}

RingBuffer::Offset TransferBuffer::GetOffset(void* pointer) const {
  return ring_buffer_->GetOffset(pointer);
}

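// Returns the block to the ring buffer, to be reclaimed once |token| has
// passed, and flushes the command buffer whenever at least size_to_flush_
// bytes have been handed out since the last flush.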
void TransferBuffer::FreePendingToken(void* p, unsigned int token) {
  ring_buffer_->FreePendingToken(p, token);
  if (bytes_since_last_flush_ >= size_to_flush_ && size_to_flush_ > 0) {
    helper_->Flush();
    bytes_since_last_flush_ = 0;
  }
}

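// Tries to create a transfer buffer of the requested size, halving the size
// on each failure until it drops below min_buffer_size_. If no allocation
// succeeds, the TransferBuffer is marked unusable.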
void TransferBuffer::AllocateRingBuffer(unsigned int size) {
  for (; size >= min_buffer_size_; size /= 2) {
    int32 id = -1;
    scoped_refptr<gpu::Buffer> buffer =
        helper_->command_buffer()->CreateTransferBuffer(size, &id);
    if (id != -1) {
      DCHECK(buffer.get());
      buffer_ = buffer;
      ring_buffer_.reset(new RingBuffer(
          alignment_,
          result_size_,
          buffer_->size() - result_size_,
          helper_,
          static_cast<char*>(buffer_->memory()) + result_size_));
      buffer_id_ = id;
      result_buffer_ = buffer_->memory();
      result_shm_offset_ = 0;
      return;
    }
    // We failed, so don't try anything larger than this.
    max_buffer_size_ = size / 2;
  }
  usable_ = false;
}

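// Rounds |dimension| up to the nearest power of two, e.g. 300 -> 512 and
// 4096 -> 4096; zero maps to zero.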
static unsigned int ComputePOTSize(unsigned int dimension) {
  return (dimension == 0) ? 0 : 1 << base::bits::Log2Ceiling(dimension);
}

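// Grows the buffer if the current one cannot hold |size| bytes plus the
// result area. The target size is rounded up to a power of two and clamped
// to [min_buffer_size_, max_buffer_size_]; for illustration, with a 64K
// default, 4K minimum, 1M maximum and a 16-byte result area, a 100,000-byte
// request rounds up to 131072, which is within the clamp, so a 128K buffer
// is requested.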
void TransferBuffer::ReallocateRingBuffer(unsigned int size) {
  // What size buffer would we ask for if we needed a new one?
  unsigned int needed_buffer_size = ComputePOTSize(size + result_size_);
  needed_buffer_size = std::max(needed_buffer_size, min_buffer_size_);
  needed_buffer_size = std::max(needed_buffer_size, default_buffer_size_);
  needed_buffer_size = std::min(needed_buffer_size, max_buffer_size_);

  if (usable_ && (!HaveBuffer() || needed_buffer_size > buffer_->size())) {
    if (HaveBuffer()) {
      Free();
    }
    AllocateRingBuffer(needed_buffer_size);
  }
}

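// Allocates up to |size| bytes. If the ring buffer cannot satisfy the full
// request, the largest block currently available is returned instead, with
// the actual amount written to |size_allocated|.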
void* TransferBuffer::AllocUpTo(
    unsigned int size, unsigned int* size_allocated) {
  DCHECK(size_allocated);

  ReallocateRingBuffer(size);

  if (!HaveBuffer()) {
    return NULL;
  }

  unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
  *size_allocated = std::min(max_size, size);
  bytes_since_last_flush_ += *size_allocated;
  return ring_buffer_->Alloc(*size_allocated);
}

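// All-or-nothing variant: returns NULL if the full |size| cannot be
// satisfied, rather than handing back a smaller block.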
void* TransferBuffer::Alloc(unsigned int size) {
  ReallocateRingBuffer(size);

  if (!HaveBuffer()) {
    return NULL;
  }

  unsigned int max_size = ring_buffer_->GetLargestFreeOrPendingSize();
  if (size > max_size) {
    return NULL;
  }

  bytes_since_last_flush_ += size;
  return ring_buffer_->Alloc(size);
}

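// The first result_size_ bytes of the shared memory are reserved for values
// written back by the service. The accessors below expose that region, its
// offset, and the id of the shared memory block.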
void* TransferBuffer::GetResultBuffer() {
  ReallocateRingBuffer(result_size_);
  return result_buffer_;
}

int TransferBuffer::GetResultOffset() {
  ReallocateRingBuffer(result_size_);
  return result_shm_offset_;
}

int TransferBuffer::GetShmId() {
  ReallocateRingBuffer(result_size_);
  return buffer_id_;
}

unsigned int TransferBuffer::GetCurrentMaxAllocationWithoutRealloc() const {
  return HaveBuffer() ? ring_buffer_->GetLargestFreeOrPendingSize() : 0;
}

unsigned int TransferBuffer::GetMaxAllocation() const {
  return HaveBuffer() ? max_buffer_size_ - result_size_ : 0;
}

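// ScopedTransferBufferPtr ties a single transfer-buffer allocation to a C++
// scope: Reset() grabs a new block (possibly smaller than requested) and
// Release() returns it behind a freshly inserted token so the ring buffer
// can reclaim it once the command buffer has caught up.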
void ScopedTransferBufferPtr::Release() {
  if (buffer_) {
    transfer_buffer_->FreePendingToken(buffer_, helper_->InsertToken());
    buffer_ = NULL;
    size_ = 0;
  }
}

void ScopedTransferBufferPtr::Reset(unsigned int new_size) {
  Release();
  // NOTE: we allocate buffers of size 0 so that HaveBuffer will be true, so
  // that address() will return a pointer just like malloc, and so that
  // GetShmId() will be valid. That has the side effect that we'll insert a
  // token on free. We could add code to skip the token for a zero size
  // buffer, but it doesn't seem worth the complication.
  buffer_ = transfer_buffer_->AllocUpTo(new_size, &size_);
}

}  // namespace gpu
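
// A rough usage sketch, assuming the ScopedTransferBufferPtr interface
// declared in transfer_buffer.h (a constructor taking a size, a
// CommandBufferHelper* and a TransferBufferInterface*, plus valid(),
// address(), size(), shm_id() and offset()); |helper|, |data| and
// DoSomethingWithShm() are hypothetical stand-ins:
//
//   TransferBuffer transfer_buffer(helper);
//   transfer_buffer.Initialize(64 * 1024,    // default_buffer_size
//                              16,           // result_size
//                              4 * 1024,     // min_buffer_size
//                              1024 * 1024,  // max_buffer_size
//                              16,           // alignment
//                              256 * 1024);  // size_to_flush
//   {
//     ScopedTransferBufferPtr ptr(1024, helper, &transfer_buffer);
//     if (ptr.valid()) {
//       memcpy(ptr.address(), data, ptr.size());
//       DoSomethingWithShm(ptr.shm_id(), ptr.offset(), ptr.size());
//     }
//   }  // Leaving the scope releases the block behind a new token.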