Revert "Fix broken channel icon in chrome://help on CrOS" and try again
[chromium-blink-merge.git] / cc / raster / one_copy_tile_task_worker_pool.cc
blob169fc36c2eb49bc1a5c9ec019c9c0277bd3b68ee
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/one_copy_tile_task_worker_pool.h"

#include <algorithm>
#include <limits>

#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/base/math_util.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/platform_color.h"
#include "cc/resources/resource_format.h"
#include "cc/resources/resource_util.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "ui/gfx/buffer_format_util.h"

namespace cc {
namespace {
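
// RasterBufferImpl is a thin adapter between the RasterBuffer interface and
// the pool: Playback() forwards straight to
// OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(), which rasters
// into a staging buffer and then copies the result into |resource_|.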
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourceFormat resource_format,
                   const Resource* resource,
                   uint64_t previous_content_id)
      : worker_pool_(worker_pool),
        resource_(resource),
        lock_(resource_provider, resource->id()),
        previous_content_id_(previous_content_id) {}

  ~RasterBufferImpl() override {}

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& raster_full_rect,
                const gfx::Rect& raster_dirty_rect,
                uint64_t new_content_id,
                float scale,
                bool include_images) override {
    worker_pool_->PlaybackAndCopyOnWorkerThread(
        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
        scale, include_images, previous_content_id_, new_content_id);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  const Resource* resource_;
  ResourceProvider::ScopedWriteLockGL lock_;
  uint64_t previous_content_id_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Delay between checking for query result to be available.
const int kCheckForQueryResultAvailableTickRateMs = 1;

// Number of attempts to allow before we perform a check that will wait for
// query to complete.
const int kMaxCheckForQueryResultAvailableAttempts = 256;

// 4MiB is the size of 4 512x512 tiles, which has proven to be a good
// default batch size for copy operations.
const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
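// (A 512x512 tile at 4 bytes per pixel is 512 * 512 * 4 = 1 MiB, so the
// 4 MiB limit corresponds to roughly four such tiles.)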

// Delay before a staging buffer might be released.
const int kStagingBufferExpirationDelayMs = 1000;

bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
  unsigned complete = 1;
  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
  return !!complete;
}
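
// Polls CheckForQueryResult() up to kMaxCheckForQueryResultAvailableAttempts
// times, sleeping kCheckForQueryResultAvailableTickRateMs between attempts,
// and then falls back to a blocking GL_QUERY_RESULT_EXT read if the result
// still is not available.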
void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
  TRACE_EVENT0("cc", "WaitForQueryResult");

  int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
  while (attempts_left--) {
    if (CheckForQueryResult(gl, query_id))
      break;

    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
        kCheckForQueryResultAvailableTickRateMs));
  }

  unsigned result = 0;
  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
}

}  // namespace

OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
    : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}

OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
  DCHECK_EQ(texture_id, 0u);
  DCHECK_EQ(image_id, 0u);
  DCHECK_EQ(query_id, 0u);
}
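
// Releases the query, image and texture owned by this staging buffer. The
// pool calls this, while holding the worker context lock, before erasing a
// buffer; the destructor above DCHECKs that it has already happened.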
void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
    gpu::gles2::GLES2Interface* gl) {
  if (query_id) {
    gl->DeleteQueriesEXT(1, &query_id);
    query_id = 0;
  }
  if (image_id) {
    gl->DestroyImageCHROMIUM(image_id);
    image_id = 0;
  }
  if (texture_id) {
    gl->DeleteTextures(1, &texture_id);
    texture_id = 0;
  }
}

void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
    base::trace_event::ProcessMemoryDump* pmd,
    ResourceFormat format,
    bool in_free_list) const {
  if (!gpu_memory_buffer)
    return;

  gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
  std::string buffer_dump_name =
      base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id.id);
  base::trace_event::MemoryAllocatorDump* buffer_dump =
      pmd->CreateAllocatorDump(buffer_dump_name);

  uint64_t buffer_size_in_bytes =
      ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
  buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                         buffer_size_in_bytes);
  buffer_dump->AddScalar("free_size",
                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                         in_free_list ? buffer_size_in_bytes : 0);

  // Emit an ownership edge towards a global allocator dump node.
  const uint64 tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();
  base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
      gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
  pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);

  // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
  // the tracing UI will account the effective size of the buffer to the child.
  const int kImportance = 2;
  pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    int max_copy_texture_chromium_size,
    bool use_persistent_gpu_memory_buffers,
    int max_staging_buffers) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, resource_provider,
      max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
      max_staging_buffers));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider,
    int max_copy_texture_chromium_size,
    bool use_persistent_gpu_memory_buffers,
    int max_staging_buffers)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      resource_provider_(resource_provider),
      max_bytes_per_copy_operation_(
          max_copy_texture_chromium_size
              ? std::min(kMaxBytesPerCopyOperation,
                         max_copy_texture_chromium_size)
              : kMaxBytesPerCopyOperation),
      use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
      bytes_scheduled_since_last_flush_(0),
      max_staging_buffers_(max_staging_buffers),
      staging_buffer_expiration_delay_(
          base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
      reduce_memory_usage_pending_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, base::ThreadTaskRunnerHandle::Get());
  reduce_memory_usage_callback_ =
      base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
                 weak_ptr_factory_.GetWeakPtr());
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);

  base::AutoLock lock(lock_);

  if (buffers_.empty())
    return;

  ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
}
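
// Builds a TaskGraph containing every raster task in |queue| plus one
// "task set finished" task per task set, then hands the graph to
// |task_graph_runner_|. Callbacks from a previously scheduled graph are
// cancelled by invalidating their weak pointers.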
void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  size_t priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);

  // Barrier to sync any new resources to the worker context.
  resource_provider_->output_surface()
      ->context_provider()
      ->ContextGL()
      ->OrderingBarrierCHROMIUM();

  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const {
  return resource_provider_->memory_efficient_texture_format();
}

bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
  return !PlatformColor::SameComponentOrder(GetResourceFormat());
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
  // the dirty rect.
  DCHECK_EQ(resource->format(),
            resource_provider_->memory_efficient_texture_format());
  return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
      this, resource_provider_,
      resource_provider_->memory_efficient_texture_format(), resource,
      previous_content_id));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
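
// This is the heart of the "one copy" scheme: the tile is rastered into a
// CPU-accessible staging GpuMemoryBuffer on the worker thread, and the pixels
// are then copied into the destination resource's texture with
// CopySubTextureCHROMIUM, issued in row chunks on the worker context.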
void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
    const Resource* resource,
    const ResourceProvider::ScopedWriteLockGL* resource_lock,
    const RasterSource* raster_source,
    const gfx::Rect& raster_full_rect,
    const gfx::Rect& raster_dirty_rect,
    float scale,
    bool include_images,
    uint64_t previous_content_id,
    uint64_t new_content_id) {
  base::AutoLock lock(lock_);

  scoped_ptr<StagingBuffer> staging_buffer =
      AcquireStagingBuffer(resource, previous_content_id);
  DCHECK(staging_buffer);

  {
    base::AutoUnlock unlock(lock_);

    // Allocate GpuMemoryBuffer if necessary.
    if (!staging_buffer->gpu_memory_buffer) {
      staging_buffer->gpu_memory_buffer =
          resource_provider_->gpu_memory_buffer_manager()
              ->AllocateGpuMemoryBuffer(
                  staging_buffer->size,
                  BufferFormat(
                      resource_provider_->memory_efficient_texture_format()),
                  use_persistent_gpu_memory_buffers_
                      ? gfx::BufferUsage::PERSISTENT_MAP
                      : gfx::BufferUsage::MAP);
      DCHECK_EQ(gfx::NumberOfPlanesForBufferFormat(
                    staging_buffer->gpu_memory_buffer->GetFormat()),
                1u);
    }

    gfx::Rect playback_rect = raster_full_rect;
    if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
      // Reduce playback rect to dirty region if the content id of the staging
      // buffer matches the previous content id.
      if (previous_content_id == staging_buffer->content_id)
        playback_rect.Intersect(raster_dirty_rect);
    }

    if (staging_buffer->gpu_memory_buffer) {
      void* data = nullptr;
      bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
      DCHECK(rv);
      int stride;
      staging_buffer->gpu_memory_buffer->GetStride(&stride);
      // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
      DCHECK_GE(stride, 0);

      DCHECK(!playback_rect.IsEmpty())
          << "Why are we rastering a tile that's not dirty?";
      TileTaskWorkerPool::PlaybackToMemory(
          data, resource_provider_->memory_efficient_texture_format(),
          staging_buffer->size, static_cast<size_t>(stride), raster_source,
          raster_full_rect, playback_rect, scale, include_images);
      staging_buffer->gpu_memory_buffer->Unmap();
      staging_buffer->content_id = new_content_id;
    }
  }

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  {
    ContextProvider::ScopedContextLock scoped_context(context_provider);

    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
    DCHECK(gl);

    unsigned image_target = resource_provider_->GetImageTextureTarget(
        resource_provider_->memory_efficient_texture_format());

    // Create and bind staging texture.
    if (!staging_buffer->texture_id) {
      gl->GenTextures(1, &staging_buffer->texture_id);
      gl->BindTexture(image_target, staging_buffer->texture_id);
      gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
      gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
      gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
      gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    } else {
      gl->BindTexture(image_target, staging_buffer->texture_id);
    }

    // Create and bind image.
    if (!staging_buffer->image_id) {
      if (staging_buffer->gpu_memory_buffer) {
        staging_buffer->image_id = gl->CreateImageCHROMIUM(
            staging_buffer->gpu_memory_buffer->AsClientBuffer(),
            staging_buffer->size.width(), staging_buffer->size.height(),
            GLInternalFormat(
                resource_provider_->memory_efficient_texture_format()));
        gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
      }
    } else {
      gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
      gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id);
    }

    // Unbind staging texture.
    gl->BindTexture(image_target, 0);
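
    // Issue a query around the copy commands below so that
    // AcquireStagingBuffer() can later tell when this staging buffer can be
    // reused.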
    if (resource_provider_->use_sync_query()) {
      if (!staging_buffer->query_id)
        gl->GenQueriesEXT(1, &staging_buffer->query_id);

#if defined(OS_CHROMEOS)
      // TODO(reveman): This avoids a performance problem on some ChromeOS
      // devices. This needs to be removed to support native GpuMemoryBuffer
      // implementations. crbug.com/436314
      gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
#else
      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
                        staging_buffer->query_id);
#endif
    }

    int bytes_per_row =
        (BitsPerPixel(resource_provider_->memory_efficient_texture_format()) *
         resource->size().width()) /
        8;
    int chunk_size_in_rows =
        std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
    // Align chunk size to 4. Required to support compressed texture formats.
    chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
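    // The copy is issued in chunks of at most |chunk_size_in_rows| rows so
    // that a single CopySubTextureCHROMIUM call never covers much more than
    // |max_bytes_per_copy_operation_| bytes.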
    int y = 0;
    int height = resource->size().height();
    while (y < height) {
      // Copy at most |chunk_size_in_rows|.
      int rows_to_copy = std::min(chunk_size_in_rows, height - y);
      DCHECK_GT(rows_to_copy, 0);

      gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
                                 resource_lock->texture_id(), 0, y, 0, y,
                                 resource->size().width(), rows_to_copy, false,
                                 false, false);
      y += rows_to_copy;

      // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
      // used for this copy operation.
      bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;

      if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
        gl->ShallowFlushCHROMIUM();
        bytes_scheduled_since_last_flush_ = 0;
      }
    }

    if (resource_provider_->use_sync_query()) {
#if defined(OS_CHROMEOS)
      gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
#else
      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
#endif
    }

    // Barrier to sync worker context output to cc context.
    gl->OrderingBarrierCHROMIUM();
  }

  staging_buffer->last_usage = base::TimeTicks::Now();
  busy_buffers_.push_back(staging_buffer.Pass());

  ScheduleReduceMemoryUsage();
}

bool OneCopyTileTaskWorkerPool::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  base::AutoLock lock(lock_);

  for (const auto& buffer : buffers_) {
    buffer->OnMemoryDump(pmd,
                         resource_provider_->memory_efficient_texture_format(),
                         std::find(free_buffers_.begin(), free_buffers_.end(),
                                   buffer) != free_buffers_.end());
  }

  return true;
}
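
// Returns a staging buffer to raster into, preferring (in that order) a free
// buffer that already holds |previous_content_id| (enabling partial raster),
// then any free buffer of matching size, and finally a newly allocated
// buffer. May block on busy buffers when the pool is at |max_staging_buffers_|.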
scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
                                                uint64_t previous_content_id) {
  lock_.AssertAcquired();

  scoped_ptr<StagingBuffer> staging_buffer;

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  ContextProvider::ScopedContextLock scoped_context(context_provider);

  gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
  DCHECK(gl);

  // Check if any busy buffers have become available.
  if (resource_provider_->use_sync_query()) {
    while (!busy_buffers_.empty()) {
      if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
        break;

      free_buffers_.push_back(busy_buffers_.take_front());
    }
  }

  // Wait for number of non-free buffers to become less than the limit.
  while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
    // Stop when there are no more busy buffers to wait for.
    if (busy_buffers_.empty())
      break;

    if (resource_provider_->use_sync_query()) {
      WaitForQueryResult(gl, busy_buffers_.front()->query_id);
      free_buffers_.push_back(busy_buffers_.take_front());
    } else {
      // Fall-back to glFinish if CHROMIUM_sync_query is not available.
      gl->Finish();
      while (!busy_buffers_.empty())
        free_buffers_.push_back(busy_buffers_.take_front());
    }
  }

  // Find a staging buffer that allows us to perform partial raster when
  // using persistent GpuMemoryBuffers.
  if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
    StagingBufferDeque::iterator it =
        std::find_if(free_buffers_.begin(), free_buffers_.end(),
                     [previous_content_id](const StagingBuffer* buffer) {
                       return buffer->content_id == previous_content_id;
                     });
    if (it != free_buffers_.end())
      staging_buffer = free_buffers_.take(it);
  }

  // Find staging buffer of correct size.
  if (!staging_buffer) {
    StagingBufferDeque::iterator it =
        std::find_if(free_buffers_.begin(), free_buffers_.end(),
                     [resource](const StagingBuffer* buffer) {
                       return buffer->size == resource->size();
                     });
    if (it != free_buffers_.end())
      staging_buffer = free_buffers_.take(it);
  }

  // Create new staging buffer if necessary.
  if (!staging_buffer) {
    staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
    buffers_.insert(staging_buffer.get());
  }

  // Release enough free buffers to stay within the limit.
  while (buffers_.size() > max_staging_buffers_) {
    if (free_buffers_.empty())
      break;

    free_buffers_.front()->DestroyGLResources(gl);
    buffers_.erase(free_buffers_.front());
    free_buffers_.take_front();
  }

  return staging_buffer.Pass();
}

base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
  lock_.AssertAcquired();

  if (!free_buffers_.empty())
    return free_buffers_.front()->last_usage;

  if (!busy_buffers_.empty())
    return busy_buffers_.front()->last_usage;

  return base::TimeTicks();
}
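
// Staging buffers are kept alive for |staging_buffer_expiration_delay_| after
// their last use. ReduceMemoryUsage() is scheduled to run at the moment the
// least-recently-used buffer reaches that age.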
void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
  lock_.AssertAcquired();

  if (reduce_memory_usage_pending_)
    return;

  reduce_memory_usage_pending_ = true;

  // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
  // should be released.
  base::TimeTicks reduce_memory_usage_time =
      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
  task_runner_->PostDelayedTask(
      FROM_HERE, reduce_memory_usage_callback_,
      reduce_memory_usage_time - base::TimeTicks::Now());
}

void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
  base::AutoLock lock(lock_);

  reduce_memory_usage_pending_ = false;

  if (free_buffers_.empty() && busy_buffers_.empty())
    return;

  base::TimeTicks current_time = base::TimeTicks::Now();
  ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);

  if (free_buffers_.empty() && busy_buffers_.empty())
    return;

  reduce_memory_usage_pending_ = true;

  // Schedule another call to ReduceMemoryUsage at the time when the next
  // buffer should be released.
  base::TimeTicks reduce_memory_usage_time =
      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
  task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
                                reduce_memory_usage_time - current_time);
}
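
// Walks |free_buffers_| and then |busy_buffers_| in LRU order, destroying the
// GL resources of every buffer not used since |time| and removing it from the
// pool.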
void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
    base::TimeTicks time) {
  lock_.AssertAcquired();

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  {
    ContextProvider::ScopedContextLock scoped_context(context_provider);

    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
    DCHECK(gl);

    // Note: Front buffer is guaranteed to be LRU so we can stop releasing
    // buffers as soon as we find a buffer that has been used since |time|.
    while (!free_buffers_.empty()) {
      if (free_buffers_.front()->last_usage > time)
        return;

      free_buffers_.front()->DestroyGLResources(gl);
      buffers_.erase(free_buffers_.front());
      free_buffers_.take_front();
    }

    while (!busy_buffers_.empty()) {
      if (busy_buffers_.front()->last_usage > time)
        return;

      busy_buffers_.front()->DestroyGLResources(gl);
      buffers_.erase(busy_buffers_.front());
      busy_buffers_.take_front();
    }
  }
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  base::AutoLock lock(lock_);

  staging_state->SetInteger("staging_buffer_count",
                            static_cast<int>(buffers_.size()));
  staging_state->SetInteger("busy_count",
                            static_cast<int>(busy_buffers_.size()));
  staging_state->SetInteger("free_count",
                            static_cast<int>(free_buffers_.size()));
}

}  // namespace cc