cc: Use worker context for one-copy tile initialization.
[chromium-blink-merge.git] / cc/raster/one_copy_tile_task_worker_pool.cc
blob 3284fe9ddc6ade766f9a1cb582d564e495134f5d
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/one_copy_tile_task_worker_pool.h"

#include <algorithm>
#include <limits>

#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/base/math_util.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/platform_color.h"
#include "cc/resources/resource_format.h"
#include "cc/resources/resource_util.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"

namespace cc {
namespace {

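// RasterBuffer implementation that forwards playback to the owning
// OneCopyTileTaskWorkerPool, which rasters into a staging buffer and then
// copies the result into the destination resource.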
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourceFormat resource_format,
                   const Resource* resource,
                   uint64_t previous_content_id)
      : worker_pool_(worker_pool),
        resource_(resource),
        lock_(resource_provider, resource->id()),
        previous_content_id_(previous_content_id) {}

  ~RasterBufferImpl() override {}

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& raster_full_rect,
                const gfx::Rect& raster_dirty_rect,
                uint64_t new_content_id,
                float scale,
                bool include_images) override {
    worker_pool_->PlaybackAndCopyOnWorkerThread(
        resource_, &lock_, raster_source, raster_full_rect, raster_dirty_rect,
        scale, include_images, previous_content_id_, new_content_id);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  const Resource* resource_;
  ResourceProvider::ScopedWriteLockGL lock_;
  uint64_t previous_content_id_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Delay between checking for query result to be available.
const int kCheckForQueryResultAvailableTickRateMs = 1;

// Number of attempts to allow before we perform a check that will wait for
// query to complete.
const int kMaxCheckForQueryResultAvailableAttempts = 256;

// 4MiB is the size of 4 512x512 tiles, which has proven to be a good
// default batch size for copy operations.
const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;

// Delay before a staging buffer might be released.
const int kStagingBufferExpirationDelayMs = 1000;

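// Returns true if the result of |query_id| is available.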
bool CheckForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
  unsigned complete = 1;
  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_AVAILABLE_EXT, &complete);
  return !!complete;
}

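// Polls for the query result at the tick rate above; if it is still
// unavailable after kMaxCheckForQueryResultAvailableAttempts attempts, the
// final GL_QUERY_RESULT_EXT read blocks until the query completes.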
void WaitForQueryResult(gpu::gles2::GLES2Interface* gl, unsigned query_id) {
  TRACE_EVENT0("cc", "WaitForQueryResult");

  int attempts_left = kMaxCheckForQueryResultAvailableAttempts;
  while (attempts_left--) {
    if (CheckForQueryResult(gl, query_id))
      break;

    base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(
        kCheckForQueryResultAvailableTickRateMs));
  }

  unsigned result = 0;
  gl->GetQueryObjectuivEXT(query_id, GL_QUERY_RESULT_EXT, &result);
}

}  // namespace

OneCopyTileTaskWorkerPool::StagingBuffer::StagingBuffer(const gfx::Size& size)
    : size(size), texture_id(0), image_id(0), query_id(0), content_id(0) {}

OneCopyTileTaskWorkerPool::StagingBuffer::~StagingBuffer() {
  DCHECK_EQ(texture_id, 0u);
  DCHECK_EQ(image_id, 0u);
  DCHECK_EQ(query_id, 0u);
}

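// Releases the GL query, image and texture (if any) backing this staging
// buffer. The destructor DCHECKs that this has been done.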
void OneCopyTileTaskWorkerPool::StagingBuffer::DestroyGLResources(
    gpu::gles2::GLES2Interface* gl) {
  if (query_id) {
    gl->DeleteQueriesEXT(1, &query_id);
    query_id = 0;
  }
  if (image_id) {
    gl->DestroyImageCHROMIUM(image_id);
    image_id = 0;
  }
  if (texture_id) {
    gl->DeleteTextures(1, &texture_id);
    texture_id = 0;
  }
}

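// Dumps the GpuMemoryBuffer backing this staging buffer for memory-infra
// tracing, recording its total size and whether it is currently free.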
void OneCopyTileTaskWorkerPool::StagingBuffer::OnMemoryDump(
    base::trace_event::ProcessMemoryDump* pmd,
    ResourceFormat format,
    bool in_free_list) const {
  if (!gpu_memory_buffer)
    return;

  gfx::GpuMemoryBufferId buffer_id = gpu_memory_buffer->GetId();
  std::string buffer_dump_name =
      base::StringPrintf("cc/one_copy/staging_memory/buffer_%d", buffer_id);
  base::trace_event::MemoryAllocatorDump* buffer_dump =
      pmd->CreateAllocatorDump(buffer_dump_name);

  uint64_t buffer_size_in_bytes =
      ResourceUtil::UncheckedSizeInBytes<uint64_t>(size, format);
  buffer_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                         buffer_size_in_bytes);
  buffer_dump->AddScalar("free_size",
                         base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                         in_free_list ? buffer_size_in_bytes : 0);

  // Emit an ownership edge towards a global allocator dump node.
  const uint64 tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();
  base::trace_event::MemoryAllocatorDumpGuid shared_buffer_guid =
      gfx::GetGpuMemoryBufferGUIDForTracing(tracing_process_id, buffer_id);
  pmd->CreateSharedGlobalAllocatorDump(shared_buffer_guid);

  // By creating an edge with a higher |importance| (w.r.t. browser-side dumps)
  // the tracing UI will account the effective size of the buffer to the child.
  const int kImportance = 2;
  pmd->AddOwnershipEdge(buffer_dump->guid(), shared_buffer_guid, kImportance);
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    int max_copy_texture_chromium_size,
    bool use_persistent_gpu_memory_buffers,
    unsigned image_target,
    int max_staging_buffers) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, resource_provider,
      max_copy_texture_chromium_size, use_persistent_gpu_memory_buffers,
      image_target, max_staging_buffers));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider,
    int max_copy_texture_chromium_size,
    bool use_persistent_gpu_memory_buffers,
    unsigned image_target,
    int max_staging_buffers)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      resource_provider_(resource_provider),
      max_bytes_per_copy_operation_(
          max_copy_texture_chromium_size
              ? std::min(kMaxBytesPerCopyOperation,
                         max_copy_texture_chromium_size)
              : kMaxBytesPerCopyOperation),
      use_persistent_gpu_memory_buffers_(use_persistent_gpu_memory_buffers),
      image_target_(image_target),
      bytes_scheduled_since_last_flush_(0),
      max_staging_buffers_(max_staging_buffers),
      staging_buffer_expiration_delay_(
          base::TimeDelta::FromMilliseconds(kStagingBufferExpirationDelayMs)),
      reduce_memory_usage_pending_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, base::ThreadTaskRunnerHandle::Get());
  reduce_memory_usage_callback_ =
      base::Bind(&OneCopyTileTaskWorkerPool::ReduceMemoryUsage,
                 weak_ptr_factory_.GetWeakPtr());
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

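// Scheduling an empty task graph cancels tasks that have not started yet;
// after waiting for in-flight tasks to finish, all staging buffers are
// released.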
void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);

  base::AutoLock lock(lock_);

  if (buffers_.empty())
    return;

  ReleaseBuffersNotUsedSince(base::TimeTicks() + base::TimeDelta::Max());
}

void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  size_t priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);

  // Barrier to sync any new resources to the worker context.
  resource_provider_->output_surface()
      ->context_provider()
      ->ContextGL()
      ->OrderingBarrierCHROMIUM();

  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const {
  return resource_provider_->best_texture_format();
}

bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
  return !PlatformColor::SameComponentOrder(GetResourceFormat());
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
  // the dirty rect.
  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
  return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl(
      this, resource_provider_, resource_provider_->best_texture_format(),
      resource, previous_content_id));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

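// Rasters |raster_source| into a staging GpuMemoryBuffer on the worker thread
// and then copies the result into |resource| using the worker context.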
void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread(
    const Resource* resource,
    const ResourceProvider::ScopedWriteLockGL* resource_lock,
    const RasterSource* raster_source,
    const gfx::Rect& raster_full_rect,
    const gfx::Rect& raster_dirty_rect,
    float scale,
    bool include_images,
    uint64_t previous_content_id,
    uint64_t new_content_id) {
  base::AutoLock lock(lock_);

  scoped_ptr<StagingBuffer> staging_buffer =
      AcquireStagingBuffer(resource, previous_content_id);
  DCHECK(staging_buffer);

  {
    base::AutoUnlock unlock(lock_);

    // Allocate GpuMemoryBuffer if necessary.
    if (!staging_buffer->gpu_memory_buffer) {
      staging_buffer->gpu_memory_buffer =
          resource_provider_->gpu_memory_buffer_manager()
              ->AllocateGpuMemoryBuffer(
                  staging_buffer->size,
                  BufferFormat(resource_provider_->best_texture_format()),
                  use_persistent_gpu_memory_buffers_
                      ? gfx::BufferUsage::PERSISTENT_MAP
                      : gfx::BufferUsage::MAP);
    }

    gfx::Rect playback_rect = raster_full_rect;
    if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
      // Reduce playback rect to dirty region if the content id of the staging
      // buffer matches the previous content id.
      if (previous_content_id == staging_buffer->content_id)
        playback_rect.Intersect(raster_dirty_rect);
    }

    if (staging_buffer->gpu_memory_buffer) {
      void* data = nullptr;
      bool rv = staging_buffer->gpu_memory_buffer->Map(&data);
      DCHECK(rv);
      int stride;
      staging_buffer->gpu_memory_buffer->GetStride(&stride);
      // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
      DCHECK_GE(stride, 0);

      DCHECK(!playback_rect.IsEmpty())
          << "Why are we rastering a tile that's not dirty?";
      TileTaskWorkerPool::PlaybackToMemory(
          data, resource_provider_->best_texture_format(),
          staging_buffer->size, static_cast<size_t>(stride), raster_source,
          raster_full_rect, playback_rect, scale, include_images);
      staging_buffer->gpu_memory_buffer->Unmap();
      staging_buffer->content_id = new_content_id;
    }
  }

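  // The copy step runs on the worker context: bind the staging buffer's image
  // to a texture and copy it into the destination resource.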
  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  {
    ContextProvider::ScopedContextLock scoped_context(context_provider);

    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
    DCHECK(gl);

    if (!staging_buffer->texture_id) {
      gl->GenTextures(1, &staging_buffer->texture_id);
      gl->BindTexture(image_target_, staging_buffer->texture_id);
      gl->TexParameteri(image_target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
      gl->TexParameteri(image_target_, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
      gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
      gl->TexParameteri(image_target_, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
      if (staging_buffer->gpu_memory_buffer) {
        DCHECK(!staging_buffer->image_id);
        staging_buffer->image_id = gl->CreateImageCHROMIUM(
            staging_buffer->gpu_memory_buffer->AsClientBuffer(),
            staging_buffer->size.width(), staging_buffer->size.height(),
            GLInternalFormat(resource_provider_->best_texture_format()));
        gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
      }
    } else {
      gl->BindTexture(image_target_, staging_buffer->texture_id);
      gl->ReleaseTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
      gl->BindTexImage2DCHROMIUM(image_target_, staging_buffer->image_id);
    }

    if (resource_provider_->use_sync_query()) {
      if (!staging_buffer->query_id)
        gl->GenQueriesEXT(1, &staging_buffer->query_id);

#if defined(OS_CHROMEOS)
      // TODO(reveman): This avoids a performance problem on some ChromeOS
      // devices. This needs to be removed to support native GpuMemoryBuffer
      // implementations. crbug.com/436314
      gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id);
#else
      gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM,
                        staging_buffer->query_id);
#endif
    }

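    // Copy the staging texture into the destination resource in chunks of
    // rows, issuing a shallow flush once roughly
    // |max_bytes_per_copy_operation_| bytes have been scheduled.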
    int bytes_per_row =
        (BitsPerPixel(resource_provider_->best_texture_format()) *
         resource->size().width()) /
        8;
    int chunk_size_in_rows =
        std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
    // Align chunk size to 4. Required to support compressed texture formats.
    chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
    int y = 0;
    int height = resource->size().height();
    while (y < height) {
      // Copy at most |chunk_size_in_rows|.
      int rows_to_copy = std::min(chunk_size_in_rows, height - y);
      DCHECK_GT(rows_to_copy, 0);

      gl->CopySubTextureCHROMIUM(GL_TEXTURE_2D, staging_buffer->texture_id,
                                 resource_lock->texture_id(), 0, y, 0, y,
                                 resource->size().width(), rows_to_copy, false,
                                 false, false);
      y += rows_to_copy;

      // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
      // used for this copy operation.
      bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;

      if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
        gl->ShallowFlushCHROMIUM();
        bytes_scheduled_since_last_flush_ = 0;
      }
    }

    if (resource_provider_->use_sync_query()) {
#if defined(OS_CHROMEOS)
      gl->EndQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM);
#else
      gl->EndQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM);
#endif
    }

    // Barrier to sync worker context output to cc context.
    gl->OrderingBarrierCHROMIUM();
  }

  staging_buffer->last_usage = base::TimeTicks::Now();
  busy_buffers_.push_back(staging_buffer.Pass());

  ScheduleReduceMemoryUsage();
}

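// base::trace_event::MemoryDumpProvider implementation: dumps every staging
// buffer, marking whether it currently sits on the free list.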
bool OneCopyTileTaskWorkerPool::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  base::AutoLock lock(lock_);

  for (const auto& buffer : buffers_) {
    buffer->OnMemoryDump(pmd, resource_provider_->best_texture_format(),
                         std::find(free_buffers_.begin(), free_buffers_.end(),
                                   buffer) != free_buffers_.end());
  }

  return true;
}

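// Returns a staging buffer for |resource|: prefer a free buffer that already
// holds |previous_content_id| (enabling partial raster), then any free buffer
// of matching size, otherwise allocate a new one. Blocks on busy buffers when
// the staging buffer limit has been reached.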
scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer>
OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource,
                                                uint64_t previous_content_id) {
  lock_.AssertAcquired();

  scoped_ptr<StagingBuffer> staging_buffer;

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  ContextProvider::ScopedContextLock scoped_context(context_provider);

  gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
  DCHECK(gl);

  // Check if any busy buffers have become available.
  if (resource_provider_->use_sync_query()) {
    while (!busy_buffers_.empty()) {
      if (!CheckForQueryResult(gl, busy_buffers_.front()->query_id))
        break;

      free_buffers_.push_back(busy_buffers_.take_front());
    }
  }

  // Wait for number of non-free buffers to become less than the limit.
  while ((buffers_.size() - free_buffers_.size()) >= max_staging_buffers_) {
    // Stop when there are no more busy buffers to wait for.
    if (busy_buffers_.empty())
      break;

    if (resource_provider_->use_sync_query()) {
      WaitForQueryResult(gl, busy_buffers_.front()->query_id);
      free_buffers_.push_back(busy_buffers_.take_front());
    } else {
      // Fall-back to glFinish if CHROMIUM_sync_query is not available.
      gl->Finish();
      while (!busy_buffers_.empty())
        free_buffers_.push_back(busy_buffers_.take_front());
    }
  }

  // Find a staging buffer that allows us to perform partial raster when
  // using persistent GpuMemoryBuffers.
  if (use_persistent_gpu_memory_buffers_ && previous_content_id) {
    StagingBufferDeque::iterator it =
        std::find_if(free_buffers_.begin(), free_buffers_.end(),
                     [previous_content_id](const StagingBuffer* buffer) {
                       return buffer->content_id == previous_content_id;
                     });
    if (it != free_buffers_.end())
      staging_buffer = free_buffers_.take(it);
  }

  // Find staging buffer of correct size.
  if (!staging_buffer) {
    StagingBufferDeque::iterator it =
        std::find_if(free_buffers_.begin(), free_buffers_.end(),
                     [resource](const StagingBuffer* buffer) {
                       return buffer->size == resource->size();
                     });
    if (it != free_buffers_.end())
      staging_buffer = free_buffers_.take(it);
  }

  // Create new staging buffer if necessary.
  if (!staging_buffer) {
    staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size()));
    buffers_.insert(staging_buffer.get());
  }

  // Release enough free buffers to stay within the limit.
  while (buffers_.size() > max_staging_buffers_) {
    if (free_buffers_.empty())
      break;

    free_buffers_.front()->DestroyGLResources(gl);
    buffers_.erase(free_buffers_.front());
    free_buffers_.take_front();
  }

  return staging_buffer.Pass();
}

base::TimeTicks OneCopyTileTaskWorkerPool::GetUsageTimeForLRUBuffer() {
  lock_.AssertAcquired();

  if (!free_buffers_.empty())
    return free_buffers_.front()->last_usage;

  if (!busy_buffers_.empty())
    return busy_buffers_.front()->last_usage;

  return base::TimeTicks();
}

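// Posts a delayed ReduceMemoryUsage call for the time at which the least
// recently used staging buffer becomes eligible for release.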
void OneCopyTileTaskWorkerPool::ScheduleReduceMemoryUsage() {
  lock_.AssertAcquired();

  if (reduce_memory_usage_pending_)
    return;

  reduce_memory_usage_pending_ = true;

  // Schedule a call to ReduceMemoryUsage at the time when the LRU buffer
  // should be released.
  base::TimeTicks reduce_memory_usage_time =
      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
  task_runner_->PostDelayedTask(
      FROM_HERE, reduce_memory_usage_callback_,
      reduce_memory_usage_time - base::TimeTicks::Now());
}

void OneCopyTileTaskWorkerPool::ReduceMemoryUsage() {
  base::AutoLock lock(lock_);

  reduce_memory_usage_pending_ = false;

  if (free_buffers_.empty() && busy_buffers_.empty())
    return;

  base::TimeTicks current_time = base::TimeTicks::Now();
  ReleaseBuffersNotUsedSince(current_time - staging_buffer_expiration_delay_);

  if (free_buffers_.empty() && busy_buffers_.empty())
    return;

  reduce_memory_usage_pending_ = true;

  // Schedule another call to ReduceMemoryUsage at the time when the next
  // buffer should be released.
  base::TimeTicks reduce_memory_usage_time =
      GetUsageTimeForLRUBuffer() + staging_buffer_expiration_delay_;
  task_runner_->PostDelayedTask(FROM_HERE, reduce_memory_usage_callback_,
                                reduce_memory_usage_time - current_time);
}

void OneCopyTileTaskWorkerPool::ReleaseBuffersNotUsedSince(
    base::TimeTicks time) {
  lock_.AssertAcquired();

  ContextProvider* context_provider =
      resource_provider_->output_surface()->worker_context_provider();
  DCHECK(context_provider);

  {
    ContextProvider::ScopedContextLock scoped_context(context_provider);

    gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL();
    DCHECK(gl);

    // Note: Front buffer is guaranteed to be LRU so we can stop releasing
    // buffers as soon as we find a buffer that has been used since |time|.
    while (!free_buffers_.empty()) {
      if (free_buffers_.front()->last_usage > time)
        return;

      free_buffers_.front()->DestroyGLResources(gl);
      buffers_.erase(free_buffers_.front());
      free_buffers_.take_front();
    }

    while (!busy_buffers_.empty()) {
      if (busy_buffers_.front()->last_usage > time)
        return;

      busy_buffers_.front()->DestroyGLResources(gl);
      buffers_.erase(busy_buffers_.front());
      busy_buffers_.take_front();
    }
  }
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  base::AutoLock lock(lock_);

  staging_state->SetInteger("staging_buffer_count",
                            static_cast<int>(buffers_.size()));
  staging_state->SetInteger("busy_count",
                            static_cast<int>(busy_buffers_.size()));
  staging_state->SetInteger("free_count",
                            static_cast<int>(free_buffers_.size()));
}

}  // namespace cc