// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/one_copy_tile_task_worker_pool.h"

#include <algorithm>
#include <limits>

#include "base/debug/trace_event.h"
#include "base/debug/trace_event_argument.h"
#include "base/strings/stringprintf.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {
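
// A RasterBuffer implementation that rasters into a staging resource
// acquired from |resource_pool| and, when playback finishes, schedules a
// copy of its contents into the destination |resource|.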
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   const Resource* resource)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(resource_pool->AcquireResource(resource->size())),
        lock_(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
            resource_provider_,
            raster_resource_->id())),
        sequence_(0) {}

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return raster resource to pool so it can be used by another RasterBuffer
    // instance.
    if (raster_resource_)
      resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        lock_.Pass(), raster_resource_.Pass(), resource_, raster_source, rect,
        scale);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* resource_;
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 16;

// Delay between checks for completed copy operations.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;

}  // namespace

OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst)
    : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      resource_pool));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      lock_(),
      copy_operation_count_cv_(&lock_),
      scheduled_copy_operation_count_(0),
      issued_copy_operation_count_(0),
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  DCHECK_EQ(scheduled_copy_operation_count_, 0u);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
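    // Wake any worker thread blocked on the in-flight copy operation limit
    // so it can observe |shutdown_| and stop waiting.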
    copy_operation_count_cv_.Signal();
  }

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }
  resource_pool_->CheckBusyResources(false);

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriority, task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  DCHECK_EQ(resource->format(), resource_pool_->resource_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_, resource));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst,
    const RasterSource* raster_source,
    const gfx::Rect& rect,
    float scale) {
  base::AutoLock lock(lock_);

  int failed_attempts = 0;
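  // Throttle raster work: block this worker until the number of in-flight
  // copy operations drops below kMaxCopyOperations.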
  while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >=
         kMaxCopyOperations) {
    // Ignore limit when shutdown is set.
    if (shutdown_)
      break;

    ++failed_attempts;

    // Schedule a check that will also wait for operations to complete
    // after too many failed attempts.
    bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

    // Schedule a check for completed copy operations if too many operations
    // are currently in-flight.
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

    {
      TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

      // Wait for in-flight copy operations to drop below limit.
      copy_operation_count_cv_.Wait();
    }
  }

  // Increment |scheduled_copy_operation_count_| before releasing |lock_|.
  ++scheduled_copy_operation_count_;

  // There may be more work available, so wake up another worker thread.
  copy_operation_count_cv_.Signal();
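
  // Raster playback is the expensive part and doesn't need |lock_|, so
  // release it while rasterizing into the GPU memory buffer.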
  {
    base::AutoUnlock unlock(lock_);

    gfx::GpuMemoryBuffer* gpu_memory_buffer = write_lock->GetGpuMemoryBuffer();
    if (gpu_memory_buffer) {
      TileTaskWorkerPool::PlaybackToMemory(
          gpu_memory_buffer->Map(), src->format(), src->size(),
          gpu_memory_buffer->GetStride(), raster_source, rect, scale);
      gpu_memory_buffer->Unmap();
    }
  }

  pending_copy_operations_.push_back(
      make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst)));

  // Acquire a sequence number for this copy operation.
  CopySequenceNumber sequence = next_copy_operation_sequence_++;

  // Post task that will advance last flushed copy operation to |sequence|
  // if we have reached the flush period.
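  // Batching flushes this way amortizes the cost of a GL flush across
  // kCopyFlushPeriod copy operations while keeping the number of issued
  // but unflushed copies bounded.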
  if ((sequence % kCopyFlushPeriod) == 0) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                   weak_ptr_factory_.GetWeakPtr(), sequence));
  }

  return sequence;
}
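
// Issues all copy operations with sequence numbers up to and including
// |sequence| that have not been issued yet.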
void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
    CopySequenceNumber sequence) {
  if (last_issued_copy_operation_ >= sequence)
    return;

  IssueCopyOperations(sequence - last_issued_copy_operation_);
  last_issued_copy_operation_ = sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Decrement |scheduled_copy_operation_count_| and increment
    // |issued_copy_operation_count_| to reflect the transition of copy
    // operations from "pending" to "issued" state.
    DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size());
    scheduled_copy_operation_count_ -= copy_operations.size();
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id());

    // Return source resource to pool where it can be reused once copy
    // operation has completed and resource is no longer busy.
    resource_pool_->ReleaseResource(copy_operation->src.Pass());
  }
}

void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}

void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}

scoped_refptr<base::debug::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::debug::TracedValue> state =
      new base::debug::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::debug::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}

}  // namespace cc