aw: Move SharedRendererState out of AwContents
[chromium-blink-merge.git] / cc / resources / one_copy_raster_worker_pool.cc
bloba8b838e83908ff4dd1d215ac9223e39a937258d9
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "cc/resources/one_copy_raster_worker_pool.h"
7 #include <algorithm>
8 #include <limits>
10 #include "base/debug/trace_event.h"
11 #include "base/debug/trace_event_argument.h"
12 #include "base/strings/stringprintf.h"
13 #include "cc/debug/traced_value.h"
14 #include "cc/resources/raster_buffer.h"
15 #include "cc/resources/resource_pool.h"
16 #include "cc/resources/scoped_resource.h"
17 #include "gpu/command_buffer/client/gles2_interface.h"
18 #include "ui/gfx/gpu_memory_buffer.h"
20 namespace cc {
21 namespace {
// RasterBuffer implementation for the one-copy scheme: playback rasterizes
// into an intermediate GPU-memory-buffer-backed resource acquired from
// |resource_pool|, then a copy into the final |resource| is scheduled on the
// worker pool.
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyRasterWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   const Resource* resource)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(resource_pool->AcquireResource(resource->size())),
        lock_(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
            resource_provider_,
            raster_resource_->id())),
        sequence_(0) {}

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return raster resource to pool so it can be used by another RasterBuffer
    // instance.
    if (raster_resource_)
      resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    // Ownership of the staging resource and its write lock is transferred to
    // the worker pool here; |sequence_| identifies the scheduled copy so the
    // destructor can guarantee it gets issued.
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        lock_.Pass(), raster_resource_.Pass(), resource_, raster_source, rect,
        scale);
  }

 private:
  OneCopyRasterWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  // Final destination resource (not owned).
  const Resource* resource_;
  // Intermediate staging resource; null after Playback() transfers ownership.
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  // 0 until a copy operation has been scheduled.
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};
// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 16;

// Delay between checking for copy operations to complete.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;

}  // namespace
// Takes ownership of the staging resource |src| and its |write_lock|; |dst|
// is the final destination resource and is not owned.
OneCopyRasterWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst)
    : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) {
}
97 OneCopyRasterWorkerPool::CopyOperation::~CopyOperation() {
100 // static
101 scoped_ptr<RasterWorkerPool> OneCopyRasterWorkerPool::Create(
102 base::SequencedTaskRunner* task_runner,
103 TaskGraphRunner* task_graph_runner,
104 ContextProvider* context_provider,
105 ResourceProvider* resource_provider,
106 ResourcePool* resource_pool) {
107 return make_scoped_ptr<RasterWorkerPool>(
108 new OneCopyRasterWorkerPool(task_runner,
109 task_graph_runner,
110 context_provider,
111 resource_provider,
112 resource_pool));
// All raw pointers passed in are required to outlive this pool; |lock_|
// guards the copy-operation bookkeeping shared with worker threads.
OneCopyRasterWorkerPool::OneCopyRasterWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      lock_(),
      copy_operation_count_cv_(&lock_),
      scheduled_copy_operation_count_(0),
      issued_copy_operation_count_(0),
      // Sequence numbers start at 1; RasterBufferImpl uses 0 to mean "no copy
      // scheduled".
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      raster_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}
OneCopyRasterWorkerPool::~OneCopyRasterWorkerPool() {
  // Every scheduled copy operation must have been issued before destruction.
  DCHECK_EQ(scheduled_copy_operation_count_, 0u);
}
// This class implements the Rasterizer interface directly.
Rasterizer* OneCopyRasterWorkerPool::AsRasterizer() {
  return this;
}
// |client| is notified from OnRasterFinished(); not owned.
void OneCopyRasterWorkerPool::SetClient(RasterizerClient* client) {
  client_ = client;
}
void OneCopyRasterWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    // Wake a worker thread blocked on the in-flight copy limit so it can
    // observe |shutdown_| and stop waiting.
    copy_operation_count_cv_.Signal();
  }

  // Replace any scheduled work with an empty graph and block until all
  // currently-running tasks have finished.
  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
// Builds a task graph from |queue| — one "raster finished" sentinel task per
// task set, with an edge from every raster task belonging to that set — and
// hands it to the task graph runner, replacing any previously scheduled graph.
void OneCopyRasterWorkerPool::ScheduleTasks(RasterTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::ScheduleTasks");

  if (raster_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  raster_pending_.set();

  unsigned priority = kRasterTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnRasterFinished callbacks.
  raster_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<RasterizerTask> new_raster_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  // Create one completion-notification task per task set; each will run
  // OnRasterFinished(task_set) on |task_runner_| once its set completes.
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_raster_finished_tasks[task_set] = CreateRasterFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyRasterWorkerPool::OnRasterFinished,
                   raster_finished_weak_ptr_factory_.GetWeakPtr(),
                   task_set));
  }

  resource_pool_->CheckBusyResources(false);

  for (RasterTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end();
       ++it) {
    const RasterTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    // Make the matching "finished" task depend on every raster task that is a
    // member of its set.
    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_raster_finished_tasks[task_set].get()));
    }

    // Queue order determines priority: earlier items get higher priority.
    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_,
                      new_raster_finished_tasks[task_set].get(),
                      kRasterFinishedTaskPriority,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  // Keep references to the new sentinel tasks so they stay alive while
  // scheduled.
  std::copy(new_raster_finished_tasks,
            new_raster_finished_tasks + kNumberOfTaskSets,
            raster_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1(
      "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
}
// Collects tasks that finished on worker threads and runs their completion
// and reply callbacks on the origin thread.
void OneCopyRasterWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyRasterWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end();
       ++it) {
    RasterizerTask* task = static_cast<RasterizerTask*>(it->get());

    // WillComplete/DidComplete bracket the origin-thread completion step.
    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}
// Returns a RasterBufferImpl that stages playback into a pool resource and
// schedules a copy into |resource| (see RasterBufferImpl above).
scoped_ptr<RasterBuffer> OneCopyRasterWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  DCHECK_EQ(resource->format(), resource_pool_->resource_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_, resource));
}
void OneCopyRasterWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
// Runs on a worker thread. Throttles against kMaxCopyOperations, rasterizes
// |raster_source| into |src|'s mapped GPU memory buffer (with |lock_|
// temporarily released), queues a |src|->|dst| copy operation, and returns
// the sequence number assigned to that copy.
CopySequenceNumber
OneCopyRasterWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst,
    const RasterSource* raster_source,
    const gfx::Rect& rect,
    float scale) {
  base::AutoLock lock(lock_);

  int failed_attempts = 0;
  while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >=
         kMaxCopyOperations) {
    // Ignore limit when shutdown is set.
    if (shutdown_)
      break;

    ++failed_attempts;

    // Schedule a check that will also wait for operations to complete
    // after too many failed attempts.
    bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

    // Schedule a check for completed copy operations if too many operations
    // are currently in-flight.
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

    {
      TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

      // Wait for in-flight copy operations to drop below limit.
      copy_operation_count_cv_.Wait();
    }
  }

  // Increment |scheduled_copy_operation_count_| before releasing |lock_|.
  ++scheduled_copy_operation_count_;

  // There may be more work available, so wake up another worker thread.
  copy_operation_count_cv_.Signal();

  {
    // Playback can be slow; drop the lock so other worker threads make
    // progress, then reacquire it before touching shared state again.
    base::AutoUnlock unlock(lock_);

    gfx::GpuMemoryBuffer* gpu_memory_buffer = write_lock->GetGpuMemoryBuffer();
    if (gpu_memory_buffer) {
      RasterWorkerPool::PlaybackToMemory(gpu_memory_buffer->Map(),
                                         src->format(),
                                         src->size(),
                                         gpu_memory_buffer->GetStride(),
                                         raster_source,
                                         rect,
                                         scale);
      gpu_memory_buffer->Unmap();
    }
  }

  pending_copy_operations_.push_back(
      make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst)));

  // Acquire a sequence number for this copy operation.
  CopySequenceNumber sequence = next_copy_operation_sequence_++;

  // Post task that will advance last flushed copy operation to |sequence|
  // if we have reached the flush period.
  if ((sequence % kCopyFlushPeriod) == 0) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&OneCopyRasterWorkerPool::AdvanceLastFlushedCopyTo,
                   weak_ptr_factory_.GetWeakPtr(),
                   sequence));
  }

  return sequence;
}
346 void OneCopyRasterWorkerPool::AdvanceLastIssuedCopyTo(
347 CopySequenceNumber sequence) {
348 if (last_issued_copy_operation_ >= sequence)
349 return;
351 IssueCopyOperations(sequence - last_issued_copy_operation_);
352 last_issued_copy_operation_ = sequence;
// Ensures all copy operations up to |sequence| are issued, then flushes the
// GL context so they actually reach the GPU process.
void OneCopyRasterWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}
// Runs on the origin thread when all raster tasks in |task_set| are done;
// updates pending state, trace bookkeeping, and notifies the client.
void OneCopyRasterWorkerPool::OnRasterFinished(TaskSet task_set) {
  TRACE_EVENT1(
      "cc", "OneCopyRasterWorkerPool::OnRasterFinished", "task_set", task_set);

  DCHECK(raster_pending_[task_set]);
  raster_pending_[task_set] = false;
  if (raster_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1(
        "cc", "ScheduledTasks", this, "rasterizing", "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTasks(task_set);
}
// Dequeues |count| pending copy operations under |lock_|, then performs the
// actual GL resource copies outside the lock.
void OneCopyRasterWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1(
      "cc", "OneCopyRasterWorkerPool::IssueCopyOperations", "count", count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Decrement |scheduled_copy_operation_count_| and increment
    // |issued_copy_operation_count_| to reflect the transition of copy
    // operations from "pending" to "issued" state.
    DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size());
    scheduled_copy_operation_count_ -= copy_operations.size();
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id());

    // Return source resource to pool where it can be reused once copy
    // operation has completed and resource is no longer busy.
    resource_pool_->ReleaseResource(copy_operation->src.Pass());
  }
}
// Posts a delayed CheckForCompletedCopyOperations() task, rate-limited to
// kCheckForCompletedCopyOperationsTickRateMs. Caller must hold |lock_|.
void OneCopyRasterWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  // Only one check may be in flight at a time.
  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyRasterWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(),
                 wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}
// Origin-thread callback: refreshes the in-flight copy count from the
// resource pool's busy-resource count and wakes any throttled worker thread.
void OneCopyRasterWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyRasterWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed",
               wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}
// Builds the tracing snapshot used by the "ScheduledTasks" async trace
// events: pending flags per task set plus staging-resource statistics.
scoped_refptr<base::debug::ConvertableToTraceFormat>
OneCopyRasterWorkerPool::StateAsValue() const {
  scoped_refptr<base::debug::TracedValue> state =
      new base::debug::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(raster_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}
// Writes staging-pool statistics into |staging_state|. "Pending copy" counts
// are derived as total-minus-acquired: resources handed back to the pool but
// whose copy has not yet completed.
void OneCopyRasterWorkerPool::StagingStateAsValueInto(
    base::debug::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}
506 } // namespace cc