// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/one_copy_tile_task_worker_pool.h"

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {
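
// A RasterBufferImpl borrows a staging resource from the resource pool and
// holds a write lock on its GpuMemoryBuffer. Playback() rasters into that
// buffer on a worker thread and schedules a copy into the output resource;
// the destructor makes sure any scheduled copy has been issued before the
// staging resource is returned to the pool.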
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   ResourceFormat resource_format,
                   const Resource* resource)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(
            resource_pool->AcquireResource(resource->size(), resource_format)),
        lock_(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
            resource_provider_,
            raster_resource_->id())),
        sequence_(0) {}

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return raster resource to pool so it can be used by another
    // RasterBuffer instance.
    if (raster_resource_)
      resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        lock_.Pass(), raster_resource_.Pass(), resource_, raster_source, rect,
        scale);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* resource_;
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 32;

// Delay between checking for copy operations to complete.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;

}  // namespace
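
// A CopyOperation keeps the write-locked staging resource (|write_lock| and
// |src|) alive, together with the destination resource, until the copy is
// issued and the staging resource can be returned to the pool.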
OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst)
    : write_lock(write_lock.Pass()), src(src.Pass()), dst(dst) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      resource_pool));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      copy_operation_count_cv_(&lock_),
      scheduled_copy_operation_count_(0),
      issued_copy_operation_count_(0),
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  DCHECK_EQ(scheduled_copy_operation_count_, 0u);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    copy_operation_count_cv_.Signal();
  }

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
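
// Builds a task graph for |queue|: each raster task is connected to the
// "task set finished" task of every set it belongs to, and the resulting
// graph is handed to |task_graph_runner_| for execution on worker threads.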
void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

#if DCHECK_IS_ON()
  {
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
  }
#endif

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  resource_pool_->CheckBusyResources(false);

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}
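
// Collects tasks that finished on worker threads and runs their completion
// and reply callbacks on the origin thread.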
void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() {
  return resource_provider_->best_texture_format();
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_,
                           resource_provider_->best_texture_format(),
                           resource));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
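
// Called on a worker thread. Rasters |raster_source| into the write-locked
// GpuMemoryBuffer of |src|, queues a copy from |src| to |dst| and returns the
// sequence number assigned to that copy. Blocks while the number of copy
// operations in flight is at kMaxCopyOperations.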
CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    scoped_ptr<ScopedResource> src,
    const Resource* dst,
    const RasterSource* raster_source,
    const gfx::Rect& rect,
    float scale) {
  base::AutoLock lock(lock_);

  int failed_attempts = 0;
  while ((scheduled_copy_operation_count_ + issued_copy_operation_count_) >=
         kMaxCopyOperations) {
    // Ignore limit when shutdown is set.
    if (shutdown_)
      break;

    ++failed_attempts;

    // Schedule a check that will also wait for operations to complete
    // after too many failed attempts.
    bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

    // Schedule a check for completed copy operations if too many operations
    // are currently in-flight.
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

    {
      TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

      // Wait for in-flight copy operations to drop below limit.
      copy_operation_count_cv_.Wait();
    }
  }

  // Increment |scheduled_copy_operation_count_| before releasing |lock_|.
  ++scheduled_copy_operation_count_;

  // There may be more work available, so wake up another worker thread.
  copy_operation_count_cv_.Signal();

  {
    base::AutoUnlock unlock(lock_);

    gfx::GpuMemoryBuffer* gpu_memory_buffer = write_lock->GetGpuMemoryBuffer();
    if (gpu_memory_buffer) {
      void* data = NULL;
      bool rv = gpu_memory_buffer->Map(&data);
      DCHECK(rv);
      int stride;
      gpu_memory_buffer->GetStride(&stride);
      TileTaskWorkerPool::PlaybackToMemory(data, src->format(), src->size(),
                                           stride, raster_source, rect, scale);
      gpu_memory_buffer->Unmap();
    }
  }

  pending_copy_operations_.push_back(
      make_scoped_ptr(new CopyOperation(write_lock.Pass(), src.Pass(), dst)));

  // Acquire a sequence number for this copy operation.
  CopySequenceNumber sequence = next_copy_operation_sequence_++;

  // Post task that will advance last flushed copy operation to |sequence|
  // if we have reached the flush period.
  if ((sequence % kCopyFlushPeriod) == 0) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                   weak_ptr_factory_.GetWeakPtr(), sequence));
  }

  return sequence;
}
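
// Issues all pending copy operations up to and including |sequence|. Calling
// this with an already-issued sequence number is a no-op.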
void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
    CopySequenceNumber sequence) {
  if (last_issued_copy_operation_ >= sequence)
    return;

  IssueCopyOperations(sequence - last_issued_copy_operation_);
  last_issued_copy_operation_ = sequence;
}
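
// Invoked via a posted task once every kCopyFlushPeriod copy operations.
// Issues everything up to |sequence| and then flushes the GL context so the
// issued copy commands are submitted to the GPU process.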
void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}
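
// Moves |count| operations from the pending queue to the issued state and
// performs the copies via ResourceProvider::CopyResource(), releasing each
// staging resource back to the pool afterwards.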
void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Decrement |scheduled_copy_operation_count_| and increment
    // |issued_copy_operation_count_| to reflect the transition of copy
    // operations from "pending" to "issued" state.
    DCHECK_GE(scheduled_copy_operation_count_, copy_operations.size());
    scheduled_copy_operation_count_ -= copy_operations.size();
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id());

    // Return source resource to pool where it can be reused once copy
    // operation has completed and resource is no longer busy.
    resource_pool_->ReleaseResource(copy_operation->src.Pass());
  }
}
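
// Schedules a single delayed CheckForCompletedCopyOperations() call, rate
// limited so that two consecutive checks run no closer together than
// kCheckForCompletedCopyOperationsTickRateMs.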
void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}
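
// Updates |issued_copy_operation_count_| from the number of busy resources in
// the pool and wakes up a worker thread that may be blocked on the copy
// limit.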
void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}

}  // namespace cc