// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/one_copy_tile_task_worker_pool.h"

#include <algorithm>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/base/math_util.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/gpu_memory_buffer.h"
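
// This pool rasters tile content into GpuMemoryBuffer-backed staging
// resources on worker threads, then issues a single GPU copy per chunk into
// the tile's final resource; that one copy is what the class name refers to.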

namespace cc {
namespace {

class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   ResourceFormat resource_format,
                   const Resource* resource)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        resource_(resource),
        raster_resource_(
            resource_pool->AcquireResource(resource->size(), resource_format)),
        lock_(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
            resource_provider_,
            raster_resource_->id())),
        sequence_(0) {}
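
  // |raster_resource_| is the staging resource rastered into on a worker
  // thread; |resource_| is the final resource that the copy targets.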

  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release
    // the raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return raster resource to pool so it can be used by another
    // RasterBuffer instance.
    if (raster_resource_)
      resource_pool_->ReleaseResource(raster_resource_.Pass());
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        lock_.Pass(), raster_resource_.get(), resource_, raster_source, rect,
        scale);
  }

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* resource_;
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};
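
// Tuning constants for the copy pipeline; the values below trade flush and
// check overhead against copy latency and back-pressure on worker threads.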

// Flush interval when performing copy operations.
const int kCopyFlushPeriod = 4;

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 32;

// Delay between checks for completed copy operations.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;

}  // namespace

OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    const Resource* src,
    const Resource* dst,
    const gfx::Rect& rect)
    : write_lock(write_lock.Pass()), src(src), dst(dst), rect(rect) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}

// static
scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool,
    size_t max_bytes_per_copy_operation) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      resource_pool, max_bytes_per_copy_operation));
}
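
// Example call (hypothetical values): a 1 MiB copy budget splits any larger
// raster into multiple chunked copy operations.
//
//   scoped_ptr<TileTaskWorkerPool> pool = OneCopyTileTaskWorkerPool::Create(
//       task_runner.get(), task_graph_runner, context_provider,
//       resource_provider, resource_pool, 1024 * 1024);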

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool,
    size_t max_bytes_per_copy_operation)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      max_bytes_per_copy_operation_(max_bytes_per_copy_operation),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      lock_(),
      copy_operation_count_cv_(&lock_),
      issued_copy_operation_count_(0),
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  DCHECK_EQ(pending_copy_operations_.size(), 0u);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    copy_operation_count_cv_.Signal();
  }

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
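
// Note: scheduling an empty graph cancels tasks that have not started yet;
// WaitForTasksToFinishRunning() then drains tasks already running.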

void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

#if DCHECK_IS_ON()
  {
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
  }
#endif

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  resource_pool_->CheckBusyResources(false);
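  // The check above reclaims staging resources whose copy operations have
  // completed, so they can back the raster tasks scheduled below.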

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() {
  return resource_provider_->best_texture_format();
}

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_,
                           resource_provider_->best_texture_format(),
                           resource));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> write_lock,
    const Resource* src,
    const Resource* dst,
    const RasterSource* raster_source,
    const gfx::Rect& rect,
    float scale) {
  gfx::GpuMemoryBuffer* gpu_memory_buffer = write_lock->GetGpuMemoryBuffer();
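  // Raster directly into the CPU-visible staging memory when the buffer can
  // be mapped; the GPU copy into |dst| is scheduled below.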
  if (gpu_memory_buffer) {
    void* data = NULL;
    bool rv = gpu_memory_buffer->Map(&data);
    DCHECK(rv);
    int stride;
    gpu_memory_buffer->GetStride(&stride);
    TileTaskWorkerPool::PlaybackToMemory(data, src->format(), src->size(),
                                         stride, raster_source, rect, scale);
    gpu_memory_buffer->Unmap();
  }

  base::AutoLock lock(lock_);

  CopySequenceNumber sequence = 0;
  size_t bytes_per_row =
      (BitsPerPixel(src->format()) * src->size().width()) / 8;
  size_t chunk_size_in_rows = std::max(
      static_cast<size_t>(1), max_bytes_per_copy_operation_ / bytes_per_row);
  // Align chunk size to 4. Required to support compressed texture formats.
  chunk_size_in_rows =
      MathUtil::RoundUp(chunk_size_in_rows, static_cast<size_t>(4));
  size_t y = 0;
  size_t height = src->size().height();
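
  // Copy |src| into |dst| in chunks of at most |chunk_size_in_rows| rows,
  // blocking when in-flight copy operations reach kMaxCopyOperations. For
  // example, a 256x256 RGBA tile has 1024 bytes per row; with a hypothetical
  // 1 MiB |max_bytes_per_copy_operation_| the chunk size is 1024 rows, so the
  // whole tile fits in a single copy operation.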
  while (y < height) {
    int failed_attempts = 0;
    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
           kMaxCopyOperations) {
      // Ignore limit when shutdown is set.
      if (shutdown_)
        break;

      ++failed_attempts;

      // Schedule a check that will also wait for operations to complete
      // after too many failed attempts.
      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

      // Schedule a check for completed copy operations if too many operations
      // are currently in-flight.
      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

      {
        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

        // Wait for in-flight copy operations to drop below limit.
        copy_operation_count_cv_.Wait();
      }
    }

    // There may be more work available, so wake up another worker thread.
    copy_operation_count_cv_.Signal();

    // Copy at most |chunk_size_in_rows|.
    size_t rows_to_copy = std::min(chunk_size_in_rows, height - y);

    // |write_lock| is passed to the first copy operation as it needs to be
    // released before we can issue a copy.
    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
        write_lock.Pass(), src, dst,
        gfx::Rect(0, y, src->size().width(), rows_to_copy))));
    y += rows_to_copy;

    // Acquire a sequence number for this copy operation.
    sequence = next_copy_operation_sequence_++;

    // Post a task that will advance the last flushed copy operation to
    // |sequence| if we have reached the flush period.
    if ((sequence % kCopyFlushPeriod) == 0) {
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                     weak_ptr_factory_.GetWeakPtr(), sequence));
    }
  }

  return sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
    CopySequenceNumber sequence) {
  if (last_issued_copy_operation_ >= sequence)
    return;

  IssueCopyOperations(sequence - last_issued_copy_operation_);
  last_issued_copy_operation_ = sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Increment |issued_copy_operation_count_| to reflect the transition of
    // copy operations from "pending" to "issued" state.
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id(),
                                     copy_operation->rect);
  }
}

void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}
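
// The tick rate above rate-limits how often checks run; |wait_if_needed|
// additionally makes a check block until busy resources become free.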

void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so
    // wake up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  staging_state->SetInteger("staging_resource_count",
                            resource_pool_->total_resource_count());
  staging_state->SetInteger("bytes_used_for_staging_resources",
                            resource_pool_->total_memory_usage_bytes());
  staging_state->SetInteger("pending_copy_count",
                            resource_pool_->total_resource_count() -
                                resource_pool_->acquired_resource_count());
  staging_state->SetInteger("bytes_pending_copy",
                            resource_pool_->total_memory_usage_bytes() -
                                resource_pool_->acquired_memory_usage_bytes());
}

}  // namespace cc