// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/one_copy_tile_task_worker_pool.h"

#include <algorithm>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/base/math_util.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/platform_color.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/scoped_resource.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(OneCopyTileTaskWorkerPool* worker_pool,
                   ResourceProvider* resource_provider,
                   ResourcePool* resource_pool,
                   ResourceFormat resource_format,
                   const Resource* output_resource,
                   uint64_t previous_content_id)
      : worker_pool_(worker_pool),
        resource_provider_(resource_provider),
        resource_pool_(resource_pool),
        output_resource_(output_resource),
        raster_content_id_(0),
        sequence_(0) {
    if (worker_pool->have_persistent_gpu_memory_buffers() &&
        previous_content_id) {
      raster_resource_ =
          resource_pool->TryAcquireResourceWithContentId(previous_content_id);
    }
    if (raster_resource_) {
      raster_content_id_ = previous_content_id;
      DCHECK_EQ(resource_format, raster_resource_->format());
      DCHECK_EQ(output_resource->size().ToString(),
                raster_resource_->size().ToString());
    } else {
      raster_resource_ = resource_pool->AcquireResource(output_resource->size(),
                                                        resource_format);
    }

    lock_.reset(new ResourceProvider::ScopedWriteLockGpuMemoryBuffer(
        resource_provider_, raster_resource_->id()));
  }
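
  // Teardown order matters here: the write lock must be released before any
  // copy can be issued, and AdvanceLastIssuedCopyTo() makes sure the copy for
  // |sequence_| is issued before the staging resource goes back to the pool.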
  ~RasterBufferImpl() override {
    // Release write lock in case a copy was never scheduled.
    lock_.reset();

    // Make sure any scheduled copy operations are issued before we release the
    // raster resource.
    if (sequence_)
      worker_pool_->AdvanceLastIssuedCopyTo(sequence_);

    // Return resources to pool so they can be used by another RasterBuffer
    // instance.
    resource_pool_->ReleaseResource(raster_resource_.Pass(),
                                    raster_content_id_);
  }

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& raster_full_rect,
                const gfx::Rect& raster_dirty_rect,
                uint64_t new_content_id,
                float scale,
                bool include_images) override {
    // If there's a raster_content_id_, we are reusing a resource with that
    // content id.
    bool reusing_raster_resource = raster_content_id_ != 0;
    sequence_ = worker_pool_->PlaybackAndScheduleCopyOnWorkerThread(
        reusing_raster_resource, lock_.Pass(), raster_resource_.get(),
        output_resource_, raster_source, raster_full_rect, raster_dirty_rect,
        scale, include_images);
    // Store the content id of the resource to return to the pool.
    raster_content_id_ = new_content_id;
  }
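
  // The above is the "one copy" this class is named for: raster output lands
  // directly in the staging GpuMemoryBuffer, and the only transfer is the
  // chunked GPU-side copy from |raster_resource_| into |output_resource_|.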

 private:
  OneCopyTileTaskWorkerPool* worker_pool_;
  ResourceProvider* resource_provider_;
  ResourcePool* resource_pool_;
  const Resource* output_resource_;
  uint64_t raster_content_id_;
  scoped_ptr<ScopedResource> raster_resource_;
  scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> lock_;
  CopySequenceNumber sequence_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

// Number of in-flight copy operations to allow.
const int kMaxCopyOperations = 32;

// Delay between checks for completed copy operations.
const int kCheckForCompletedCopyOperationsTickRateMs = 1;

// Number of failed attempts to allow before we perform a check that will
// wait for copy operations to complete if needed.
const int kFailedAttemptsBeforeWaitIfNeeded = 256;

// 4MiB is the size of 4 512x512 tiles, which has proven to be a good
// default batch size for copy operations.
const int kMaxBytesPerCopyOperation = 1024 * 1024 * 4;
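
// (For reference: a 512x512 tile at 4 bytes per pixel is 512 * 512 * 4 =
// 1 MiB, so one 4 MiB batch covers four such tiles, assuming an RGBA format.)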

}  // namespace

OneCopyTileTaskWorkerPool::CopyOperation::CopyOperation(
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer> src_write_lock,
    const Resource* src,
    const Resource* dst,
    const gfx::Rect& rect)
    : src_write_lock(src_write_lock.Pass()), src(src), dst(dst), rect(rect) {
}

OneCopyTileTaskWorkerPool::CopyOperation::~CopyOperation() {
}

scoped_ptr<TileTaskWorkerPool> OneCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool,
    int max_copy_texture_chromium_size,
    bool have_persistent_gpu_memory_buffers) {
  return make_scoped_ptr<TileTaskWorkerPool>(new OneCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      resource_pool, max_copy_texture_chromium_size,
      have_persistent_gpu_memory_buffers));
}

OneCopyTileTaskWorkerPool::OneCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    ResourcePool* resource_pool,
    int max_copy_texture_chromium_size,
    bool have_persistent_gpu_memory_buffers)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      context_provider_(context_provider),
      resource_provider_(resource_provider),
      resource_pool_(resource_pool),
      max_bytes_per_copy_operation_(
          max_copy_texture_chromium_size
              ? std::min(kMaxBytesPerCopyOperation,
                         max_copy_texture_chromium_size)
              : kMaxBytesPerCopyOperation),
      have_persistent_gpu_memory_buffers_(have_persistent_gpu_memory_buffers),
      last_issued_copy_operation_(0),
      last_flushed_copy_operation_(0),
      copy_operation_count_cv_(&lock_),
      bytes_scheduled_since_last_flush_(0),
      issued_copy_operation_count_(0),
      next_copy_operation_sequence_(1),
      check_for_completed_copy_operations_pending_(false),
      shutdown_(false),
      weak_ptr_factory_(this),
      task_set_finished_weak_ptr_factory_(this) {
  DCHECK(context_provider_);
}

OneCopyTileTaskWorkerPool::~OneCopyTileTaskWorkerPool() {
  DCHECK_EQ(pending_copy_operations_.size(), 0u);
}

TileTaskRunner* OneCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void OneCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void OneCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::Shutdown");

  {
    base::AutoLock lock(lock_);

    shutdown_ = true;
    copy_operation_count_cv_.Signal();
  }

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
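
// Note: signaling |copy_operation_count_cv_| in Shutdown() unblocks any worker
// thread waiting in PlaybackAndScheduleCopyOnWorkerThread(), which ignores the
// in-flight copy limit once |shutdown_| is set.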

void OneCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::ScheduleTasks");

#if DCHECK_IS_ON()
  {
    base::AutoLock lock(lock_);
    DCHECK(!shutdown_);
  }
#endif

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  size_t priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&OneCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  resource_pool_->CheckBusyResources(false);

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  resource_pool_->ReduceResourceUsage();

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}
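
// Note: each "task set finished" task is wired as a dependent of every raster
// task in its set, so it runs exactly once, after the |task_count[task_set]|
// raster tasks it depends on have completed.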

void OneCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "OneCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);

  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}

ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const {
  return resource_provider_->best_texture_format();
}

bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
  return !PlatformColor::SameComponentOrder(GetResourceFormat());
}
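
// A format requires swizzle when its color component order differs from the
// order the rasterizer produces (PlatformColor::SameComponentOrder); the
// components are then presumably reordered at upload time.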

scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  // TODO(danakj): If resource_content_id != 0, we only need to copy/upload
  // the dirty rect.
  DCHECK_EQ(resource->format(), resource_provider_->best_texture_format());
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(this, resource_provider_, resource_pool_,
                           resource_provider_->best_texture_format(), resource,
                           previous_content_id));
}

void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}

CopySequenceNumber
OneCopyTileTaskWorkerPool::PlaybackAndScheduleCopyOnWorkerThread(
    bool reusing_raster_resource,
    scoped_ptr<ResourceProvider::ScopedWriteLockGpuMemoryBuffer>
        raster_resource_write_lock,
    const Resource* raster_resource,
    const Resource* output_resource,
    const RasterSource* raster_source,
    const gfx::Rect& raster_full_rect,
    const gfx::Rect& raster_dirty_rect,
    float scale,
    bool include_images) {
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      raster_resource_write_lock->GetGpuMemoryBuffer();
  if (gpu_memory_buffer) {
    void* data = nullptr;
    bool rv = gpu_memory_buffer->Map(&data);
    DCHECK(rv);
    int stride;
    gpu_memory_buffer->GetStride(&stride);
    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
    DCHECK_GE(stride, 0);

    gfx::Rect playback_rect = raster_full_rect;
    if (reusing_raster_resource) {
      playback_rect.Intersect(raster_dirty_rect);
    }
    DCHECK(!playback_rect.IsEmpty())
        << "Why are we rastering a tile that's not dirty?";
    TileTaskWorkerPool::PlaybackToMemory(
        data, raster_resource->format(), raster_resource->size(),
        static_cast<size_t>(stride), raster_source, raster_full_rect,
        playback_rect, scale, include_images);
    gpu_memory_buffer->Unmap();
  }

  base::AutoLock lock(lock_);

  CopySequenceNumber sequence = 0;
  int bytes_per_row = (BitsPerPixel(raster_resource->format()) *
                       raster_resource->size().width()) /
                      8;
  int chunk_size_in_rows =
      std::max(1, max_bytes_per_copy_operation_ / bytes_per_row);
  // Align chunk size to 4. Required to support compressed texture formats.
  chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4);
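  // For example, assuming a 512-pixel-wide RGBA_8888 resource (hypothetical
  // numbers): bytes_per_row = (32 * 512) / 8 = 2048, so the 4 MiB budget
  // allows 4194304 / 2048 = 2048 rows per chunk, already a multiple of 4.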
  int y = 0;
  int height = raster_resource->size().height();
  while (y < height) {
    int failed_attempts = 0;
    while ((pending_copy_operations_.size() + issued_copy_operation_count_) >=
           kMaxCopyOperations) {
      // Ignore limit when shutdown is set.
      if (shutdown_)
        break;

      ++failed_attempts;

      // Schedule a check that will also wait for operations to complete
      // after too many failed attempts.
      bool wait_if_needed = failed_attempts > kFailedAttemptsBeforeWaitIfNeeded;

      // Schedule a check for completed copy operations if too many operations
      // are currently in-flight.
      ScheduleCheckForCompletedCopyOperationsWithLockAcquired(wait_if_needed);

      {
        TRACE_EVENT0("cc", "WaitingForCopyOperationsToComplete");

        // Wait for in-flight copy operations to drop below limit.
        copy_operation_count_cv_.Wait();
      }
    }

    // There may be more work available, so wake up another worker thread.
    copy_operation_count_cv_.Signal();
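
    // (Each woken thread re-signals once it gets below the limit, so blocked
    // workers are released one at a time as capacity frees up, rather than
    // via a broadcast.)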

    // Copy at most |chunk_size_in_rows|.
    int rows_to_copy = std::min(chunk_size_in_rows, height - y);
    DCHECK_GT(rows_to_copy, 0);

    // |raster_resource_write_lock| is passed to the first copy operation as it
    // needs to be released before we can issue a copy.
    pending_copy_operations_.push_back(make_scoped_ptr(new CopyOperation(
        raster_resource_write_lock.Pass(), raster_resource, output_resource,
        gfx::Rect(0, y, raster_resource->size().width(), rows_to_copy))));
    y += rows_to_copy;

    // Acquire a sequence number for this copy operation.
    sequence = next_copy_operation_sequence_++;

    // Increment |bytes_scheduled_since_last_flush_| by the amount of memory
    // used for this copy operation.
    bytes_scheduled_since_last_flush_ += rows_to_copy * bytes_per_row;

    // Post task that will advance last flushed copy operation to |sequence|
    // when |bytes_scheduled_since_last_flush_| has reached
    // |max_bytes_per_copy_operation_|.
    if (bytes_scheduled_since_last_flush_ >= max_bytes_per_copy_operation_) {
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo,
                     weak_ptr_factory_.GetWeakPtr(), sequence));
      bytes_scheduled_since_last_flush_ = 0;
    }
  }

  return sequence;
}
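
// "Issued" vs. "flushed": AdvanceLastIssuedCopyTo() turns pending operations
// into GL copy commands, while AdvanceLastFlushedCopyTo() additionally flushes
// those commands once a batch worth of bytes has accumulated.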

void OneCopyTileTaskWorkerPool::AdvanceLastIssuedCopyTo(
    CopySequenceNumber sequence) {
  if (last_issued_copy_operation_ >= sequence)
    return;

  IssueCopyOperations(sequence - last_issued_copy_operation_);
  last_issued_copy_operation_ = sequence;
}

void OneCopyTileTaskWorkerPool::AdvanceLastFlushedCopyTo(
    CopySequenceNumber sequence) {
  if (last_flushed_copy_operation_ >= sequence)
    return;

  AdvanceLastIssuedCopyTo(sequence);

  // Flush all issued copy operations.
  context_provider_->ContextGL()->ShallowFlushCHROMIUM();
  last_flushed_copy_operation_ = last_issued_copy_operation_;
}
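
// Batching up to |max_bytes_per_copy_operation_| before each
// ShallowFlushCHROMIUM() amortizes the cost of flushing the command buffer
// over several chunked copies.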

void OneCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}

void OneCopyTileTaskWorkerPool::IssueCopyOperations(int64 count) {
  TRACE_EVENT1("cc", "OneCopyTileTaskWorkerPool::IssueCopyOperations", "count",
               count);

  CopyOperation::Deque copy_operations;

  {
    base::AutoLock lock(lock_);

    for (int64 i = 0; i < count; ++i) {
      DCHECK(!pending_copy_operations_.empty());
      copy_operations.push_back(pending_copy_operations_.take_front());
    }

    // Increment |issued_copy_operation_count_| to reflect the transition of
    // copy operations from "pending" to "issued" state.
    issued_copy_operation_count_ += copy_operations.size();
  }

  while (!copy_operations.empty()) {
    scoped_ptr<CopyOperation> copy_operation = copy_operations.take_front();

    // Remove the write lock.
    copy_operation->src_write_lock.reset();

    // Copy contents of source resource to destination resource.
    resource_provider_->CopyResource(copy_operation->src->id(),
                                     copy_operation->dst->id(),
                                     copy_operation->rect);
  }
}
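
// Note: |lock_| is held only while moving operations off the pending queue;
// the CopyResource() calls happen outside the lock so raster worker threads
// are not blocked while the GL copy commands are generated.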

void OneCopyTileTaskWorkerPool::
    ScheduleCheckForCompletedCopyOperationsWithLockAcquired(
        bool wait_if_needed) {
  lock_.AssertAcquired();

  if (check_for_completed_copy_operations_pending_)
    return;

  base::TimeTicks now = base::TimeTicks::Now();

  // Schedule a check for completed copy operations as soon as possible but
  // don't allow two consecutive checks to be scheduled to run less than the
  // tick rate apart.
  base::TimeTicks next_check_for_completed_copy_operations_time =
      std::max(last_check_for_completed_copy_operations_time_ +
                   base::TimeDelta::FromMilliseconds(
                       kCheckForCompletedCopyOperationsTickRateMs),
               now);

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations,
                 weak_ptr_factory_.GetWeakPtr(), wait_if_needed),
      next_check_for_completed_copy_operations_time - now);

  last_check_for_completed_copy_operations_time_ =
      next_check_for_completed_copy_operations_time;
  check_for_completed_copy_operations_pending_ = true;
}
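
// For example, with the 1 ms tick rate: if a new request arrives 0.4 ms after
// the previous check's scheduled time, the next check is posted with a 0.6 ms
// delay instead of running immediately, coalescing back-to-back requests.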

void OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations(
    bool wait_if_needed) {
  TRACE_EVENT1("cc",
               "OneCopyTileTaskWorkerPool::CheckForCompletedCopyOperations",
               "wait_if_needed", wait_if_needed);

  resource_pool_->CheckBusyResources(wait_if_needed);

  {
    base::AutoLock lock(lock_);

    DCHECK(check_for_completed_copy_operations_pending_);
    check_for_completed_copy_operations_pending_ = false;

    // The number of busy resources in the pool reflects the number of issued
    // copy operations that have not yet completed.
    issued_copy_operation_count_ = resource_pool_->busy_resource_count();

    // There may be work blocked on too many in-flight copy operations, so wake
    // up a worker thread.
    copy_operation_count_cv_.Signal();
  }
}
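
// When |wait_if_needed| is true, CheckBusyResources() is relied on to block
// until busy resources become free again (presumably by waiting on the GPU
// inside ResourcePool), which guarantees forward progress for worker threads
// throttled at kMaxCopyOperations.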

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
OneCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  state->BeginDictionary("staging_state");
  StagingStateAsValueInto(state.get());
  state->EndDictionary();

  return state;
}

void OneCopyTileTaskWorkerPool::StagingStateAsValueInto(
    base::trace_event::TracedValue* staging_state) const {
  staging_state->SetInteger(
      "staging_resource_count",
      static_cast<int>(resource_pool_->total_resource_count()));
  staging_state->SetInteger(
      "bytes_used_for_staging_resources",
      static_cast<int>(resource_pool_->total_memory_usage_bytes()));
  staging_state->SetInteger(
      "pending_copy_count",
      static_cast<int>(resource_pool_->total_resource_count() -
                       resource_pool_->acquired_resource_count()));
  staging_state->SetInteger(
      "bytes_pending_copy",
      static_cast<int>(resource_pool_->total_memory_usage_bytes() -
                       resource_pool_->acquired_memory_usage_bytes()));
}

}  // namespace cc