// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/zero_copy_tile_task_worker_pool.h"

#include <algorithm>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/platform_color.h"
#include "cc/resources/resource.h"
#include "ui/gfx/gpu_memory_buffer.h"

namespace cc {
namespace {
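// RasterBufferImpl implements the zero-copy raster path: it write-locks the
// destination resource's GpuMemoryBuffer and plays the RasterSource back
// directly into the mapped buffer memory, so no intermediate staging copy is
// needed.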
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(ResourceProvider* resource_provider,
                   const Resource* resource)
      : lock_(resource_provider, resource->id()), resource_(resource) {}
  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& raster_full_rect,
                const gfx::Rect& raster_dirty_rect,
                uint64_t new_content_id,
                float scale) override {
    gfx::GpuMemoryBuffer* gpu_memory_buffer = lock_.GetGpuMemoryBuffer();
    if (!gpu_memory_buffer)
      return;

    void* data = NULL;
    bool rv = gpu_memory_buffer->Map(&data);
    DCHECK(rv);
    int stride;
    gpu_memory_buffer->GetStride(&stride);
    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
    DCHECK_GE(stride, 0);

    // TODO(danakj): Implement partial raster with raster_dirty_rect.
    TileTaskWorkerPool::PlaybackToMemory(
        data, resource_->format(), resource_->size(),
        static_cast<size_t>(stride), raster_source, raster_full_rect,
        raster_full_rect, scale);
    gpu_memory_buffer->Unmap();
  }
 private:
  ResourceProvider::ScopedWriteLockGpuMemoryBuffer lock_;
  const Resource* resource_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

}  // namespace
// static
scoped_ptr<TileTaskWorkerPool> ZeroCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider) {
  return make_scoped_ptr<TileTaskWorkerPool>(new ZeroCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, resource_provider));
}
ZeroCopyTileTaskWorkerPool::ZeroCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      resource_provider_(resource_provider),
      task_set_finished_weak_ptr_factory_(this) {
}

ZeroCopyTileTaskWorkerPool::~ZeroCopyTileTaskWorkerPool() {
}
TileTaskRunner* ZeroCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void ZeroCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}
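// Shutdown schedules an empty graph in place of any previous one, so tasks
// that have not started yet are dropped by the TaskGraphRunner, and then
// blocks until tasks that are already running have finished.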
void ZeroCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::Shutdown");

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
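// ScheduleTasks rebuilds the task graph: each raster task is inserted with an
// increasing priority, and for every task set it belongs to an edge is added
// to that set's "finished" task so OnTaskSetFinished fires once the whole set
// has run.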
void ZeroCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::ScheduleTasks");

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  size_t priority = kTileTaskPriorityBase;
  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};
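  // Create one "finished" sentinel task per task set; the edges added below
  // make each of these run only after every raster task in its set.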
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&ZeroCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }
  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }
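  // The "finished" tasks are inserted with a dependency count equal to the
  // number of raster tasks in their set, so each one runs only after its
  // whole set has completed.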
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}
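// Completed (or cancelled) tasks are collected from the TaskGraphRunner and
// their completion and reply callbacks are run here on the origin thread.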
void ZeroCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();

    task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}
ResourceFormat ZeroCopyTileTaskWorkerPool::GetResourceFormat() const {
  return resource_provider_->best_texture_format();
}

bool ZeroCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
  return !PlatformColor::SameComponentOrder(GetResourceFormat());
}
scoped_ptr<RasterBuffer> ZeroCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(resource_provider_, resource));
}
void ZeroCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
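// Runs when one of the task-set-finished tasks scheduled above completes. The
// weak pointer bound in ScheduleTasks drops the callback if the schedule has
// been replaced in the meantime.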
void ZeroCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "ZeroCopyTileTaskWorkerPool::OnTaskSetFinished",
               "task_set", task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}
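// Produces the "state" value used by the ScheduledTasks trace events above:
// one boolean per task set indicating whether it is still pending.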
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
ZeroCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();

  return state;
}

}  // namespace cc