// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/raster/zero_copy_tile_task_worker_pool.h"

#include <algorithm>

#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/debug/traced_value.h"
#include "cc/raster/raster_buffer.h"
#include "cc/resources/platform_color.h"
#include "cc/resources/resource.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/gpu_memory_buffer.h"
namespace cc {
namespace {
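
// RasterBuffer implementation that plays raster work back directly into the
// GPU memory buffer backing a resource. The buffer stays write-locked for the
// lifetime of this object; because rasterization writes straight into the
// resource's own memory, no intermediate staging copy is needed (hence
// "zero-copy").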
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(ResourceProvider* resource_provider,
                   const Resource* resource)
      : lock_(resource_provider, resource->id()), resource_(resource) {}

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& raster_full_rect,
                const gfx::Rect& raster_dirty_rect,
                uint64_t new_content_id,
                float scale,
                bool include_images) override {
    gfx::GpuMemoryBuffer* gpu_memory_buffer = lock_.GetGpuMemoryBuffer();
    if (!gpu_memory_buffer)
      return;
    DCHECK_EQ(
        1u, gfx::NumberOfPlanesForBufferFormat(gpu_memory_buffer->GetFormat()));
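    // Map the buffer into CPU-accessible memory and rasterize straight into
    // it; the mapped memory is the resource's backing store, so nothing has to
    // be copied out afterwards.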
    void* data = NULL;
    bool rv = gpu_memory_buffer->Map(&data);
    DCHECK(rv);
    int stride;
    gpu_memory_buffer->GetStride(&stride);
    // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides.
    DCHECK_GE(stride, 0);
    // TODO(danakj): Implement partial raster with raster_dirty_rect.
    TileTaskWorkerPool::PlaybackToMemory(
        data, resource_->format(), resource_->size(),
        static_cast<size_t>(stride), raster_source, raster_full_rect,
        raster_full_rect, scale, include_images);
    gpu_memory_buffer->Unmap();
  }

 private:
  ResourceProvider::ScopedWriteLockGpuMemoryBuffer lock_;
  const Resource* resource_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

}  // namespace

// static
scoped_ptr<TileTaskWorkerPool> ZeroCopyTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider) {
  return make_scoped_ptr<TileTaskWorkerPool>(new ZeroCopyTileTaskWorkerPool(
      task_runner, task_graph_runner, resource_provider));
}

ZeroCopyTileTaskWorkerPool::ZeroCopyTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ResourceProvider* resource_provider)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner->GetNamespaceToken()),
      resource_provider_(resource_provider),
      task_set_finished_weak_ptr_factory_(this) {
}

ZeroCopyTileTaskWorkerPool::~ZeroCopyTileTaskWorkerPool() {
}

TileTaskRunner* ZeroCopyTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void ZeroCopyTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}

void ZeroCopyTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::Shutdown");
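
  // Scheduling an empty graph replaces the previously scheduled graph for our
  // namespace, which cancels every task that has not started running yet.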
  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}

void ZeroCopyTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::ScheduleTasks");

  if (tasks_pending_.none())
    TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);

  // Mark all task sets as pending.
  tasks_pending_.set();

  size_t priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&ZeroCopyTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }
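
  // Add a node for every raster task in the queue, plus an edge from the task
  // to the "finished" task of each task set it belongs to, so a finished task
  // can only run once all raster tasks in its set have completed.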
  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }
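
  // Give new tasks a chance to run their ScheduleOnOriginThread hook, then
  // hand the graph to the shared TaskGraphRunner; this replaces whatever graph
  // was scheduled for our namespace before.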
  ScheduleTasksOnOriginThread(this, &graph_);
  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);

  TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running", "state",
                               StateAsValue());
}
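
// Collects tasks that the TaskGraphRunner has finished running and runs their
// completion callbacks on the origin thread.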
void ZeroCopyTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "ZeroCopyTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  for (Task::Vector::const_iterator it = completed_tasks_.begin();
       it != completed_tasks_.end(); ++it) {
    TileTask* task = static_cast<TileTask*>(it->get());

    task->WillComplete();
    task->CompleteOnOriginThread(this);
    task->DidComplete();
  }
  completed_tasks_.clear();
}

ResourceFormat ZeroCopyTileTaskWorkerPool::GetResourceFormat() const {
  return resource_provider_->memory_efficient_texture_format();
}
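
// Raster output needs its color components swizzled when the chosen resource
// format does not match the platform's native component order (e.g. RGBA vs.
// BGRA).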
bool ZeroCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const {
  return !PlatformColor::SameComponentOrder(GetResourceFormat());
}
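
// The returned buffer holds a write lock on the resource's GPU memory buffer
// and rasterizes directly into it. The content ids are ignored because partial
// raster is not implemented here (see the TODO in RasterBufferImpl::Playback).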
scoped_ptr<RasterBuffer> ZeroCopyTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource,
    uint64_t resource_content_id,
    uint64_t previous_content_id) {
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(resource_provider_, resource));
}

void ZeroCopyTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
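
// Called on the origin thread once every task in |task_set| has finished
// running. Updates the "ScheduledTasks" trace state and notifies the client.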
void ZeroCopyTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "ZeroCopyTileTaskWorkerPool::OnTaskSetFinished",
               "task_set", task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  if (tasks_pending_.any()) {
    TRACE_EVENT_ASYNC_STEP_INTO1("cc", "ScheduledTasks", this, "running",
                                 "state", StateAsValue());
  } else {
    TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
  }
  client_->DidFinishRunningTileTasks(task_set);
}
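
// Dumps which task sets are still pending, used as the "state" argument of the
// "ScheduledTasks" async trace event.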
scoped_refptr<base::trace_event::ConvertableToTraceFormat>
ZeroCopyTileTaskWorkerPool::StateAsValue() const {
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();

  state->BeginArray("tasks_pending");
  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set)
    state->AppendBoolean(tasks_pending_[task_set]);
  state->EndArray();
  return state;
}

}  // namespace cc