cc/resources/gpu_tile_task_worker_pool.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/gpu_tile_task_worker_pool.h"

#include <algorithm>

#include "base/trace_event/trace_event.h"
#include "cc/resources/gpu_rasterizer.h"
#include "cc/resources/raster_buffer.h"
#include "cc/resources/raster_source.h"
#include "cc/resources/resource.h"
#include "cc/resources/scoped_gpu_raster.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "third_party/skia/include/core/SkMultiPictureDraw.h"
#include "third_party/skia/include/core/SkPictureRecorder.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"

namespace cc {
namespace {
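
// RasterBuffer implementation that plays back a RasterSource directly into
// a GPU resource via GpuRasterizer. Playback() runs on a worker thread under
// the worker context lock and issues an ordering barrier so the compositor
// context sees the rasterized output.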
class RasterBufferImpl : public RasterBuffer {
 public:
  RasterBufferImpl(GpuRasterizer* rasterizer, const Resource* resource)
      : rasterizer_(rasterizer),
        lock_(rasterizer->resource_provider(), resource->id()),
        resource_(resource) {}

  // Overridden from RasterBuffer:
  void Playback(const RasterSource* raster_source,
                const gfx::Rect& rect,
                float scale) override {
    TRACE_EVENT0("cc", "RasterBufferImpl::Playback");
    ContextProvider* context_provider = rasterizer_->resource_provider()
                                            ->output_surface()
                                            ->worker_context_provider();

    // The context lock must be held while accessing the context on a
    // worker thread.
    base::AutoLock context_lock(*context_provider->GetLock());

    // Allow this worker thread to bind to context_provider.
    context_provider->DetachFromThread();

    // Rasterize source into resource.
    rasterizer_->RasterizeSource(&lock_, raster_source, rect, scale);

    // Barrier to sync worker context output to cc context.
    context_provider->ContextGL()->OrderingBarrierCHROMIUM();

    // Allow compositor thread to bind to context_provider.
    context_provider->DetachFromThread();
  }

 private:
  GpuRasterizer* rasterizer_;
  ResourceProvider::ScopedWriteLockGr lock_;
  const Resource* resource_;

  DISALLOW_COPY_AND_ASSIGN(RasterBufferImpl);
};

}  // namespace

// static
scoped_ptr<TileTaskWorkerPool> GpuTileTaskWorkerPool::Create(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    bool use_distance_field_text,
    int gpu_rasterization_msaa_sample_count) {
  return make_scoped_ptr<TileTaskWorkerPool>(new GpuTileTaskWorkerPool(
      task_runner, task_graph_runner, context_provider, resource_provider,
      use_distance_field_text, gpu_rasterization_msaa_sample_count));
}

GpuTileTaskWorkerPool::GpuTileTaskWorkerPool(
    base::SequencedTaskRunner* task_runner,
    TaskGraphRunner* task_graph_runner,
    ContextProvider* context_provider,
    ResourceProvider* resource_provider,
    bool use_distance_field_text,
    int gpu_rasterization_msaa_sample_count)
    : task_runner_(task_runner),
      task_graph_runner_(task_graph_runner),
      namespace_token_(task_graph_runner_->GetNamespaceToken()),
      rasterizer_(new GpuRasterizer(context_provider,
                                    resource_provider,
                                    use_distance_field_text,
                                    gpu_rasterization_msaa_sample_count)),
      task_set_finished_weak_ptr_factory_(this),
      weak_ptr_factory_(this) {
}

GpuTileTaskWorkerPool::~GpuTileTaskWorkerPool() {
  DCHECK_EQ(0u, completed_tasks_.size());
}

TileTaskRunner* GpuTileTaskWorkerPool::AsTileTaskRunner() {
  return this;
}

void GpuTileTaskWorkerPool::SetClient(TileTaskRunnerClient* client) {
  client_ = client;
}
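
// Cancels any queued tasks by scheduling an empty graph, then blocks until
// tasks that are already running have finished.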
void GpuTileTaskWorkerPool::Shutdown() {
  TRACE_EVENT0("cc", "GpuTileTaskWorkerPool::Shutdown");

  TaskGraph empty;
  task_graph_runner_->ScheduleTasks(namespace_token_, &empty);
  task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
}
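
// Builds a task graph for the queued raster work. A "task set finished" task
// is created for each task set and made dependent on every raster task in
// that set, tasks are prepared on the origin thread, and an ordering barrier
// syncs any newly allocated resources to the worker context before the graph
// is handed to the task graph runner.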
void GpuTileTaskWorkerPool::ScheduleTasks(TileTaskQueue* queue) {
  TRACE_EVENT0("cc", "GpuTileTaskWorkerPool::ScheduleTasks");

  // Mark all task sets as pending.
  tasks_pending_.set();

  unsigned priority = kTileTaskPriorityBase;

  graph_.Reset();

  // Cancel existing OnTaskSetFinished callbacks.
  task_set_finished_weak_ptr_factory_.InvalidateWeakPtrs();

  scoped_refptr<TileTask> new_task_set_finished_tasks[kNumberOfTaskSets];

  size_t task_count[kNumberOfTaskSets] = {0};

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    new_task_set_finished_tasks[task_set] = CreateTaskSetFinishedTask(
        task_runner_.get(),
        base::Bind(&GpuTileTaskWorkerPool::OnTaskSetFinished,
                   task_set_finished_weak_ptr_factory_.GetWeakPtr(), task_set));
  }

  for (TileTaskQueue::Item::Vector::const_iterator it = queue->items.begin();
       it != queue->items.end(); ++it) {
    const TileTaskQueue::Item& item = *it;
    RasterTask* task = item.task;
    DCHECK(!task->HasCompleted());

    for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
      if (!item.task_sets[task_set])
        continue;

      ++task_count[task_set];

      graph_.edges.push_back(
          TaskGraph::Edge(task, new_task_set_finished_tasks[task_set].get()));
    }

    InsertNodesForRasterTask(&graph_, task, task->dependencies(), priority++);
  }

  for (TaskSet task_set = 0; task_set < kNumberOfTaskSets; ++task_set) {
    InsertNodeForTask(&graph_, new_task_set_finished_tasks[task_set].get(),
                      kTaskSetFinishedTaskPriorityBase + task_set,
                      task_count[task_set]);
  }

  ScheduleTasksOnOriginThread(this, &graph_);

  // Barrier to sync any new resources to the worker context.
  rasterizer_->resource_provider()
      ->output_surface()
      ->context_provider()
      ->ContextGL()
      ->OrderingBarrierCHROMIUM();

  task_graph_runner_->ScheduleTasks(namespace_token_, &graph_);

  std::copy(new_task_set_finished_tasks,
            new_task_set_finished_tasks + kNumberOfTaskSets,
            task_set_finished_tasks_);
}
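
// Collects tasks that have finished running from the task graph runner and
// completes them on the origin thread.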
void GpuTileTaskWorkerPool::CheckForCompletedTasks() {
  TRACE_EVENT0("cc", "GpuTileTaskWorkerPool::CheckForCompletedTasks");

  task_graph_runner_->CollectCompletedTasks(namespace_token_,
                                            &completed_tasks_);
  CompleteTasks(completed_tasks_);
  completed_tasks_.clear();
}

ResourceFormat GpuTileTaskWorkerPool::GetResourceFormat() {
  return rasterizer_->resource_provider()->best_texture_format();
}
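
// Runs the origin-thread completion and reply callbacks for each finished
// raster task.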
void GpuTileTaskWorkerPool::CompleteTasks(const Task::Vector& tasks) {
  for (auto& task : tasks) {
    RasterTask* raster_task = static_cast<RasterTask*>(task.get());

    raster_task->WillComplete();
    raster_task->CompleteOnOriginThread(this);
    raster_task->DidComplete();

    raster_task->RunReplyOnOriginThread();
  }
  completed_tasks_.clear();
}
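
// Wraps |resource| in a RasterBufferImpl, which holds a GPU write lock on the
// resource until the buffer is released.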
scoped_ptr<RasterBuffer> GpuTileTaskWorkerPool::AcquireBufferForRaster(
    const Resource* resource) {
  return make_scoped_ptr<RasterBuffer>(
      new RasterBufferImpl(rasterizer_.get(), resource));
}

void GpuTileTaskWorkerPool::ReleaseBufferForRaster(
    scoped_ptr<RasterBuffer> buffer) {
  // Nothing to do here. RasterBufferImpl destructor cleans up after itself.
}
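
// Called on the origin thread when all tasks in |task_set| have finished;
// clears the pending bit and notifies the client.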
void GpuTileTaskWorkerPool::OnTaskSetFinished(TaskSet task_set) {
  TRACE_EVENT1("cc", "GpuTileTaskWorkerPool::OnTaskSetFinished", "task_set",
               task_set);

  DCHECK(tasks_pending_[task_set]);
  tasks_pending_[task_set] = false;
  client_->DidFinishRunningTileTasks(task_set);
}

}  // namespace cc