// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/resources/tile_task_worker_pool.h"

#include <algorithm>

#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/strings/stringprintf.h"
#include "base/threading/simple_thread.h"
#include "cc/base/scoped_ptr_deque.h"
#include "cc/resources/raster_source.h"
#include "skia/ext/refptr.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkSurface.h"

namespace cc {
namespace {

base::ThreadPriority g_worker_thread_priority = base::kThreadPriority_Normal;
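
// Runs the shared raster TaskGraphRunner on a pool of DelegateSimpleThreads
// named "CompositorTileWorker<N>". The instance lives in a leaky LazyInstance
// below, so it is created on first use and intentionally never destroyed
// (hence the NOTREACHED() destructor).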
class TileTaskGraphRunner : public TaskGraphRunner,
                            public base::DelegateSimpleThread::Delegate {
 public:
  TileTaskGraphRunner() {
    size_t num_threads = TileTaskWorkerPool::GetNumWorkerThreads();
    while (workers_.size() < num_threads) {
      // Name each thread "CompositorTileWorker1", "CompositorTileWorker2", ...
      scoped_ptr<base::DelegateSimpleThread> worker =
          make_scoped_ptr(new base::DelegateSimpleThread(
              this, base::StringPrintf(
                        "CompositorTileWorker%u",
                        static_cast<unsigned>(workers_.size() + 1)).c_str()));
      worker->Start();
      worker->SetThreadPriority(g_worker_thread_priority);
      workers_.push_back(worker.Pass());
    }
  }

  ~TileTaskGraphRunner() override { NOTREACHED(); }

 private:
  // Overridden from base::DelegateSimpleThread::Delegate:
  void Run() override { TaskGraphRunner::Run(); }

  ScopedPtrDeque<base::DelegateSimpleThread> workers_;
};

base::LazyInstance<TileTaskGraphRunner>::Leaky g_task_graph_runner =
    LAZY_INSTANCE_INITIALIZER;

const int kDefaultNumWorkerThreads = 1;

int g_num_worker_threads = 0;
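
// TileTask that posts |on_task_set_finished_callback| back to |task_runner|
// when it runs on a worker thread. Created by CreateTaskSetFinishedTask()
// below to signal completion of a task set.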
class TaskSetFinishedTaskImpl : public TileTask {
 public:
  explicit TaskSetFinishedTaskImpl(
      base::SequencedTaskRunner* task_runner,
      const base::Closure& on_task_set_finished_callback)
      : task_runner_(task_runner),
        on_task_set_finished_callback_(on_task_set_finished_callback) {}

  // Overridden from Task:
  void RunOnWorkerThread() override {
    TRACE_EVENT0("cc", "TaskSetFinishedTaskImpl::RunOnWorkerThread");
    TaskSetFinished();
  }

  // Overridden from TileTask:
  void ScheduleOnOriginThread(TileTaskClient* client) override {}
  void CompleteOnOriginThread(TileTaskClient* client) override {}
  void RunReplyOnOriginThread() override {}

 protected:
  ~TaskSetFinishedTaskImpl() override {}

  void TaskSetFinished() {
    task_runner_->PostTask(FROM_HERE, on_task_set_finished_callback_);
  }

 private:
  scoped_refptr<base::SequencedTaskRunner> task_runner_;
  const base::Closure on_task_set_finished_callback_;

  DISALLOW_COPY_AND_ASSIGN(TaskSetFinishedTaskImpl);
};

}  // namespace

// This allows a micro benchmark system to run tasks with highest priority,
// since it should finish as quickly as possible.
unsigned TileTaskWorkerPool::kBenchmarkTaskPriority = 0u;
// Task priorities that make sure task set finished tasks run before any
// other remaining tasks.
unsigned TileTaskWorkerPool::kTaskSetFinishedTaskPriority = 1u;
unsigned TileTaskWorkerPool::kTileTaskPriorityBase = 2u;

TileTaskWorkerPool::TileTaskWorkerPool() {
}

TileTaskWorkerPool::~TileTaskWorkerPool() {
}

// static
void TileTaskWorkerPool::SetNumWorkerThreads(int num_threads) {
  DCHECK_LT(0, num_threads);
  DCHECK_EQ(0, g_num_worker_threads);

  g_num_worker_threads = num_threads;
}

// static
int TileTaskWorkerPool::GetNumWorkerThreads() {
  if (!g_num_worker_threads)
    g_num_worker_threads = kDefaultNumWorkerThreads;

  return g_num_worker_threads;
}

// static
void TileTaskWorkerPool::SetWorkerThreadPriority(
    base::ThreadPriority priority) {
  g_worker_thread_priority = priority;
}

// static
TaskGraphRunner* TileTaskWorkerPool::GetTaskGraphRunner() {
  return g_task_graph_runner.Pointer();
}
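
// Wraps |on_task_set_finished_callback| in a TaskSetFinishedTaskImpl so the
// callback is posted back to |task_runner| once the task runs on a worker
// thread.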
// static
scoped_refptr<TileTask> TileTaskWorkerPool::CreateTaskSetFinishedTask(
    base::SequencedTaskRunner* task_runner,
    const base::Closure& on_task_set_finished_callback) {
  return make_scoped_refptr(
      new TaskSetFinishedTaskImpl(task_runner, on_task_set_finished_callback));
}
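
// Calls ScheduleOnOriginThread() (bracketed by WillSchedule()/DidSchedule())
// on every task in |graph| that has not yet been scheduled.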
// static
void TileTaskWorkerPool::ScheduleTasksOnOriginThread(TileTaskClient* client,
                                                     TaskGraph* graph) {
  TRACE_EVENT0("cc", "TileTaskWorkerPool::ScheduleTasksOnOriginThread");

  for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
       it != graph->nodes.end(); ++it) {
    TaskGraph::Node& node = *it;
    TileTask* task = static_cast<TileTask*>(node.task);

    if (!task->HasBeenScheduled()) {
      task->WillSchedule();
      task->ScheduleOnOriginThread(client);
      task->DidSchedule();
    }
  }
}
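
// Adds a node for |task| to |graph|. The task must not already be present in
// the graph.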
// static
void TileTaskWorkerPool::InsertNodeForTask(TaskGraph* graph,
                                           TileTask* task,
                                           unsigned priority,
                                           size_t dependencies) {
  DCHECK(std::find_if(graph->nodes.begin(), graph->nodes.end(),
                      TaskGraph::Node::TaskComparator(task)) ==
         graph->nodes.end());
  graph->nodes.push_back(TaskGraph::Node(task, priority, dependencies));
}
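
// Adds |raster_task| to |graph| along with edges from each of its pending
// |decode_tasks|; decode tasks that have already completed are skipped and
// the raster node's dependency count reflects only the pending ones.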
// static
void TileTaskWorkerPool::InsertNodesForRasterTask(
    TaskGraph* graph,
    RasterTask* raster_task,
    const ImageDecodeTask::Vector& decode_tasks,
    unsigned priority) {
  size_t dependencies = 0u;

  // Insert image decode tasks.
  for (ImageDecodeTask::Vector::const_iterator it = decode_tasks.begin();
       it != decode_tasks.end(); ++it) {
    ImageDecodeTask* decode_task = it->get();

    // Skip if already decoded.
    if (decode_task->HasCompleted())
      continue;

    dependencies++;

    // Add decode task if it doesn't already exist in the graph.
    TaskGraph::Node::Vector::iterator decode_it =
        std::find_if(graph->nodes.begin(), graph->nodes.end(),
                     TaskGraph::Node::TaskComparator(decode_task));
    if (decode_it == graph->nodes.end())
      InsertNodeForTask(graph, decode_task, priority, 0u);

    graph->edges.push_back(TaskGraph::Edge(decode_task, raster_task));
  }

  InsertNodeForTask(graph, raster_task, priority, dependencies);
}

static bool IsSupportedPlaybackToMemoryFormat(ResourceFormat format) {
  // Only formats that map to an SkColorType readPixels() can convert to are
  // supported.
  return format == RGBA_4444 || format == RGBA_8888 || format == BGRA_8888;
}
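
// Rasterizes |raster_source| into |memory|. When |format| matches the native
// N32 color type this rasters directly into |memory|; otherwise it rasters
// into a temporary N32 surface and converts the pixels into |format| with
// readPixels().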
// static
void TileTaskWorkerPool::PlaybackToMemory(void* memory,
                                          ResourceFormat format,
                                          const gfx::Size& size,
                                          int stride,
                                          const RasterSource* raster_source,
                                          const gfx::Rect& rect,
                                          float scale) {
  DCHECK(IsSupportedPlaybackToMemoryFormat(format)) << format;

  // Uses kPremul_SkAlphaType since the result is not known to be opaque.
  SkImageInfo info =
      SkImageInfo::MakeN32(size.width(), size.height(), kPremul_SkAlphaType);
  SkColorType buffer_color_type = ResourceFormatToSkColorType(format);
  bool needs_copy = buffer_color_type != info.colorType();

  // Use unknown pixel geometry to disable LCD text.
  SkSurfaceProps surface_props(0, kUnknown_SkPixelGeometry);
  if (raster_source->CanUseLCDText()) {
    // LegacyFontHost will get LCD text and skia figures out what type to use.
    surface_props = SkSurfaceProps(SkSurfaceProps::kLegacyFontHost_InitType);
  }

  if (!stride)
    stride = info.minRowBytes();

  if (!needs_copy) {
    // Raster directly into the caller's buffer.
    skia::RefPtr<SkSurface> surface = skia::AdoptRef(
        SkSurface::NewRasterDirect(info, memory, stride, &surface_props));
    skia::RefPtr<SkCanvas> canvas = skia::SharePtr(surface->getCanvas());
    raster_source->PlaybackToCanvas(canvas.get(), rect, scale);
    return;
  }

  // Raster into a temporary N32 surface, then convert into the requested
  // format while copying into the caller's buffer.
  skia::RefPtr<SkSurface> surface =
      skia::AdoptRef(SkSurface::NewRaster(info, &surface_props));
  skia::RefPtr<SkCanvas> canvas = skia::SharePtr(surface->getCanvas());
  raster_source->PlaybackToCanvas(canvas.get(), rect, scale);

  SkImageInfo dst_info = info;
  dst_info.fColorType = buffer_color_type;
  // TODO(kaanb): The GL pipeline assumes a 4-byte alignment for the
  // bitmap data. There will be no need to call SkAlign4 once crbug.com/293728
  // is fixed.
  const size_t dst_row_bytes = SkAlign4(dst_info.minRowBytes());
  DCHECK_EQ(0u, dst_row_bytes % 4);
  bool success = canvas->readPixels(dst_info, memory, dst_row_bytes, 0, 0);
  DCHECK_EQ(true, success);
}

}  // namespace cc