// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/common/gpu/gpu_channel_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"

namespace content {

GpuChannelManager::GpuChannelManager(
    IPC::SyncChannel* channel,
    GpuWatchdog* watchdog,
    base::SingleThreadTaskRunner* task_runner,
    base::SingleThreadTaskRunner* io_task_runner,
    base::WaitableEvent* shutdown_event,
    gpu::SyncPointManager* sync_point_manager,
    GpuMemoryBufferFactory* gpu_memory_buffer_factory)
    : task_runner_(task_runner),
      io_task_runner_(io_task_runner),
      channel_(channel),
      watchdog_(watchdog),
      shutdown_event_(shutdown_event),
      gpu_memory_manager_(
          this,
          GpuMemoryManager::kDefaultMaxSurfacesWithFrontbufferSoftLimit),
      sync_point_manager_(sync_point_manager),
      gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
      weak_factory_(this) {
  DCHECK(task_runner);
  DCHECK(io_task_runner);
}

GpuChannelManager::~GpuChannelManager() {
  // Destroy channels before anything else because of dependencies.
  gpu_channels_.clear();
  if (default_offscreen_surface_.get()) {
    default_offscreen_surface_->Destroy();
    default_offscreen_surface_ = NULL;
  }
}
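
// Lazily creates the in-memory program cache. The cache is only created when
// the driver can return program binaries (GL_ARB_get_program_binary or
// GL_OES_get_program_binary) and switches::kDisableGpuProgramCache is not
// set; otherwise this returns null.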
gpu::gles2::ProgramCache* GpuChannelManager::program_cache() {
  if (!program_cache_.get() &&
      (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
       gfx::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kDisableGpuProgramCache)) {
    program_cache_.reset(new gpu::gles2::MemoryProgramCache());
  }
  return program_cache_.get();
}

gpu::gles2::ShaderTranslatorCache*
GpuChannelManager::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_.get();
}

gpu::gles2::FramebufferCompletenessCache*
GpuChannelManager::framebuffer_completeness_cache() {
  if (!framebuffer_completeness_cache_.get())
    framebuffer_completeness_cache_ =
        new gpu::gles2::FramebufferCompletenessCache;
  return framebuffer_completeness_cache_.get();
}

void GpuChannelManager::RemoveChannel(int client_id) {
  Send(new GpuHostMsg_DestroyChannel(client_id));
  gpu_channels_.erase(client_id);
}

int GpuChannelManager::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannelManager::AddRoute(int32 routing_id, IPC::Listener* listener) {
  router_.AddRoute(routing_id, listener);
}

void GpuChannelManager::RemoveRoute(int32 routing_id) {
  router_.RemoveRoute(routing_id);
}

GpuChannel* GpuChannelManager::LookupChannel(int32 client_id) const {
  const auto& it = gpu_channels_.find(client_id);
  return it != gpu_channels_.end() ? it->second : nullptr;
}
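
// Dispatches control messages (those addressed to the manager itself rather
// than to an individual channel) to the matching handler below. Returns
// false for messages this class does not handle.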
bool GpuChannelManager::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannelManager, msg)
    IPC_MESSAGE_HANDLER(GpuMsg_EstablishChannel, OnEstablishChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CloseChannel, OnCloseChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CreateViewCommandBuffer,
                        OnCreateViewCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_DestroyGpuMemoryBuffer, OnDestroyGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_LoadedShader, OnLoadedShader)
    IPC_MESSAGE_HANDLER(GpuMsg_UpdateValueState, OnUpdateValueState)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
  if (msg.routing_id() == MSG_ROUTING_CONTROL)
    return OnControlMessageReceived(msg);

  return router_.RouteMessage(msg);
}

bool GpuChannelManager::Send(IPC::Message* msg) {
  return channel_->Send(msg);
}
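
// Constructs a new GpuChannel for |client_id| on this manager's task
// runners; the caller takes ownership and is responsible for initializing
// the channel.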
scoped_ptr<GpuChannel> GpuChannelManager::CreateGpuChannel(
    gfx::GLShareGroup* share_group,
    gpu::gles2::MailboxManager* mailbox_manager,
    int client_id,
    uint64_t client_tracing_id,
    bool allow_future_sync_points,
    bool allow_real_time_streams) {
  return make_scoped_ptr(new GpuChannel(
      this, watchdog_, share_group, mailbox_manager, task_runner_.get(),
      io_task_runner_.get(), client_id, client_tracing_id, false,
      allow_future_sync_points, allow_real_time_streams));
}
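
// Handles a browser request to establish a GPU channel for a client. The
// shared GL share group and mailbox manager are created lazily on first use
// and reused for subsequent clients that request sharing.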
void GpuChannelManager::OnEstablishChannel(int client_id,
                                           uint64_t client_tracing_id,
                                           bool share_context,
                                           bool allow_future_sync_points,
                                           bool allow_real_time_streams) {
  gfx::GLShareGroup* share_group = nullptr;
  gpu::gles2::MailboxManager* mailbox_manager = nullptr;
  if (share_context) {
    if (!share_group_.get()) {
      share_group_ = new gfx::GLShareGroup;
      DCHECK(!mailbox_manager_.get());
      mailbox_manager_ = gpu::gles2::MailboxManager::Create();
    }
    share_group = share_group_.get();
    mailbox_manager = mailbox_manager_.get();
  }

  scoped_ptr<GpuChannel> channel = CreateGpuChannel(
      share_group, mailbox_manager, client_id, client_tracing_id,
      allow_future_sync_points, allow_real_time_streams);
  IPC::ChannelHandle channel_handle = channel->Init(shutdown_event_);

  gpu_channels_.set(client_id, channel.Pass());

  Send(new GpuHostMsg_ChannelEstablished(channel_handle));
}

void GpuChannelManager::OnCloseChannel(
    const IPC::ChannelHandle& channel_handle) {
  for (auto it = gpu_channels_.begin(); it != gpu_channels_.end(); ++it) {
    if (it->second->channel_id() == channel_handle.name) {
      gpu_channels_.erase(it);
      return;
    }
  }
}
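
// Routes a view command buffer creation request to the channel that owns
// |client_id| and reports the result back to the browser process.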
void GpuChannelManager::OnCreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    int32 client_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  CreateCommandBufferResult result = CREATE_COMMAND_BUFFER_FAILED;

  auto it = gpu_channels_.find(client_id);
  if (it != gpu_channels_.end()) {
    result = it->second->CreateViewCommandBuffer(window, surface_id,
                                                 init_params, route_id);
  }

  Send(new GpuHostMsg_CommandBufferCreated(result));
}

void GpuChannelManager::DestroyGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelManager::DestroyGpuMemoryBufferOnIO,
                            base::Unretained(this), id, client_id));
}

void GpuChannelManager::DestroyGpuMemoryBufferOnIO(
    gfx::GpuMemoryBufferId id,
    int client_id) {
  gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
}
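
// Destroys a GPU memory buffer, deferring the destruction until the given
// sync point has been retired when one is supplied.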
void GpuChannelManager::OnDestroyGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id,
    int32 sync_point) {
  if (!sync_point) {
    DestroyGpuMemoryBuffer(id, client_id);
  } else {
    sync_point_manager()->AddSyncPointCallback(
        sync_point,
        base::Bind(&GpuChannelManager::DestroyGpuMemoryBuffer,
                   base::Unretained(this),
                   id,
                   client_id));
  }
}

void GpuChannelManager::OnUpdateValueState(
    int client_id, unsigned int target, const gpu::ValueState& state) {
  // Only pass updated state to the channel corresponding to the
  // render_widget_host where the event originated.
  auto it = gpu_channels_.find(client_id);
  if (it != gpu_channels_.end())
    it->second->HandleUpdateValueState(target, state);
}

void GpuChannelManager::OnLoadedShader(std::string program_proto) {
  if (program_cache())
    program_cache()->LoadProgram(program_proto);
}
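
// Returns the largest unprocessed message order number across all channels.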
uint32_t GpuChannelManager::GetUnprocessedOrderNum() const {
  uint32_t unprocessed_order_num = 0;
  for (auto& kv : gpu_channels_) {
    unprocessed_order_num =
        std::max(unprocessed_order_num, kv.second->GetUnprocessedOrderNum());
  }
  return unprocessed_order_num;
}

uint32_t GpuChannelManager::GetProcessedOrderNum() const {
  uint32_t processed_order_num = 0;
  for (auto& kv : gpu_channels_) {
    processed_order_num =
        std::max(processed_order_num, kv.second->GetProcessedOrderNum());
  }
  return processed_order_num;
}
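
// Marks every context on every channel as lost, then tears the channels down
// on the main task runner via OnLoseAllContexts().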
void GpuChannelManager::LoseAllContexts() {
  for (auto& kv : gpu_channels_) {
    kv.second->MarkAllContextsLost();
  }
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&GpuChannelManager::OnLoseAllContexts,
                                    weak_factory_.GetWeakPtr()));
}

void GpuChannelManager::OnLoseAllContexts() {
  gpu_channels_.clear();
}
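
// Lazily creates the default offscreen GL surface and caches it for reuse.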
gfx::GLSurface* GpuChannelManager::GetDefaultOffscreenSurface() {
  if (!default_offscreen_surface_.get()) {
    default_offscreen_surface_ =
        gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size());
  }
  return default_offscreen_surface_.get();
}

}  // namespace content