// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_channel_manager.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"

namespace content {

GpuChannelManager::GpuMemoryBufferOperation::GpuMemoryBufferOperation(
    int32 sync_point,
    base::Closure callback)
    : sync_point(sync_point), callback(callback) {
}

GpuChannelManager::GpuMemoryBufferOperation::~GpuMemoryBufferOperation() {
}

GpuChannelManager::GpuChannelManager(MessageRouter* router,
                                     GpuWatchdog* watchdog,
                                     base::MessageLoopProxy* io_message_loop,
                                     base::WaitableEvent* shutdown_event)
    : weak_factory_(this),
      io_message_loop_(io_message_loop),
      shutdown_event_(shutdown_event),
      router_(router),
      gpu_memory_manager_(
          this,
          GpuMemoryManager::kDefaultMaxSurfacesWithFrontbufferSoftLimit),
      watchdog_(watchdog),
      sync_point_manager_(new SyncPointManager),
      gpu_memory_buffer_factory_(GpuMemoryBufferFactory::Create()) {
  DCHECK(router);
  DCHECK(io_message_loop);
  DCHECK(shutdown_event);
}

GpuChannelManager::~GpuChannelManager() {
  gpu_channels_.clear();
  if (default_offscreen_surface_.get()) {
    default_offscreen_surface_->Destroy();
    default_offscreen_surface_ = NULL;
  }
  DCHECK(gpu_memory_buffer_operations_.empty());
}

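// Lazily creates the program cache on first use. Returns NULL when the
// driver exposes neither GL_ARB_get_program_binary nor
// GL_OES_get_program_binary, or when the cache is disabled via
// switches::kDisableGpuProgramCache.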
gpu::gles2::ProgramCache* GpuChannelManager::program_cache() {
  if (!program_cache_.get() &&
      (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
       gfx::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
      !CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kDisableGpuProgramCache)) {
    program_cache_.reset(new gpu::gles2::MemoryProgramCache());
  }
  return program_cache_.get();
}

gpu::gles2::ShaderTranslatorCache*
GpuChannelManager::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_.get();
}

void GpuChannelManager::RemoveChannel(int client_id) {
  Send(new GpuHostMsg_DestroyChannel(client_id));
  gpu_channels_.erase(client_id);
}

int GpuChannelManager::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannelManager::AddRoute(int32 routing_id, IPC::Listener* listener) {
  router_->AddRoute(routing_id, listener);
}

void GpuChannelManager::RemoveRoute(int32 routing_id) {
  router_->RemoveRoute(routing_id);
}

GpuChannel* GpuChannelManager::LookupChannel(int32 client_id) {
  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter == gpu_channels_.end())
    return NULL;
  return iter->second;
}

bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannelManager, msg)
    IPC_MESSAGE_HANDLER(GpuMsg_EstablishChannel, OnEstablishChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CloseChannel, OnCloseChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CreateViewCommandBuffer,
                        OnCreateViewCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_CreateGpuMemoryBuffer, OnCreateGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_DestroyGpuMemoryBuffer, OnDestroyGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_LoadedShader, OnLoadedShader)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

bool GpuChannelManager::Send(IPC::Message* msg) { return router_->Send(msg); }

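// Handles GpuMsg_EstablishChannel: creates a GpuChannel for |client_id|,
// optionally wiring it to the shared GL share group and mailbox manager,
// and reports the resulting IPC channel handle back to the browser with
// GpuHostMsg_ChannelEstablished.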
void GpuChannelManager::OnEstablishChannel(int client_id,
                                           bool share_context,
                                           bool allow_future_sync_points) {
  IPC::ChannelHandle channel_handle;

  gfx::GLShareGroup* share_group = NULL;
  gpu::gles2::MailboxManager* mailbox_manager = NULL;
  if (share_context) {
    if (!share_group_.get()) {
      share_group_ = new gfx::GLShareGroup;
      DCHECK(!mailbox_manager_.get());
      mailbox_manager_ = new gpu::gles2::MailboxManager;
    }
    share_group = share_group_.get();
    mailbox_manager = mailbox_manager_.get();
  }

  scoped_ptr<GpuChannel> channel(new GpuChannel(this,
                                                watchdog_,
                                                share_group,
                                                mailbox_manager,
                                                client_id,
                                                false /* software */,
                                                allow_future_sync_points));
  channel->Init(io_message_loop_.get(), shutdown_event_);
  channel_handle.name = channel->GetChannelName();

#if defined(OS_POSIX)
  // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
  // that it gets closed after it has been sent.
  int renderer_fd = channel->TakeRendererFileDescriptor();
  DCHECK_NE(-1, renderer_fd);
  channel_handle.socket = base::FileDescriptor(renderer_fd, true);
#endif

  gpu_channels_.set(client_id, channel.Pass());

  Send(new GpuHostMsg_ChannelEstablished(channel_handle));
}

void GpuChannelManager::OnCloseChannel(
    const IPC::ChannelHandle& channel_handle) {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    if (iter->second->GetChannelName() == channel_handle.name) {
      gpu_channels_.erase(iter);
      return;
    }
  }
}

void GpuChannelManager::OnCreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    int32 client_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  DCHECK(surface_id);
  CreateCommandBufferResult result = CREATE_COMMAND_BUFFER_FAILED;

  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter != gpu_channels_.end()) {
    result = iter->second->CreateViewCommandBuffer(
        window, surface_id, init_params, route_id);
  }

  Send(new GpuHostMsg_CommandBufferCreated(result));
}

void GpuChannelManager::CreateGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    unsigned internalformat,
    unsigned usage) {
  Send(new GpuHostMsg_GpuMemoryBufferCreated(
      gpu_memory_buffer_factory_->CreateGpuMemoryBuffer(
          handle, size, internalformat, usage)));
}

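// Handles GpuMsg_CreateGpuMemoryBuffer. The buffer is created immediately
// only if no earlier buffer operations are still queued; otherwise the
// request is appended to the queue so operations complete in the order
// they were received.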
void GpuChannelManager::OnCreateGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    unsigned internalformat,
    unsigned usage) {
  if (gpu_memory_buffer_operations_.empty()) {
    CreateGpuMemoryBuffer(handle, size, internalformat, usage);
  } else {
    gpu_memory_buffer_operations_.push_back(new GpuMemoryBufferOperation(
        0,
        base::Bind(&GpuChannelManager::CreateGpuMemoryBuffer,
                   base::Unretained(this),
                   handle,
                   size,
                   internalformat,
                   usage)));
  }
}

void GpuChannelManager::DestroyGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle) {
  gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(handle);
}

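// Handles GpuMsg_DestroyGpuMemoryBuffer. When a sync point is provided, or
// other buffer operations are still pending, destruction is queued and the
// sync point callback registered below drains the queue once the wait is
// retired.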
void GpuChannelManager::OnDestroyGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    int32 sync_point) {
  if (!sync_point && gpu_memory_buffer_operations_.empty()) {
    DestroyGpuMemoryBuffer(handle);
  } else {
    gpu_memory_buffer_operations_.push_back(new GpuMemoryBufferOperation(
        sync_point,
        base::Bind(&GpuChannelManager::DestroyGpuMemoryBuffer,
                   base::Unretained(this),
                   handle)));
    if (sync_point) {
      sync_point_manager()->AddSyncPointCallback(
          sync_point,
          base::Bind(
              &GpuChannelManager::OnDestroyGpuMemoryBufferSyncPointRetired,
              base::Unretained(this),
              gpu_memory_buffer_operations_.back()));
    }
  }
}

void GpuChannelManager::OnDestroyGpuMemoryBufferSyncPointRetired(
    GpuMemoryBufferOperation* gpu_memory_buffer_operation) {
  // Mark operation as no longer having a pending sync point.
  gpu_memory_buffer_operation->sync_point = 0;

  // De-queue operations until we reach a pending sync point.
  while (!gpu_memory_buffer_operations_.empty()) {
    // Check if operation has a pending sync point.
    if (gpu_memory_buffer_operations_.front()->sync_point)
      return;

    gpu_memory_buffer_operations_.front()->callback.Run();
    delete gpu_memory_buffer_operations_.front();
    gpu_memory_buffer_operations_.pop_front();
  }
}

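// Handles GpuMsg_LoadedShader: feeds a cached program binary back into the
// program cache, if one is in use.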
void GpuChannelManager::OnLoadedShader(std::string program_proto) {
  if (program_cache())
    program_cache()->LoadProgram(program_proto);
}

bool GpuChannelManager::HandleMessagesScheduled() {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    if (iter->second->handle_messages_scheduled())
      return true;
  }
  return false;
}

uint64 GpuChannelManager::MessagesProcessed() {
  uint64 messages_processed = 0;

  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    messages_processed += iter->second->messages_processed();
  }
  return messages_processed;
}

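// Marks every context on every channel as lost, then posts a task that
// drops the channels themselves once the current message dispatch unwinds.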
void GpuChannelManager::LoseAllContexts() {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    iter->second->MarkAllContextsLost();
  }
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannelManager::OnLoseAllContexts,
                 weak_factory_.GetWeakPtr()));
}

void GpuChannelManager::OnLoseAllContexts() {
  gpu_channels_.clear();
}

gfx::GLSurface* GpuChannelManager::GetDefaultOffscreenSurface() {
  if (!default_offscreen_surface_.get()) {
    default_offscreen_surface_ =
        gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size());
  }
  return default_offscreen_surface_.get();
}

}  // namespace content