GPU workaround to simulate Out of Memory errors with large textures
content/common/gpu/gpu_channel_manager.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_channel_manager.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/message_router.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/memory_program_cache.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_share_group.h"

#if defined(USE_OZONE)
#include "ui/ozone/public/gpu_platform_support.h"
#include "ui/ozone/public/ozone_platform.h"
#endif

namespace content {

namespace {

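// Handles GPU memory buffer creation requests from the browser process.
// As an IPC::MessageFilter attached to the channel, OnMessageReceived runs
// on the IPC (IO) thread, so GpuMsg_CreateGpuMemoryBuffer can be answered
// with GpuHostMsg_GpuMemoryBufferCreated without involving the main thread.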
class GpuChannelManagerMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelManagerMessageFilter(
      GpuMemoryBufferFactory* gpu_memory_buffer_factory)
      : sender_(NULL), gpu_memory_buffer_factory_(gpu_memory_buffer_factory) {}

  void OnFilterAdded(IPC::Sender* sender) override {
    DCHECK(!sender_);
    sender_ = sender;
  }

  void OnFilterRemoved() override {
    DCHECK(sender_);
    sender_ = NULL;
  }

  bool OnMessageReceived(const IPC::Message& message) override {
    DCHECK(sender_);
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelManagerMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuMsg_CreateGpuMemoryBuffer, OnCreateGpuMemoryBuffer)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()
    return handled;
  }

 protected:
  ~GpuChannelManagerMessageFilter() override {}

  void OnCreateGpuMemoryBuffer(
      const GpuMsg_CreateGpuMemoryBuffer_Params& params) {
    TRACE_EVENT2("gpu",
                 "GpuChannelManagerMessageFilter::OnCreateGpuMemoryBuffer",
                 "id", params.id, "client_id", params.client_id);
    sender_->Send(new GpuHostMsg_GpuMemoryBufferCreated(
        gpu_memory_buffer_factory_->CreateGpuMemoryBuffer(
            params.id, params.size, params.format, params.usage,
            params.client_id, params.surface_handle)));
  }

  IPC::Sender* sender_;
  GpuMemoryBufferFactory* gpu_memory_buffer_factory_;
};

gfx::GpuMemoryBufferType GetGpuMemoryBufferFactoryType() {
  std::vector<gfx::GpuMemoryBufferType> supported_types;
  GpuMemoryBufferFactory::GetSupportedTypes(&supported_types);
  DCHECK(!supported_types.empty());
  return supported_types[0];
}

}  // namespace

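// GpuChannelManager lives in the GPU process. It owns one GpuChannel per
// renderer client (keyed by client_id) and dispatches the browser-to-GPU
// control messages that create, configure and tear those channels down.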
GpuChannelManager::GpuChannelManager(MessageRouter* router,
                                     GpuWatchdog* watchdog,
                                     base::MessageLoopProxy* io_message_loop,
                                     base::WaitableEvent* shutdown_event,
                                     IPC::SyncChannel* channel)
    : io_message_loop_(io_message_loop),
      shutdown_event_(shutdown_event),
      router_(router),
      gpu_memory_manager_(
          this,
          GpuMemoryManager::kDefaultMaxSurfacesWithFrontbufferSoftLimit),
      watchdog_(watchdog),
      sync_point_manager_(gpu::SyncPointManager::Create(false)),
      gpu_memory_buffer_factory_(
          GpuMemoryBufferFactory::Create(GetGpuMemoryBufferFactoryType())),
      channel_(channel),
      filter_(new GpuChannelManagerMessageFilter(
          gpu_memory_buffer_factory_.get())),
      relinquish_resources_pending_(false),
      weak_factory_(this) {
  DCHECK(router_);
  DCHECK(io_message_loop);
  DCHECK(shutdown_event);
  channel_->AddFilter(filter_.get());
}

GpuChannelManager::~GpuChannelManager() {
  gpu_channels_.clear();
  if (default_offscreen_surface_.get()) {
    default_offscreen_surface_->Destroy();
    default_offscreen_surface_ = NULL;
  }
}

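// The program cache is created lazily, and only when the driver can return
// program binaries (GL_ARB_get_program_binary or GL_OES_get_program_binary)
// and the cache has not been disabled via switches::kDisableGpuProgramCache.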
gpu::gles2::ProgramCache* GpuChannelManager::program_cache() {
  if (!program_cache_.get() &&
      (gfx::g_driver_gl.ext.b_GL_ARB_get_program_binary ||
       gfx::g_driver_gl.ext.b_GL_OES_get_program_binary) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kDisableGpuProgramCache)) {
    program_cache_.reset(new gpu::gles2::MemoryProgramCache());
  }
  return program_cache_.get();
}

gpu::gles2::ShaderTranslatorCache*
GpuChannelManager::shader_translator_cache() {
  if (!shader_translator_cache_.get())
    shader_translator_cache_ = new gpu::gles2::ShaderTranslatorCache;
  return shader_translator_cache_.get();
}

void GpuChannelManager::RemoveChannel(int client_id) {
  Send(new GpuHostMsg_DestroyChannel(client_id));
  gpu_channels_.erase(client_id);
  CheckRelinquishGpuResources();
}

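// Route IDs are allocated from a process-wide monotonic counter; AddRoute
// and RemoveRoute register and unregister listeners with the shared
// MessageRouter so incoming messages reach the right object.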
int GpuChannelManager::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannelManager::AddRoute(int32 routing_id, IPC::Listener* listener) {
  router_->AddRoute(routing_id, listener);
}

void GpuChannelManager::RemoveRoute(int32 routing_id) {
  router_->RemoveRoute(routing_id);
}

GpuChannel* GpuChannelManager::LookupChannel(int32 client_id) {
  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter == gpu_channels_.end())
    return NULL;
  else
    return iter->second;
}

bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannelManager, msg)
    IPC_MESSAGE_HANDLER(GpuMsg_EstablishChannel, OnEstablishChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CloseChannel, OnCloseChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_CreateViewCommandBuffer,
                        OnCreateViewCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_DestroyGpuMemoryBuffer, OnDestroyGpuMemoryBuffer)
    IPC_MESSAGE_HANDLER(GpuMsg_LoadedShader, OnLoadedShader)
    IPC_MESSAGE_HANDLER(GpuMsg_RelinquishResources, OnRelinquishResources)
    IPC_MESSAGE_HANDLER(GpuMsg_UpdateValueState, OnUpdateValueState)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

bool GpuChannelManager::Send(IPC::Message* msg) { return router_->Send(msg); }

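// Creates the IPC channel for |client_id| and reports its handle back to
// the browser with GpuHostMsg_ChannelEstablished. When |share_context| is
// set, all such channels share a single GLShareGroup and MailboxManagerImpl,
// which lets contexts created on different channels share GL resources.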
void GpuChannelManager::OnEstablishChannel(int client_id,
                                           bool share_context,
                                           bool allow_future_sync_points) {
  IPC::ChannelHandle channel_handle;

  gfx::GLShareGroup* share_group = NULL;
  gpu::gles2::MailboxManager* mailbox_manager = NULL;
  if (share_context) {
    if (!share_group_.get()) {
      share_group_ = new gfx::GLShareGroup;
      DCHECK(!mailbox_manager_.get());
      mailbox_manager_ = new gpu::gles2::MailboxManagerImpl;
    }
    share_group = share_group_.get();
    mailbox_manager = mailbox_manager_.get();
  }

  scoped_ptr<GpuChannel> channel(new GpuChannel(this,
                                                watchdog_,
                                                share_group,
                                                mailbox_manager,
                                                client_id,
                                                false,
                                                allow_future_sync_points));
  channel->Init(io_message_loop_.get(), shutdown_event_);
  channel_handle.name = channel->GetChannelName();

#if defined(OS_POSIX)
  // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
  // that it gets closed after it has been sent.
  base::ScopedFD renderer_fd = channel->TakeRendererFileDescriptor();
  DCHECK(renderer_fd.is_valid());
  channel_handle.socket = base::FileDescriptor(renderer_fd.Pass());
#endif

  gpu_channels_.set(client_id, channel.Pass());

  Send(new GpuHostMsg_ChannelEstablished(channel_handle));
}

void GpuChannelManager::OnCloseChannel(
    const IPC::ChannelHandle& channel_handle) {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    if (iter->second->GetChannelName() == channel_handle.name) {
      gpu_channels_.erase(iter);
      CheckRelinquishGpuResources();
      return;
    }
  }
}

void GpuChannelManager::OnCreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    int32 client_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  DCHECK(surface_id);
  CreateCommandBufferResult result = CREATE_COMMAND_BUFFER_FAILED;

  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter != gpu_channels_.end()) {
    result = iter->second->CreateViewCommandBuffer(
        window, surface_id, init_params, route_id);
  }

  Send(new GpuHostMsg_CommandBufferCreated(result));
}

void GpuChannelManager::DestroyGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id) {
  io_message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannelManager::DestroyGpuMemoryBufferOnIO,
                 base::Unretained(this),
                 id,
                 client_id));
}

void GpuChannelManager::DestroyGpuMemoryBufferOnIO(
    gfx::GpuMemoryBufferId id,
    int client_id) {
  gpu_memory_buffer_factory_->DestroyGpuMemoryBuffer(id, client_id);
}

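// If the request carries a sync point, destruction is deferred until that
// sync point is retired, presumably so commands already issued against the
// buffer complete before its backing storage goes away. Without a sync
// point the buffer is destroyed immediately on the IO thread.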
void GpuChannelManager::OnDestroyGpuMemoryBuffer(
    gfx::GpuMemoryBufferId id,
    int client_id,
    int32 sync_point) {
  if (!sync_point) {
    DestroyGpuMemoryBuffer(id, client_id);
  } else {
    sync_point_manager()->AddSyncPointCallback(
        sync_point,
        base::Bind(&GpuChannelManager::DestroyGpuMemoryBuffer,
                   base::Unretained(this),
                   id,
                   client_id));
  }
}

void GpuChannelManager::OnUpdateValueState(
    int client_id, unsigned int target, const gpu::ValueState& state) {
  // Only pass updated state to the channel corresponding to the
  // render_widget_host where the event originated.
  GpuChannelMap::const_iterator iter = gpu_channels_.find(client_id);
  if (iter != gpu_channels_.end()) {
    iter->second->HandleUpdateValueState(target, state);
  }
}

void GpuChannelManager::OnLoadedShader(std::string program_proto) {
  if (program_cache())
    program_cache()->LoadProgram(program_proto);
}

bool GpuChannelManager::HandleMessagesScheduled() {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    if (iter->second->handle_messages_scheduled())
      return true;
  }
  return false;
}

uint64 GpuChannelManager::MessagesProcessed() {
  uint64 messages_processed = 0;

  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    messages_processed += iter->second->messages_processed();
  }
  return messages_processed;
}

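// Marks every context on every channel as lost, then drops the channels from
// a separately posted task (OnLoseAllContexts) rather than synchronously,
// which avoids tearing a channel down while it may still be on the current
// call stack.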
void GpuChannelManager::LoseAllContexts() {
  for (GpuChannelMap::iterator iter = gpu_channels_.begin();
       iter != gpu_channels_.end(); ++iter) {
    iter->second->MarkAllContextsLost();
  }
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannelManager::OnLoseAllContexts,
                 weak_factory_.GetWeakPtr()));
}

void GpuChannelManager::OnLoseAllContexts() {
  gpu_channels_.clear();
  CheckRelinquishGpuResources();
}

gfx::GLSurface* GpuChannelManager::GetDefaultOffscreenSurface() {
  if (!default_offscreen_surface_.get()) {
    default_offscreen_surface_ =
        gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size());
  }
  return default_offscreen_surface_.get();
}

void GpuChannelManager::OnRelinquishResources() {
  relinquish_resources_pending_ = true;
  CheckRelinquishGpuResources();
}

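// Resources are actually released only once a GpuMsg_RelinquishResources
// request is pending and at most one channel remains. The default offscreen
// surface is destroyed, and on Ozone the platform is given a chance to
// release its own resources asynchronously before the browser is acked with
// GpuHostMsg_ResourcesRelinquished.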
void GpuChannelManager::CheckRelinquishGpuResources() {
  if (relinquish_resources_pending_ && gpu_channels_.size() <= 1) {
    relinquish_resources_pending_ = false;
    if (default_offscreen_surface_.get()) {
      default_offscreen_surface_->DestroyAndTerminateDisplay();
      default_offscreen_surface_ = NULL;
    }
#if defined(USE_OZONE)
    ui::OzonePlatform::GetInstance()
        ->GetGpuPlatformSupport()
        ->RelinquishGpuResources(
            base::Bind(&GpuChannelManager::OnResourcesRelinquished,
                       weak_factory_.GetWeakPtr()));
#else
    OnResourcesRelinquished();
#endif
  }
}

void GpuChannelManager::OnResourcesRelinquished() {
  Send(new GpuHostMsg_ResourcesRelinquished());
}

}  // namespace content