// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/gl_context_cgl.h"

#include <OpenGL/CGLRenderers.h>
#include <OpenGL/CGLTypes.h>

#include <vector>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_switching_manager.h"

namespace gfx {

namespace {
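
// Whether contexts in this process may be moved between renderers (virtual
// screens); decided once in GetPixelFormat() below.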
bool g_support_renderer_switching;
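
// Deleter so that a scoped_ptr can release the CGLRendererInfoObj returned
// by CGLQueryRendererInfo() (see GetTotalGpuMemory() below).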
struct CGLRendererInfoObjDeleter {
  void operator()(CGLRendererInfoObj* x) {
    if (x)
      CGLDestroyRendererInfo(*x);
  }
};

}  // namespace
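
// Returns the pixel format shared by every GLContextCGL in the process,
// creating it on first use. The format is cached for the lifetime of the
// process so all contexts can share one set of virtual screens.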
static CGLPixelFormatObj GetPixelFormat() {
  static CGLPixelFormatObj format;
  if (format)
    return format;
  std::vector<CGLPixelFormatAttribute> attribs;
  // If the system supports dual gpus then allow offline renderers for every
  // context, so that they can all be in the same share group.
  if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) {
    attribs.push_back(kCGLPFAAllowOfflineRenderers);
    g_support_renderer_switching = true;
  }
  if (GetGLImplementation() == kGLImplementationAppleGL) {
    attribs.push_back(kCGLPFARendererID);
    attribs.push_back((CGLPixelFormatAttribute) kCGLRendererGenericFloatID);
    g_support_renderer_switching = false;
  }

  attribs.push_back((CGLPixelFormatAttribute) 0);

  GLint num_virtual_screens;
  if (CGLChoosePixelFormat(&attribs.front(),
                           &format,
                           &num_virtual_screens) != kCGLNoError) {
    LOG(ERROR) << "Error choosing pixel format.";
    return NULL;
  }
  if (!format) {
    LOG(ERROR) << "format == 0.";
    return NULL;
  }
  DCHECK_NE(num_virtual_screens, 0);
  return format;
}

GLContextCGL::GLContextCGL(GLShareGroup* share_group)
    : GLContextReal(share_group),
      context_(NULL),
      gpu_preference_(PreferIntegratedGpu),
      discrete_pixelformat_(NULL),
      screen_(-1),
      renderer_id_(-1),
      safe_to_force_gpu_switch_(false) {
}
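
// A pixel format that omits kCGLPFAAllowOfflineRenderers (as the
// discrete_attribs list below does) requires the discrete GPU on dual-GPU
// machines, so holding discrete_pixelformat_ keeps the discrete GPU active
// for the lifetime of this context.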
bool GLContextCGL::Initialize(GLSurface* compatible_surface,
                              GpuPreference gpu_preference) {
  DCHECK(compatible_surface);

  gpu_preference = ui::GpuSwitchingManager::GetInstance()->AdjustGpuPreference(
      gpu_preference);

  GLContextCGL* share_context = share_group() ?
      static_cast<GLContextCGL*>(share_group()->GetContext()) : NULL;

  CGLPixelFormatObj format = GetPixelFormat();
  if (!format)
    return false;

  // If using the discrete gpu, create a pixel format requiring it before we
  // create the context.
  if (!ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus() ||
      gpu_preference == PreferDiscreteGpu) {
    std::vector<CGLPixelFormatAttribute> discrete_attribs;
    discrete_attribs.push_back((CGLPixelFormatAttribute) 0);
    GLint num_pixel_formats;
    if (CGLChoosePixelFormat(&discrete_attribs.front(),
                             &discrete_pixelformat_,
                             &num_pixel_formats) != kCGLNoError) {
      LOG(ERROR) << "Error choosing pixel format.";
      return false;
    }
    // The renderer might be switched after this, so ignore the saved ID.
    share_group()->SetRendererID(-1);
  }

  CGLError res = CGLCreateContext(
      format,
      share_context ?
          static_cast<CGLContextObj>(share_context->GetHandle()) : NULL,
      reinterpret_cast<CGLContextObj*>(&context_));
  if (res != kCGLNoError) {
    LOG(ERROR) << "Error creating context.";
    Destroy();
    return false;
  }

  gpu_preference_ = gpu_preference;
  return true;
}
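
// Destroy() is also called from the destructor and on failed initialization;
// both the pixel format and the context are cleared after release, so a
// second call is a no-op.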
void GLContextCGL::Destroy() {
  if (discrete_pixelformat_) {
    // Delay releasing the pixel format for 10 seconds to reduce the number of
    // unnecessary GPU switches.
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&CGLReleasePixelFormat, discrete_pixelformat_),
        base::TimeDelta::FromSeconds(10));
    discrete_pixelformat_ = NULL;
  }
  if (context_) {
    CGLDestroyContext(static_cast<CGLContextObj>(context_));
    context_ = NULL;
  }
}
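
// Moves this context onto the virtual screen whose renderer matches the ID
// saved in the share group, so that every context in the group ends up on
// the same GPU. Switching is skipped when the context pins the discrete GPU.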
bool GLContextCGL::ForceGpuSwitchIfNeeded() {
  DCHECK(context_);

  // The call to CGLSetVirtualScreen can hang on some AMD drivers
  // http://crbug.com/227228
  if (safe_to_force_gpu_switch_) {
    int renderer_id = share_group()->GetRendererID();
    int screen;
    CGLGetVirtualScreen(static_cast<CGLContextObj>(context_), &screen);

    if (g_support_renderer_switching &&
        !discrete_pixelformat_ && renderer_id != -1 &&
        (screen != screen_ || renderer_id != renderer_id_)) {
      // Attempt to find a virtual screen that's using the requested renderer,
      // and switch the context to use that screen. Don't attempt to switch if
      // the context requires the discrete GPU.
      CGLPixelFormatObj format = GetPixelFormat();
      int virtual_screen_count;
      if (CGLDescribePixelFormat(format, 0, kCGLPFAVirtualScreenCount,
                                 &virtual_screen_count) != kCGLNoError)
        return false;

      for (int i = 0; i < virtual_screen_count; ++i) {
        int screen_renderer_id;
        if (CGLDescribePixelFormat(format, i, kCGLPFARendererID,
                                   &screen_renderer_id) != kCGLNoError)
          return false;

        screen_renderer_id &= kCGLRendererIDMatchingMask;
        if (screen_renderer_id == renderer_id) {
          CGLSetVirtualScreen(static_cast<CGLContextObj>(context_), i);
          screen_ = i;
          break;
        }
      }
      renderer_id_ = renderer_id;
    }
  }
  return true;
}
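
// ScopedReleaseCurrent releases the context when it goes out of scope unless
// Cancel() is called, so any early return below leaves no partially
// initialized context current.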
bool GLContextCGL::MakeCurrent(GLSurface* surface) {
  DCHECK(context_);

  if (!ForceGpuSwitchIfNeeded())
    return false;

  if (IsCurrent(surface))
    return true;

  ScopedReleaseCurrent release_current;
  TRACE_EVENT0("gpu", "GLContextCGL::MakeCurrent");

  if (CGLSetCurrentContext(
      static_cast<CGLContextObj>(context_)) != kCGLNoError) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  // Set this as soon as the context is current, since we might call into GL.
  SetRealGLApi();

  SetCurrent(surface);
  if (!InitializeDynamicBindings()) {
    return false;
  }

  if (!surface->OnMakeCurrent(this)) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  release_current.Cancel();
  return true;
}

void GLContextCGL::ReleaseCurrent(GLSurface* surface) {
  if (!IsCurrent(surface))
    return;

  SetCurrent(NULL);
  CGLSetCurrentContext(NULL);
}

bool GLContextCGL::IsCurrent(GLSurface* surface) {
  bool native_context_is_current = CGLGetCurrentContext() == context_;

  // If our context is current then our notion of which GLContext is
  // current must be correct. On the other hand, third-party code
  // using OpenGL might change the current context.
  DCHECK(!native_context_is_current || (GetRealCurrent() == this));

  if (!native_context_is_current)
    return false;

  return true;
}

void* GLContextCGL::GetHandle() {
  return context_;
}

void GLContextCGL::SetSwapInterval(int interval) {
  DCHECK(IsCurrent(NULL));
}
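
// Reports the video memory of the renderer this context is currently running
// on, by matching kCGLCPCurrentRendererID against each renderer returned by
// CGLQueryRendererInfo.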
bool GLContextCGL::GetTotalGpuMemory(size_t* bytes) {
  DCHECK(bytes);
  *bytes = 0;

  CGLContextObj context = reinterpret_cast<CGLContextObj>(context_);
  if (!context)
    return false;

  // Retrieve the current renderer ID
  GLint current_renderer_id = 0;
  if (CGLGetParameter(context,
                      kCGLCPCurrentRendererID,
                      &current_renderer_id) != kCGLNoError)
    return false;

  // Iterate through the list of all renderers
  GLuint display_mask = static_cast<GLuint>(-1);
  CGLRendererInfoObj renderer_info = NULL;
  GLint num_renderers = 0;
  if (CGLQueryRendererInfo(display_mask,
                           &renderer_info,
                           &num_renderers) != kCGLNoError)
    return false;

  scoped_ptr<CGLRendererInfoObj,
             CGLRendererInfoObjDeleter> scoper(&renderer_info);

  for (GLint renderer_index = 0;
       renderer_index < num_renderers;
       ++renderer_index) {
    // Skip this if this renderer is not the current renderer.
    GLint renderer_id = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPRendererID,
                            &renderer_id) != kCGLNoError)
      continue;
    if (renderer_id != current_renderer_id)
      continue;

    // Retrieve the video memory for the renderer.
    GLint video_memory = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPVideoMemory,
                            &video_memory) != kCGLNoError)
      continue;

    *bytes = video_memory;
    return true;
  }

  return false;
}
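
// See ForceGpuSwitchIfNeeded(): virtual screen switches are deferred until
// the caller declares them safe, to avoid the AMD driver hang noted above
// (http://crbug.com/227228).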
void GLContextCGL::SetSafeToForceGpuSwitch() {
  safe_to_force_gpu_switch_ = true;
}

GLContextCGL::~GLContextCGL() {
  Destroy();
}

GpuPreference GLContextCGL::GetGpuPreference() {
  return gpu_preference_;
}

}  // namespace gfx