// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/gl_context_cgl.h"

#include <OpenGL/CGLRenderers.h>
#include <OpenGL/CGLTypes.h>
#include <vector>

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gpu_switching_manager.h"

namespace gfx {

namespace {

bool g_support_renderer_switching;
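
// Deleter so that a scoped_ptr can own the renderer info list returned by
// CGLQueryRendererInfo and release it with CGLDestroyRendererInfo.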
struct CGLRendererInfoObjDeleter {
  void operator()(CGLRendererInfoObj* x) {
    if (x)
      CGLDestroyRendererInfo(*x);
  }
};

}  // namespace

static CGLPixelFormatObj GetPixelFormat() {
  static CGLPixelFormatObj format;
  if (format)
    return format;
  std::vector<CGLPixelFormatAttribute> attribs;
  // If the system supports dual gpus then allow offline renderers for every
  // context, so that they can all be in the same share group.
  if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) {
    attribs.push_back(kCGLPFAAllowOfflineRenderers);
    g_support_renderer_switching = true;
  }
  if (GetGLImplementation() == kGLImplementationAppleGL) {
    attribs.push_back(kCGLPFARendererID);
    attribs.push_back((CGLPixelFormatAttribute) kCGLRendererGenericFloatID);
    g_support_renderer_switching = false;
  }
  if (GetGLImplementation() == kGLImplementationDesktopGLCoreProfile) {
    // These constants don't exist in the 10.6 SDK against which
    // Chromium currently compiles.
    const int kOpenGLProfile = 99;
    const int kOpenGL3_2Core = 0x3200;
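    // They correspond to kCGLPFAOpenGLProfile and kCGLOGLPVersion_3_2_Core
    // in the 10.7 SDK.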
    attribs.push_back(static_cast<CGLPixelFormatAttribute>(kOpenGLProfile));
    attribs.push_back(static_cast<CGLPixelFormatAttribute>(kOpenGL3_2Core));
  }

  attribs.push_back((CGLPixelFormatAttribute) 0);
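
  // Each virtual screen of the pixel format corresponds to one renderer
  // (typically one GPU) that the format can drive.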
  GLint num_virtual_screens;
  if (CGLChoosePixelFormat(&attribs.front(),
                           &format,
                           &num_virtual_screens) != kCGLNoError) {
    LOG(ERROR) << "Error choosing pixel format.";
    return nullptr;
  }
  if (!format) {
    LOG(ERROR) << "format == 0.";
    return nullptr;
  }
  DCHECK_NE(num_virtual_screens, 0);
  return format;
}

GLContextCGL::GLContextCGL(GLShareGroup* share_group)
    : GLContextReal(share_group),
      context_(nullptr),
      gpu_preference_(PreferIntegratedGpu),
      discrete_pixelformat_(nullptr),
      screen_(-1),
      renderer_id_(-1),
      safe_to_force_gpu_switch_(false) {
}

bool GLContextCGL::Initialize(GLSurface* compatible_surface,
                              GpuPreference gpu_preference) {
  DCHECK(compatible_surface);

  gpu_preference = ui::GpuSwitchingManager::GetInstance()->AdjustGpuPreference(
      gpu_preference);

  GLContextCGL* share_context = share_group() ?
      static_cast<GLContextCGL*>(share_group()->GetContext()) : nullptr;

  CGLPixelFormatObj format = GetPixelFormat();
  if (!format)
    return false;

  // If using the discrete gpu, create a pixel format requiring it before we
  // create the context.
  if (!ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus() ||
      gpu_preference == PreferDiscreteGpu) {
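    // An attribute list without kCGLPFAAllowOfflineRenderers yields a format
    // that can only drive the online (discrete) GPU, so merely holding this
    // format object keeps the discrete GPU active.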
    std::vector<CGLPixelFormatAttribute> discrete_attribs;
    discrete_attribs.push_back((CGLPixelFormatAttribute) 0);
    GLint num_pixel_formats;
    if (CGLChoosePixelFormat(&discrete_attribs.front(),
                             &discrete_pixelformat_,
                             &num_pixel_formats) != kCGLNoError) {
      LOG(ERROR) << "Error choosing pixel format.";
      return false;
    }
    // The renderer might be switched after this, so ignore the saved ID.
    share_group()->SetRendererID(-1);
  }

  CGLError res = CGLCreateContext(
      format,
      share_context ?
          static_cast<CGLContextObj>(share_context->GetHandle()) : nullptr,
      reinterpret_cast<CGLContextObj*>(&context_));
  if (res != kCGLNoError) {
    LOG(ERROR) << "Error creating context.";
    Destroy();
    return false;
  }

  gpu_preference_ = gpu_preference;
  return true;
}

void GLContextCGL::Destroy() {
  if (discrete_pixelformat_) {
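    // Releasing the discrete pixel format is what allows the system to
    // switch back to the integrated GPU.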
    if (base::MessageLoop::current() != nullptr) {
      // Delay releasing the pixel format for 10 seconds to reduce the number
      // of unnecessary GPU switches.
      base::MessageLoop::current()->PostDelayedTask(
          FROM_HERE, base::Bind(&CGLReleasePixelFormat, discrete_pixelformat_),
          base::TimeDelta::FromSeconds(10));
    } else {
      CGLReleasePixelFormat(discrete_pixelformat_);
    }
    discrete_pixelformat_ = nullptr;
  }
  if (context_) {
    CGLDestroyContext(static_cast<CGLContextObj>(context_));
    context_ = nullptr;
  }
}

bool GLContextCGL::ForceGpuSwitchIfNeeded() {
  DCHECK(context_);

  // The call to CGLSetVirtualScreen can hang on some AMD drivers
  // http://crbug.com/227228
  if (safe_to_force_gpu_switch_) {
    int renderer_id = share_group()->GetRendererID();
    int screen;
    CGLGetVirtualScreen(static_cast<CGLContextObj>(context_), &screen);

    if (g_support_renderer_switching &&
        !discrete_pixelformat_ && renderer_id != -1 &&
        (screen != screen_ || renderer_id != renderer_id_)) {
      // Attempt to find a virtual screen that's using the requested renderer,
      // and switch the context to use that screen. Don't attempt to switch if
      // the context requires the discrete GPU.
      CGLPixelFormatObj format = GetPixelFormat();
      int virtual_screen_count;
      if (CGLDescribePixelFormat(format, 0, kCGLPFAVirtualScreenCount,
                                 &virtual_screen_count) != kCGLNoError)
        return false;

      for (int i = 0; i < virtual_screen_count; ++i) {
        int screen_renderer_id;
        if (CGLDescribePixelFormat(format, i, kCGLPFARendererID,
                                   &screen_renderer_id) != kCGLNoError)
          return false;
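
        // kCGLRendererIDMatchingMask keeps only the bits that identify the
        // renderer, so IDs from different queries compare equal for the
        // same GPU.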
        screen_renderer_id &= kCGLRendererIDMatchingMask;
        if (screen_renderer_id == renderer_id) {
          CGLSetVirtualScreen(static_cast<CGLContextObj>(context_), i);
          screen_ = i;
          break;
        }
      }
      renderer_id_ = renderer_id;
    }
  }
  return true;
}

bool GLContextCGL::MakeCurrent(GLSurface* surface) {
  DCHECK(context_);

  if (!ForceGpuSwitchIfNeeded())
    return false;

  if (IsCurrent(surface))
    return true;
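
  // release_current resets the current context if anything below fails;
  // the Cancel() call at the end keeps this context current on success.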
  ScopedReleaseCurrent release_current;
  TRACE_EVENT0("gpu", "GLContextCGL::MakeCurrent");

  if (CGLSetCurrentContext(
      static_cast<CGLContextObj>(context_)) != kCGLNoError) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  // Set this as soon as the context is current, since we might call into GL.
  SetRealGLApi();

  SetCurrent(surface);
  if (!InitializeDynamicBindings()) {
    return false;
  }

  if (!surface->OnMakeCurrent(this)) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  release_current.Cancel();
  return true;
}

void GLContextCGL::ReleaseCurrent(GLSurface* surface) {
  if (!IsCurrent(surface))
    return;

  SetCurrent(nullptr);
  CGLSetCurrentContext(nullptr);
}

bool GLContextCGL::IsCurrent(GLSurface* surface) {
  bool native_context_is_current = CGLGetCurrentContext() == context_;

  // If our context is current then our notion of which GLContext is
  // current must be correct. On the other hand, third-party code
  // using OpenGL might change the current context.
  DCHECK(!native_context_is_current || (GetRealCurrent() == this));

  if (!native_context_is_current)
    return false;

  return true;
}

void* GLContextCGL::GetHandle() {
  return context_;
}

void GLContextCGL::OnSetSwapInterval(int interval) {
  DCHECK(IsCurrent(nullptr));
}

bool GLContextCGL::GetTotalGpuMemory(size_t* bytes) {
  DCHECK(bytes);
  *bytes = 0;

  CGLContextObj context = reinterpret_cast<CGLContextObj>(context_);
  if (!context)
    return false;

  // Retrieve the current renderer ID.
  GLint current_renderer_id = 0;
  if (CGLGetParameter(context,
                      kCGLCPCurrentRendererID,
                      &current_renderer_id) != kCGLNoError)
    return false;

  // Iterate through the list of all renderers.
  GLuint display_mask = static_cast<GLuint>(-1);
  CGLRendererInfoObj renderer_info = nullptr;
  GLint num_renderers = 0;
  if (CGLQueryRendererInfo(display_mask,
                           &renderer_info,
                           &num_renderers) != kCGLNoError)
    return false;

  scoped_ptr<CGLRendererInfoObj,
      CGLRendererInfoObjDeleter> scoper(&renderer_info);

  for (GLint renderer_index = 0;
       renderer_index < num_renderers;
       ++renderer_index) {
    // Skip if this renderer is not the current renderer.
    GLint renderer_id = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPRendererID,
                            &renderer_id) != kCGLNoError)
      continue;
    if (renderer_id != current_renderer_id)
      continue;
    // Retrieve the video memory for the renderer.
    GLint video_memory = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPVideoMemory,
                            &video_memory) != kCGLNoError)
      continue;
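    // kCGLRPVideoMemory reports the renderer's video memory in bytes.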
    *bytes = video_memory;
    return true;
  }

  return false;
}

void GLContextCGL::SetSafeToForceGpuSwitch() {
  safe_to_force_gpu_switch_ = true;
}

GLContextCGL::~GLContextCGL() {
  Destroy();
}

GpuPreference GLContextCGL::GetGpuPreference() {
  return gpu_preference_;
}

}  // namespace gfx