Changes.
[cairo/gpu.git] / src / gpu / cairo-gpu-impl-space-gl.h
blob104f6f3d6a9ea90b1a7384a7015af49c94d768a6
1 #include <dlfcn.h>
3 typedef struct
5 cairo_gpu_gl_state_t user;
6 cairo_gpu_context_t* ctx;
7 int depth;
8 } cairo_gpu_gl_tls_t;
10 __thread cairo_gpu_gl_tls_t _cairo_gl_tls;
12 static cairo_gpu_context_t*
13 _cairo_gpu_context__create(cairo_gpu_space_t *space, cairo_gpu_subspace_t *sub)
15 cairo_gpu_context_t* ctx = calloc(1, sizeof(cairo_gpu_context_t));
17 ctx->space = space;
18 ctx->subspace = sub;
20 ctx->color_mask = 0xf;
21 ctx->blend_func = ~0;
22 ctx->frag = ~0;
23 ctx->vert = 0;
24 ctx->fragp = ~0;
25 ctx->vertp = ~0;
26 ctx->constant_unit = -1;
27 ctx->viewport_width = -1;
28 ctx->viewport_height = -1;
29 return ctx;
32 static cairo_bool_t
33 _cairo_gpu_hash_keys_equal(const void *key_a, const void *key_b)
35 return ((cairo_hash_entry_t*)key_a)->hash == ((cairo_hash_entry_t*)key_b)->hash;
38 static cairo_gpu_space_t *
39 _cairo_gpu_space__begin_create(void)
41 cairo_gpu_space_t *space;
43 space = calloc(1, sizeof(cairo_gpu_space_t));
44 if(!space)
45 return 0;
47 CAIRO_REFERENCE_COUNT_INIT(&space->base.ref_count, 1);
48 CAIRO_MUTEX_INIT(space->mutex);
49 CAIRO_MUTEX_INIT(space->cached_mask_surface_mutex);
50 space->base.backend = &_cairo_gpu_space_backend;
51 space->tls_list.prev = &space->tls_list;
52 space->tls_list.next = &space->tls_list;
53 pthread_key_create(&space->tls, _cairo_gpu_space_tls_dtor);
54 pthread_key_create(&space->subspace.context_tls, 0);
55 space->table = _cairo_hash_table_create(_cairo_gpu_hash_keys_equal);
57 return space;
60 static cairo_space_t*
61 _cairo_gpu_space__finish_create(cairo_gpu_space_t* space, unsigned flags);
63 #include "cairo-gpu-impl-space-gl-osmesa.h"
64 #include "cairo-gpu-impl-space-gl-glx.h"
66 static inline unsigned
67 _cairo_gl_error(cairo_gpu_context_t* ctx)
69 unsigned err;
70 if((err = ctx->gl.GetError()))
72 while(ctx->gl.GetError())
75 return err;
78 static inline void
79 _cairo_gpu_assert_no_gl_error(cairo_gpu_context_t* ctx)
81 static int errors = 0;
82 unsigned err;
83 while((err = ctx->gl.GetError()))
85 // race condition here but we don't really care
86 int n = errors;
87 if(n == 10)
89 printf("WARNING: cairo-gpu: not showing any more OpenGL errors\n");
90 errors = n +1;
92 else if(n < 10)
94 printf("WARNING: cairo-gpu: unexpected OpenGL error: %x\n", err);
95 errors = n + 1;
100 #define GL_ERROR(x) (_cairo_gpu_assert_no_gl_error(ctx), x, _cairo_gl_error(ctx))
102 static void
103 _cairo_gpu_gl_init(cairo_gpu_gl_t* gl, PFNGLGETPROCADDRESSPROC GetProcAddress)
105 #define DO(x) gl->x = GetProcAddress("gl" #x)
106 DO(ActiveTextureARB);
107 DO(BindBufferARB);
108 DO(BindFramebufferEXT);
109 DO(BindProgramARB);
110 DO(BindRenderbufferEXT);
111 DO(BindTextureEXT);
112 DO(BindVertexArray);
113 DO(BlendColorEXT);
114 DO(BlendEquation);
115 DO(BlendFunc);
116 DO(BlendFuncSeparateEXT);
117 DO(BlitFramebufferEXT);
118 DO(BufferDataARB);
119 DO(CheckFramebufferStatusEXT);
120 DO(Clear);
121 DO(ClearColor);
122 DO(ClientActiveTextureARB);
123 DO(Color4fv);
124 DO(ColorMask);
125 DO(ColorPointer);
126 DO(CopyPixels);
127 DO(CopyTexSubImage2D);
128 DO(DeleteBuffersARB);
129 DO(DeleteFramebuffersEXT);
130 DO(DeleteProgramsARB);
131 DO(DeleteRenderbuffersEXT);
132 DO(DeleteTexturesEXT);
133 DO(DeleteVertexArrays);
134 DO(Disable);
135 DO(DisableClientState);
136 DO(DrawArraysEXT);
137 DO(DrawBuffer);
138 DO(DrawPixels);
139 DO(Enable);
140 DO(EnableClientState);
141 DO(EnableVertexAttribArrayARB);
142 DO(Finish);
143 DO(Flush);
144 DO(FramebufferRenderbufferEXT);
145 DO(FramebufferTexture2DEXT);
146 DO(GenBuffersARB);
147 DO(GenerateMipmapEXT);
148 DO(GenFramebuffersEXT);
149 DO(GenProgramsARB);
150 DO(GenRenderbuffersEXT);
151 DO(GenTexturesEXT);
152 DO(GenVertexArrays);
153 DO(GetError);
154 DO(GetFloatv);
155 DO(GetIntegerv);
156 DO(GetString);
157 DO(GetTexImage);
158 DO(Hint);
159 DO(LoadIdentity);
160 DO(LoadMatrixd);
161 DO(LoadMatrixf);
162 DO(MapBufferARB);
163 DO(MapBufferRange);
164 DO(MatrixMode);
165 DO(Ortho);
166 DO(PixelMapfv);
167 DO(PixelStorei);
168 DO(PixelTransferf);
169 DO(PixelZoom);
170 DO(ProgramEnvParameter4fvARB);
171 DO(ProgramStringARB);
172 DO(RasterPos2i);
173 DO(ReadBuffer);
174 DO(ReadPixels);
175 DO(Recti);
176 DO(RenderbufferStorageMultisampleCoverageNV);
177 DO(RenderbufferStorageMultisampleEXT);
178 DO(Scissor);
179 DO(TexCoordPointer);
180 DO(TexEnvfv);
181 DO(TexEnvi);
182 DO(TexGenfv);
183 DO(TexGeni);
184 DO(TexImage2D);
185 DO(TexParameteri);
186 DO(TexSubImage2D);
187 DO(Translated);
188 DO(Translatef);
189 DO(UnmapBufferARB);
190 DO(VertexPointer);
191 DO(Viewport);
192 DO(WindowPos2i);
193 #undef DO
196 static void
197 _cairo_gpu_space__fini_entry(void* abstract_entry, void* closure)
199 cairo_gpu_context_t* ctx = closure;
200 cairo_gpu_int_entry_t* int_entry = (cairo_gpu_int_entry_t*)abstract_entry;
201 //cairo_gpu_ptr_entry* ptr_entry = (cairo_gpu_ptr_entry_t*)abstract_entry;
202 switch(int_entry->base.hash & TABLE_MASK)
204 case TABLE_FRAG:
205 case TABLE_VERT:
206 ctx->gl.DeleteProgramsARB(1, &int_entry->v);
207 break;
208 default:
209 break;
212 _cairo_hash_table_remove(ctx->space->table, (cairo_hash_entry_t*)abstract_entry);
213 free(abstract_entry);
216 static void
217 _cairo_gpu_space__fini(cairo_gpu_context_t * ctx)
219 cairo_gpu_space_t* space = ctx->space;
221 _cairo_gpu_texture_fini(ctx, &space->dummy_texture);
223 _cairo_hash_table_foreach (space->table, _cairo_gpu_space__fini_entry, ctx);
224 _cairo_hash_table_destroy(space->table);
227 // ctx == 0, user.api < 0 => we have an user context bound, or nothing
228 // ctx != 0, user.api < 0 => we have either an user context bound, or nothing, or ctx
// ctx != 0, user.api >= 0 => we have ctx bound, and may have an user context saved
230 // ctx == 0, user.api > 0 => we have an user context bound, and we have an user context saved
231 // ctx == 0, user.api == 0 => we have no context bound, and we have no user context saved
233 static inline void
234 _cairo_gpu_enter(void)
236 ++_cairo_gl_tls.depth;
239 static inline void
240 _cairo_gl_make_current(cairo_gpu_space_t* space, cairo_gpu_gl_state_t* user)
242 API_SPACE(make_current)(space, user);
245 static void
246 _cairo_gpu_space__do_destroy(cairo_gpu_space_t* space)
248 API_SPACE(do_destroy)(space);
250 if(space->owns_libgl)
251 dlclose(space->libgl);
252 free(space);
255 static void
256 _cairo_gpu_context__do_destroy(cairo_gpu_context_t * ctx)
258 cairo_gpu_space_t* space = ctx->space;
260 API_CTX(do_destroy)(ctx);
262 free(ctx);
264 if(space->destroy_on_unbind)
265 _cairo_gpu_space__do_destroy(space);
268 static cairo_gpu_context_t*
269 _cairo_gpu_context__unbind_internal(void)
271 cairo_gpu_context_t* ctx = _cairo_gl_tls.ctx;
272 API_CTX(flush)(ctx);
274 _cairo_gpu_assert_no_gl_error(ctx);
276 if(ctx->destroy_on_unbind)
277 return ctx;
279 return 0;
282 static void
283 _cairo_gpu_do_exit(void)
285 cairo_gpu_context_t* ctx = _cairo_gl_tls.ctx;
286 cairo_gpu_context_t* destroy_ctx = 0;
287 if(ctx)
289 destroy_ctx = _cairo_gpu_context__unbind_internal();
291 if(destroy_ctx && !_cairo_gl_tls.user.api)
293 cairo_gpu_space_t* space = ctx->space;
294 API_SPACE(unbind_context)(space);
296 _cairo_gl_tls.ctx = 0;
300 if(_cairo_gl_tls.user.api)
302 _cairo_gl_make_current(ctx->space, &_cairo_gl_tls.user);
303 _cairo_gl_tls.user.api = 0;
304 _cairo_gl_tls.ctx = 0;
307 _cairo_gl_tls.user.api = -1;
309 if(destroy_ctx)
310 _cairo_gpu_context__do_destroy(destroy_ctx);
314 * NOTE: if no OpenGL context was set, we leak our current context.
315 * This allows to run many Cairo commands without rebinding the context every time.
316 * The risk is that the user may accidentally mess up our GL context.
318 static inline void
319 _cairo_gpu_exit(void)
321 if(!--_cairo_gl_tls.depth && _cairo_gl_tls.user.api >= 0)
322 _cairo_gpu_do_exit();
325 // you must bind a context after calling this
326 static void
327 _cairo_gpu_context__update_state(cairo_gpu_context_t* ctx, unsigned api)
329 cairo_gpu_gl_state_t* user = &_cairo_gl_tls.user;
330 user->api = 0;
331 API_CTX(update_state)(ctx, user);
334 static void
335 _cairo_gpu_context__do_bind(cairo_gpu_context_t* ctx, cairo_gpu_surface_t * surface)
337 cairo_gpu_context_t* destroy_ctx = 0;
338 if(_cairo_gl_tls.ctx)
339 destroy_ctx = _cairo_gpu_context__unbind_internal();
341 API_CTX(do_bind)(ctx, surface);
343 if(!ctx->gl_inited)
345 _cairo_gpu_gl_init(&ctx->gl, ctx->space->GetProcAddress);
346 ctx->gl_inited = 1;
348 _cairo_gl_tls.ctx = ctx;
350 if(destroy_ctx)
351 _cairo_gpu_context__do_destroy(destroy_ctx);
354 static inline void
355 _cairo_gl_context_bind_surface(cairo_gpu_context_t* ctx, cairo_gpu_surface_t * surface)
357 assert(_cairo_gl_tls.depth);
359 if(_cairo_gl_tls.user.api < 0)
360 _cairo_gpu_context__update_state(ctx, ctx->space->api);
362 if(_cairo_gl_tls.ctx == ctx)
364 if(surface && surface->has_drawable)
366 // lenient one-way version
367 if(!API_CTX(is_surface_bound)(ctx, surface))
368 goto bind;
371 else
373 bind:
374 _cairo_gpu_context__do_bind(ctx, surface);
378 static inline void
379 _cairo_gpu_context_bind(cairo_gpu_context_t* ctx)
381 assert(_cairo_gl_tls.depth);
383 if(_cairo_gl_tls.user.api < 0)
384 _cairo_gpu_context__update_state(ctx, ctx->space->api);
386 if(_cairo_gl_tls.ctx == ctx)
387 return;
388 else
389 _cairo_gpu_context__do_bind(ctx, 0);
392 static void
393 _cairo_gpu_context__destroy(cairo_gpu_context_t * ctx)
395 assert(_cairo_gl_tls.depth);
397 if(_cairo_gl_tls.user.api < 0)
398 _cairo_gpu_context__update_state(ctx, ctx->space->api);
400 if(ctx == _cairo_gl_tls.ctx)
402 ctx->destroy_on_unbind = 1;
403 return;
406 _cairo_gpu_context__do_destroy(ctx);
409 cairo_public cairo_space_t *
410 cairo_gl_hardware_space_create(void* libgl, unsigned flags)
412 cairo_space_t *space;
414 space = cairo_glx_space_create(libgl, 0, 1, 0, flags);
415 if(space)
417 if(space->is_software)
418 cairo_space_destroy(space);
419 else
420 return space;
423 return 0;
426 cairo_public cairo_space_t *
427 cairo_gl_space_create(void* libgl, unsigned flags)
429 cairo_space_t *space;
431 space = cairo_glx_space_create(libgl, 0, 1, 0, flags);
432 if(space)
433 return space;
435 space = cairo_osmesa_space_create(libgl, 0, flags);
436 if(space)
437 return space;
439 return 0;
442 static inline cairo_gpu_space_tls_t*
443 _cairo_gpu_space_alloc_tls(cairo_gpu_space_t* space)
445 return (cairo_gpu_space_tls_t*)calloc(sizeof(cairo_gpu_space_tls_t), 1);
448 static cairo_font_options_t *
449 _cairo_gpu_get_font_options (cairo_gpu_space_t* space)
451 if (space->has_font_options)
452 return &space->font_options;
454 CAIRO_MUTEX_LOCK(space->mutex);
455 if (! space->has_font_options) {
456 _cairo_font_options_init_default (&space->font_options);
458 space->font_options.antialias = CAIRO_ANTIALIAS_SUBPIXEL;
460 API_SPACE(init_font_options)(space);
462 space->has_font_options = TRUE;
464 CAIRO_MUTEX_UNLOCK(space->mutex);
466 return &space->font_options;
469 static void
470 _cairo_gpu_set_surface_subspace(cairo_gpu_surface_t* surface)
472 cairo_gpu_space_t* space = surface->space;
473 API_SPACE(set_surface_subspace)(space, surface);
476 static cairo_gpu_context_t *
477 _cairo_gpu_space_tls__create_context(cairo_gpu_space_tls_t* tls, cairo_gpu_subspace_t* sub)
479 cairo_gpu_space_t* space = tls->space;
480 cairo_gpu_context_t* ctx;
482 if(!sub)
484 // use the last subspace so that we hopefully minimize the number of created context
485 sub = &space->subspace;
486 while(sub->next)
487 sub = sub->next;
490 ctx = API_SPACE(create_context)(space, sub);
492 ctx->tls = tls;
493 ctx->next = tls->contexts;
494 tls->contexts = ctx;
496 pthread_setspecific(sub->context_tls, ctx);
497 tls->last_context = ctx;
498 return ctx;
501 static inline void
502 _cairo_gpu_space_tls_destroy_contexts(cairo_gpu_space_tls_t* tls)
504 cairo_gpu_context_t* ctx;
505 cairo_gpu_context_t* ctx_next;
506 for(ctx = tls->contexts; ctx; ctx = ctx_next)
508 ctx_next = ctx->next;
510 _cairo_gpu_context__destroy(ctx);
514 static inline cairo_gpu_context_t *
515 _cairo_gpu_space_tls_lookup_context(cairo_gpu_space_tls_t* tls)
517 cairo_gpu_context_t* ctx = 0;
519 ctx = tls->last_context;
521 if(!ctx)
522 ctx = _cairo_gpu_space_tls__create_context(tls, 0);
523 return ctx;
526 static void
527 _cairo_gpu_space__destroy(cairo_gpu_space_t * space)
529 cairo_gpu_subspace_t * sub;
530 cairo_gpu_subspace_t * sub_next;
531 cairo_gpu_space_tls_t* tls;
532 cairo_gpu_space_tls_t* tls_next;
534 pthread_key_delete(space->tls);
537 cairo_gpu_context_t* ctx = _cairo_gpu_space_bind(space);
538 _cairo_gpu_space__fini(ctx);
541 pthread_key_delete(space->subspace.context_tls);
543 for(tls = (cairo_gpu_space_tls_t*)space->tls_list.next; (list_node_t*)tls != &space->tls_list; tls = tls_next)
545 tls_next = (cairo_gpu_space_tls_t*)tls->node.next;
547 _cairo_gpu_space_tls_destroy(tls);
550 for(sub = space->subspace.next; sub; sub = sub_next)
552 sub_next = sub->next;
554 pthread_key_delete(sub->context_tls);
556 API_SPACE(destroy_subspace)(space, sub);
558 free(sub);
561 space->destroy_on_unbind = 1;
564 static void
565 _cairo_gpu_space_destroy(void* abstract_space)
567 cairo_gpu_space_t * space = abstract_space;
569 _cairo_gpu_enter();
570 _cairo_gpu_space__destroy(space);
571 _cairo_gpu_exit();
574 /* only current thread */
575 static cairo_status_t
576 _cairo_gpu_space_sync(void* abstract_space)
578 cairo_gpu_space_t* space = abstract_space;
579 cairo_gpu_space_tls_t* tls = (cairo_gpu_space_tls_t*)_cairo_gpu_space_get_tls(space);
580 cairo_gpu_context_t* ctx;
582 _cairo_gpu_enter();
583 for(ctx = tls->contexts; ctx; ctx = ctx->next)
585 _cairo_gpu_context_bind(ctx);
586 ctx->gl.Finish();
588 _cairo_gpu_exit();
589 return CAIRO_STATUS_SUCCESS;
592 static int
593 _cairo_gpu_space__init(cairo_gpu_context_t *ctx, unsigned flags)
595 cairo_gpu_space_t* space = ctx->space;
596 char *vendor;
597 char *renderer;
598 char* extensions;
599 unsigned dummy_tex_data = 0;
601 // TODO: this assumes that GL extensions don't change between context
602 extensions = (char *)ctx->gl.GetString(GL_EXTENSIONS);
603 vendor = (char *)ctx->gl.GetString(GL_VENDOR);
604 renderer = (char *)ctx->gl.GetString(GL_RENDERER);
606 // TODO: any better solutions?
607 space->base.is_software = strstr(renderer, "software") || strstr(renderer, "Software");
609 // these can be implemented on any hardware
610 if(!strstr(extensions, "EXT_texture_object") || !strstr(extensions, "EXT_vertex_array"))
612 fprintf(stderr, "ERROR: the current OpenGL driver is severely deficient: it is missing EXT_texture_object or EXT_vertex_array!\n");
613 goto fail;
616 space->tex_units = 1;
617 if(!!strstr(extensions, "ARB_multitexture"))
619 int tex_units;
620 int limit;
621 ctx->gl.GetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &tex_units);
623 // 4 is enough for us
624 limit = 4 - ((flags >> CAIRO_GPU_GL_FORCE_TEX_UNITS_SHIFT) & 3);
626 if(tex_units > limit)
627 tex_units = limit;
628 space->tex_units = tex_units;
631 space->use_fbo = !(flags & CAIRO_GPU_GL_DISABLE_FRAMEBUFFER_OBJECT) && strstr(extensions, "EXT_framebuffer_object");
633 if(!space->use_fbo)
635 if(!API_SPACE(has_offscreen_drawables)(space))
636 goto fail;
639 space->fb_blit = !(flags & CAIRO_GPU_GL_DISABLE_FRAMEBUFFER_BLIT) && strstr(extensions, "EXT_framebuffer_blit");
641 ctx->space->use_vbo = !!strstr(extensions, "EXT_vertex_buffer_object");
643 if(!(flags & CAIRO_GPU_GL_DISABLE_TEXTURE_NON_POWER_OF_TWO) &&
644 strstr(extensions, "ARB_texture_non_power_of_two"))
645 space->tex_npot = 1;
647 if(!(flags & CAIRO_GPU_GL_DISABLE_TEXTURE_RECTANGLE)
648 && (strstr(extensions, "ARB_texture_rectangle") || strstr(extensions, "EXT_texture_rectangle") || strstr(extensions, "NV_texture_rectangle")))
649 space->tex_rectangle = 1;
651 space->extend_mask = 1 << CAIRO_EXTEND_REPEAT;
653 /* These are pretty much guaranteed */
654 if(strstr(extensions, "ARB_texture_border_clamp") || strstr(extensions, "SGIS_texture_border_clamp"))
655 space->extend_mask |= 1 << CAIRO_EXTEND_NONE;
657 if(strstr(extensions, "EXT_texture_edge_clamp") || strstr(extensions, "SGIS_texture_edge_clamp"))
658 space->extend_mask |= 1 << CAIRO_EXTEND_PAD;
660 if(strstr(extensions, "ARB_texture_mirrored_repeat"))
661 space->extend_mask |= 1 << CAIRO_EXTEND_REFLECT;
663 space->use_vert_prog = !(flags & CAIRO_GPU_GL_DISABLE_VERTEX_PROGRAM) && strstr(extensions, "ARB_vertex_program");
665 space->vert_op = space->vert_passthru = space->use_vert_prog;
667 space->use_frag_prog = !(flags & CAIRO_GPU_GL_DISABLE_FRAGMENT_PROGRAM) && strstr(extensions, "ARB_fragment_program");
669 space->has_combine = !(flags & CAIRO_GPU_GL_DISABLE_TEX_ENV_COMBINE) && !!strstr(extensions, "ARB_texture_env_combine");
671 space->use_intensity = !space->use_frag_prog && !space->has_combine;
673 // we do this because we need GL_INTENSITY textures for GL_MODULATE, that wouldn't be renderable and in addition hardware/drivers without ARB_texture_env_combine are less likely to support FBOs
674 // TODO: don't do this, but add new SURF_ALPHA / SURF_INTENSITY texture "aliases"
675 if(!space->use_frag_prog && !space->has_combine)
676 space->use_fbo = 0;
678 space->crossbar = strstr(extensions, "ARB_texture_env_crossbar") || strstr(extensions, "NV_texture_env_combine4");
680 space->per_component = (space->use_frag_prog || (space->has_combine && space->crossbar && strstr(extensions, "ARB_texture_env_dot3")));
681 space->radial = space->frag_div_alpha = space->discontinuous = space->use_frag_prog;
682 space->tex_aaaa_111a = space->frag_mul_alpha = space->use_frag_prog || space->has_combine;
684 space->frag_passthru = space->use_frag_prog || strstr(extensions, "NV_texture_shader");
686 space->blend_func_separate = !(flags & CAIRO_GPU_GL_DISABLE_BLEND_FUNC_SEPARATE) && strstr(extensions, "EXT_blend_func_separate");
687 space->blend_color = !!strstr(extensions, "EXT_blend_color");
688 space->blend_subtract = !!strstr(extensions, "EXT_blend_subtract"),
689 space->has_window_pos = !!strstr(extensions, "ARB_window_pos");
690 space->has_framebuffer_multisample = !!strstr(extensions, "EXT_framebuffer_multisample");
691 space->has_framebuffer_multisample_coverage = !!strstr(extensions, "NV_framebuffer_multisample_coverage");
692 space->has_fragment_program2 = !!strstr(extensions, "NV_fragment_program2");
693 space->has_gpu_program4 = !!strstr(extensions, "NV_gpu_program4");
695 space->msaa_samples = 16; /* optimistic value, will be downgraded if necessary */
697 // TODO: this is true on nVidia G70, which does the equivalent of 2x2 MSAA
698 space->fastest_polygon_smooth_samples = 4;
699 space->nicest_polygon_smooth_samples = 4;
701 space->max_anisotropy = 1.0;
702 if(strstr(extensions, "EXT_texture_filter_anisotropic"))
703 ctx->gl.GetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &space->max_anisotropy);
705 /* Set up the dummy texture for tex_env_combine with constant color. */
706 _cairo_gpu_texture_create(ctx, &space->dummy_texture, 1, 1);
707 _cairo_gl_context_set_active_texture(ctx, _cairo_gl_context_set_texture(ctx, -1, &space->dummy_texture));
708 ctx->gl.TexImage2D(_cairo_gl_context_active_target(ctx), 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, &dummy_tex_data);
709 return 0;
711 fail:
712 return -1;
715 static inline cairo_bool_t
716 _cairo_gpu_space_is_frag_supported(cairo_gpu_space_t* space, unsigned frag)
718 if(space->use_frag_prog)
719 return 1;
720 else
722 unsigned tex0 = (frag >> (FRAG_TEX_SHIFT)) & FRAG_TEX_MASK;
723 unsigned tex1 = (frag >> (FRAG_TEX_SHIFT + FRAG_TEX_BITS)) & FRAG_TEX_MASK;
724 unsigned tex01 = tex0 | tex1;
725 unsigned op = frag & FRAG_OP_MASK;
728 (op && op != (OP_MUL_ALPHA << FRAG_OP_SHIFT))
729 || (tex01 & (FRAG_TEX_RADIAL | FRAG_TEX_DISCONTINUOUS))
730 || (tex01 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111CA
731 || (tex1 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C
732 || (!space->tex_aaaa_111a && (
733 ((tex01 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_AAAA)
734 || ((tex01 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111A) ))
736 return 0;
738 // 4 is enough for everyone
739 if(space->tex_units < 4)
741 int n = -1; // first unit has two slots
742 if(tex1 && space->tex_units < 2)
743 return 0;
745 if(tex0)
747 ++n;
749 if((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C)
750 n += 2;
751 else
753 if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == (OP_MUL_ALPHA << FRAG_OP_SHIFT))
754 ++n;
758 // slot #1 is unused
759 if(!space->crossbar && (frag & FRAG_OPPOS_TEX1))
760 ++n;
762 if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == ((OP_MUL_ALPHA << FRAG_OP_SHIFT) | FRAG_OPPOS_TEX1))
763 ++n;
765 if(tex1)
766 ++n;
768 if(frag & (FRAG_CONSTANT | FRAG_PRIMARY))
769 ++n;
771 if(n > space->tex_units)
772 return 0;
775 return 1;
778 static cairo_space_t*
779 _cairo_gpu_space__finish_create(cairo_gpu_space_t* space, unsigned flags)
781 cairo_gpu_context_t* ctx;
782 int init;
783 const char* env = getenv("__CAIRO_GPU_GL_FLAGS");
784 if(env)
785 flags |= atoi(env);
787 _cairo_gpu_enter();
789 // this calls _cairo_gpu_space__init
790 ctx = _cairo_gpu_space_bind(space);
791 init = _cairo_gpu_space__init(ctx, flags);
793 if(init)
795 _cairo_gpu_space__destroy(space);
796 space = 0;
798 _cairo_gpu_exit();
799 return (cairo_space_t*)space;
802 cairo_space_t *
803 cairo_gl_space_create_from_current_context(void* libgl, unsigned flags)
805 cairo_space_t* space;
807 space = cairo_glx_space_create_from_current_context(libgl, flags);
808 if(space)
809 return space;
811 space = cairo_osmesa_space_create_from_current_context(libgl, flags);
812 if(space)
813 return space;
815 return 0;
818 cairo_public cairo_gl_api_t
819 cairo_gl_space_get_api(cairo_space_t * abstract_space)
821 cairo_gpu_space_t *space = (cairo_gpu_space_t*)abstract_space;
823 if(abstract_space->backend != &_cairo_gpu_space_backend)
824 return CAIRO_GL_API_NONE;
826 return space->api;
829 cairo_public void*
830 cairo_gl_space_get_libgl(cairo_space_t * abstract_space)
832 cairo_gpu_space_t *space = (cairo_gpu_space_t*)abstract_space;
834 if(abstract_space->backend != &_cairo_gpu_space_backend)
835 return 0;
837 return space->libgl;
840 void*
841 cairo_gl_space_get_proc_address(cairo_space_t * abstract_space, const char* name)
843 cairo_gpu_space_t *space = (cairo_gpu_space_t*)abstract_space;
845 if(abstract_space->backend != &_cairo_gpu_space_backend)
846 return 0;
848 return space->GetProcAddress(name);
851 void*
852 cairo_gl_space_create_context(cairo_space_t * abstract_space, cairo_surface_t* surface)
854 cairo_gpu_space_t *space = (cairo_gpu_space_t*)abstract_space;
856 if(abstract_space->backend != &_cairo_gpu_space_backend)
857 return 0;
859 if(surface && surface->backend != &_cairo_gpu_surface_backend)
860 return 0;
862 return API_SPACE(create_gl_context)(space, surface);
865 void
866 cairo_gl_space_destroy_context(cairo_space_t * abstract_space, void* context)
868 cairo_gpu_space_t *space = (cairo_gpu_space_t*)abstract_space;
870 if(abstract_space->backend != &_cairo_gpu_space_backend)
871 return;
873 API_SPACE(destroy_gl_context)(space, context);
876 cairo_bool_t
877 cairo_gl_space_make_current(cairo_space_t* abstract_space, void* context, cairo_surface_t* abstract_draw_surface, cairo_surface_t* abstract_read_surface)
879 cairo_gpu_space_t* space = (cairo_gpu_space_t*)abstract_space;
880 cairo_gpu_surface_t* read_surface = (cairo_gpu_surface_t*)abstract_read_surface;
881 cairo_gpu_surface_t* draw_surface = (cairo_gpu_surface_t*)abstract_draw_surface;
882 int api = 0;
884 if(read_surface)
886 if(read_surface->base.backend != &_cairo_gpu_surface_backend)
887 return 0;
888 api = read_surface->space->api;
891 if(draw_surface)
893 if(draw_surface->base.backend != &_cairo_gpu_surface_backend)
894 return 0;
895 if(api && api != draw_surface->space->api)
896 return 0;
898 api = draw_surface->space->api;
901 if(!api)
903 if(!space)
904 return 0;
905 if(space->base.backend != &_cairo_gpu_space_backend)
906 return 0;
907 api = space->api;
910 return API_SPACE(make_gl_context_current)(space, context, draw_surface, read_surface);