/* Per-thread GL bookkeeping. The wrapping typedef and the 'depth' member are
 * reconstructed from the uses of _cairo_gl_tls below. */
typedef struct
{
    int depth;
    cairo_gpu_gl_state_t user;
    cairo_gpu_context_t* ctx;
} cairo_gpu_gl_tls_t;

__thread cairo_gpu_gl_tls_t _cairo_gl_tls;
static cairo_gpu_context_t *
_cairo_gpu_context__create(cairo_gpu_space_t *space, cairo_gpu_subspace_t *sub)
{
    cairo_gpu_context_t * ctx = calloc(1, sizeof(cairo_gpu_context_t));
    /* ... */
    ctx->color_mask = 0xf;
    /* ... */
    ctx->constant_unit = -1;
    ctx->viewport_width = -1;
    ctx->viewport_height = -1;
    /* ... */
    return ctx;
}
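/* The -1 sentinels above (constant_unit, viewport_width, viewport_height)
 * presumably mean "no value cached yet", so the first state update always
 * emits the corresponding GL calls instead of being skipped as redundant. */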
/* return type reconstructed to match cairo's hash-table keys_equal callback */
static cairo_bool_t
_cairo_gpu_hash_keys_equal(const void *key_a, const void *key_b)
{
    return ((cairo_hash_entry_t *)key_a)->hash == ((cairo_hash_entry_t *)key_b)->hash;
}
static cairo_gpu_space_t *
_cairo_gpu_space__begin_create(void)
{
    cairo_gpu_space_t *space;

    space = calloc(1, sizeof(cairo_gpu_space_t));
    /* ... */
    CAIRO_REFERENCE_COUNT_INIT(&space->base.ref_count, 1);
    CAIRO_MUTEX_INIT(space->mutex);
    CAIRO_MUTEX_INIT(space->cached_mask_surface_mutex);
    space->base.backend = &_cairo_gpu_space_backend;
    space->tls_list.prev = &space->tls_list;
    space->tls_list.next = &space->tls_list;
    pthread_key_create(&space->tls, _cairo_gpu_space_tls_dtor);
    pthread_key_create(&space->subspace.context_tls, 0);
    space->table = _cairo_hash_table_create(_cairo_gpu_hash_keys_equal);
    /* ... */
    return space;
}

static cairo_space_t *
_cairo_gpu_space__finish_create(cairo_gpu_space_t * space, unsigned flags);
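/* Space creation is split in two phases: _cairo_gpu_space__begin_create()
 * above allocates and initializes the backend-independent parts, the
 * API-specific backends (GLX/OSMesa, included below) fill in the rest, and
 * _cairo_gpu_space__finish_create() then binds a context and runs
 * _cairo_gpu_space__init() to probe the driver (see further down). */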
#include "cairo-gpu-impl-space-gl-osmesa.h"
#include "cairo-gpu-impl-space-gl-glx.h"
static inline unsigned
_cairo_gl_error(cairo_gpu_context_t * ctx)
{
    unsigned err;

    if((err = ctx->gl.GetError()))
    {
        /* drain any further queued errors */
        while(ctx->gl.GetError())
        {}
    }

    return err;
}

static void
_cairo_gpu_assert_no_gl_error(cairo_gpu_context_t * ctx)
{
    static int errors = 0;
    unsigned err;

    while((err = ctx->gl.GetError()))
    {
        // race condition here but we don't really care
        ++errors;
        /* ... */
        printf("WARNING: cairo-gpu: not showing any more OpenGL errors\n");
        /* ... */
        printf("WARNING: cairo-gpu: unexpected OpenGL error: %x\n", err);
    }
}

#define GL_ERROR(x) (_cairo_gpu_assert_no_gl_error(ctx), x, _cairo_gl_error(ctx))
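/* GL_ERROR() brackets a single GL call: it first checks (and warns about) any
 * error already pending, then evaluates the call, then evaluates to whatever
 * error the call itself raised (0 if none). A hypothetical use would be
 * GL_ERROR(ctx->gl.Flush()); the macro expects a variable named 'ctx' in scope. */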
static void
_cairo_gpu_gl_init(cairo_gpu_gl_t * gl, PFNGLGETPROCADDRESSPROC GetProcAddress)
{
#define DO(x) gl->x = GetProcAddress("gl" #x)
    /* (abridged: only some of the resolved entry points are shown) */
    DO(ActiveTextureARB);
    DO(BindFramebufferEXT);
    DO(BindRenderbufferEXT);
    DO(BlendFuncSeparateEXT);
    DO(BlitFramebufferEXT);
    DO(CheckFramebufferStatusEXT);
    DO(ClientActiveTextureARB);
    DO(CopyTexSubImage2D);
    DO(DeleteBuffersARB);
    DO(DeleteFramebuffersEXT);
    DO(DeleteProgramsARB);
    DO(DeleteRenderbuffersEXT);
    DO(DeleteTexturesEXT);
    DO(DeleteVertexArrays);
    DO(DisableClientState);
    DO(EnableClientState);
    DO(EnableVertexAttribArrayARB);
    DO(FramebufferRenderbufferEXT);
    DO(FramebufferTexture2DEXT);
    DO(GenerateMipmapEXT);
    DO(GenFramebuffersEXT);
    DO(GenRenderbuffersEXT);
    /* ... further DO() entries elided ... */
    DO(ProgramEnvParameter4fvARB);
    DO(ProgramStringARB);
    /* ... */
    DO(RenderbufferStorageMultisampleCoverageNV);
    DO(RenderbufferStorageMultisampleEXT);
    /* ... */
}
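/* DO(x) resolves one entry point through the driver's GetProcAddress, e.g.
 * DO(ActiveTextureARB) expands to
 *     gl->ActiveTextureARB = GetProcAddress("glActiveTextureARB");
 * so every GL call in this file goes through the per-context 'gl' vtable. */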
static void
_cairo_gpu_space__fini_entry(void* abstract_entry, void* closure)
{
    cairo_gpu_context_t * ctx = closure;
    cairo_gpu_int_entry_t * int_entry = (cairo_gpu_int_entry_t *)abstract_entry;
    //cairo_gpu_ptr_entry* ptr_entry = (cairo_gpu_ptr_entry_t*)abstract_entry;

    switch(int_entry->base.hash & TABLE_MASK)
    {
    /* ... (case labels elided) */
        ctx->gl.DeleteProgramsARB(1, &int_entry->v);
    /* ... */
    }

    _cairo_hash_table_remove(ctx->space->table, (cairo_hash_entry_t *)abstract_entry);
    free(abstract_entry);
}

static void
_cairo_gpu_space__fini(cairo_gpu_context_t * ctx)
{
    cairo_gpu_space_t * space = ctx->space;

    _cairo_gpu_texture_fini(ctx, &space->dummy_texture);

    _cairo_hash_table_foreach(space->table, _cairo_gpu_space__fini_entry, ctx);
    _cairo_hash_table_destroy(space->table);
}
// ctx == 0, user.api < 0  => we have a user context bound, or nothing
// ctx != 0, user.api < 0  => we have either a user context bound, or nothing, or ctx
// ctx != 0, user.api >= 0 => we have ctx bound, and may have a user context saved
// ctx == 0, user.api > 0  => we have a user context bound, and we have a user context saved
// ctx == 0, user.api == 0 => we have no context bound, and we have no user context saved
static void
_cairo_gpu_enter(void)
{
    ++_cairo_gl_tls.depth;
}
static void
_cairo_gl_make_current(cairo_gpu_space_t * space, cairo_gpu_gl_state_t * user)
{
    API_SPACE(make_current)(space, user);
}
static void
_cairo_gpu_space__do_destroy(cairo_gpu_space_t * space)
{
    API_SPACE(do_destroy)(space);

    if(space->owns_libgl)
        dlclose(space->libgl);
    /* ... */
}
static void
_cairo_gpu_context__do_destroy(cairo_gpu_context_t * ctx)
{
    cairo_gpu_space_t * space = ctx->space;

    API_CTX(do_destroy)(ctx);
    /* ... */
    if(space->destroy_on_unbind)
        _cairo_gpu_space__do_destroy(space);
}
static cairo_gpu_context_t *
_cairo_gpu_context__unbind_internal(void)
{
    cairo_gpu_context_t * ctx = _cairo_gl_tls.ctx;

    _cairo_gpu_assert_no_gl_error(ctx);

    if(ctx->destroy_on_unbind)
        return ctx;    /* reconstructed: callers destroy the returned context */
    return 0;
}
static void
_cairo_gpu_do_exit(void)
{
    cairo_gpu_context_t * ctx = _cairo_gl_tls.ctx;
    cairo_gpu_context_t * destroy_ctx = 0;

    if(ctx)
        destroy_ctx = _cairo_gpu_context__unbind_internal();

    if(destroy_ctx && !_cairo_gl_tls.user.api)
    {
        cairo_gpu_space_t * space = ctx->space;
        API_SPACE(unbind_context)(space);
        _cairo_gl_tls.ctx = 0;
    }

    if(_cairo_gl_tls.user.api)
    {
        _cairo_gl_make_current(ctx->space, &_cairo_gl_tls.user);
        _cairo_gl_tls.user.api = 0;
        _cairo_gl_tls.ctx = 0;
    }
    else    /* branch structure reconstructed */
        _cairo_gl_tls.user.api = -1;

    if(destroy_ctx)
        _cairo_gpu_context__do_destroy(destroy_ctx);
}

/* NOTE: if no OpenGL context was set, we leak our current context.
 * This allows running many cairo commands without rebinding the context every time.
 * The risk is that the user may accidentally mess up our GL context.
 */
static void
_cairo_gpu_exit(void)
{
    if(!--_cairo_gl_tls.depth && _cairo_gl_tls.user.api >= 0)
        _cairo_gpu_do_exit();
}
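/* Sketch of the nesting protocol implemented above (illustrative only; the
 * middle step stands for whatever GL work the caller issues):
 *
 *     _cairo_gpu_enter();
 *     ctx = _cairo_gpu_space_tls_lookup_context(tls);
 *     _cairo_gpu_context_bind(ctx);
 *     ... issue GL calls through ctx->gl ...
 *     _cairo_gpu_exit();
 *
 * enter/exit nest via _cairo_gl_tls.depth; only the outermost exit restores
 * the user's GL context (or deliberately leaks ours, per the NOTE above). */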
// you must bind a context after calling this
static void
_cairo_gpu_context__update_state(cairo_gpu_context_t * ctx, unsigned api)
{
    cairo_gpu_gl_state_t * user = &_cairo_gl_tls.user;

    API_CTX(update_state)(ctx, user);
    /* ... */
}
static void
_cairo_gpu_context__do_bind(cairo_gpu_context_t * ctx, cairo_gpu_surface_t * surface)
{
    cairo_gpu_context_t * destroy_ctx = 0;
    if(_cairo_gl_tls.ctx)
        destroy_ctx = _cairo_gpu_context__unbind_internal();

    API_CTX(do_bind)(ctx, surface);
    /* ... */
    _cairo_gpu_gl_init(&ctx->gl, ctx->space->GetProcAddress);
    /* ... */
    _cairo_gl_tls.ctx = ctx;

    if(destroy_ctx)
        _cairo_gpu_context__do_destroy(destroy_ctx);
}
static void
_cairo_gl_context_bind_surface(cairo_gpu_context_t * ctx, cairo_gpu_surface_t * surface)
{
    assert(_cairo_gl_tls.depth);

    if(_cairo_gl_tls.user.api < 0)
        _cairo_gpu_context__update_state(ctx, ctx->space->api);

    if(_cairo_gl_tls.ctx == ctx)
    {
        if(surface && surface->has_drawable)
        {
            // lenient one-way version
            if(!API_CTX(is_surface_bound)(ctx, surface))
            { /* ... */ }
        }
        /* ... */
    }

    _cairo_gpu_context__do_bind(ctx, surface);
}
static void
_cairo_gpu_context_bind(cairo_gpu_context_t * ctx)
{
    assert(_cairo_gl_tls.depth);

    if(_cairo_gl_tls.user.api < 0)
        _cairo_gpu_context__update_state(ctx, ctx->space->api);

    if(_cairo_gl_tls.ctx == ctx)
        return;

    _cairo_gpu_context__do_bind(ctx, 0);
}
static void
_cairo_gpu_context__destroy(cairo_gpu_context_t * ctx)
{
    assert(_cairo_gl_tls.depth);

    if(_cairo_gl_tls.user.api < 0)
        _cairo_gpu_context__update_state(ctx, ctx->space->api);

    if(ctx == _cairo_gl_tls.ctx)
        ctx->destroy_on_unbind = 1;
    else    /* branch reconstructed */
        _cairo_gpu_context__do_destroy(ctx);
}
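/* Destruction of the currently bound context is deferred: it is only flagged
 * with destroy_on_unbind here, and _cairo_gpu_context__unbind_internal() later
 * hands it back to its caller so it can actually be destroyed once a different
 * (or no) context is current. */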
cairo_public cairo_space_t *
cairo_gl_hardware_space_create(void* libgl, unsigned flags)
{
    cairo_space_t *space;

    space = cairo_glx_space_create(libgl, 0, 1, 0, flags);
    /* ... */
    if(space->is_software)
    {
        cairo_space_destroy(space);
        /* ... */
    }
    /* ... */
    return space;
}
cairo_public cairo_space_t *
cairo_gl_space_create(void* libgl, unsigned flags)
{
    cairo_space_t *space;

    space = cairo_glx_space_create(libgl, 0, 1, 0, flags);
    /* ... (presumably a fallback condition; elided) */
        space = cairo_osmesa_space_create(libgl, 0, flags);
    /* ... */
    return space;
}
static inline cairo_gpu_space_tls_t *
_cairo_gpu_space_alloc_tls(cairo_gpu_space_t * space)
{
    return (cairo_gpu_space_tls_t *)calloc(sizeof(cairo_gpu_space_tls_t), 1);
}
static cairo_font_options_t *
_cairo_gpu_get_font_options (cairo_gpu_space_t * space)
{
    if (space->has_font_options)
        return &space->font_options;

    CAIRO_MUTEX_LOCK(space->mutex);
    if (! space->has_font_options) {
        _cairo_font_options_init_default (&space->font_options);

        space->font_options.antialias = CAIRO_ANTIALIAS_SUBPIXEL;

        API_SPACE(init_font_options)(space);

        space->has_font_options = TRUE;
    }
    CAIRO_MUTEX_UNLOCK(space->mutex);

    return &space->font_options;
}
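/* Classic double-checked initialization: the unlocked fast path returns the
 * options once has_font_options is set, and the mutexed re-check keeps two
 * threads racing on the first call from initializing them twice. */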
static void
_cairo_gpu_set_surface_subspace(cairo_gpu_surface_t * surface)
{
    cairo_gpu_space_t * space = surface->space;
    API_SPACE(set_surface_subspace)(space, surface);
}
static cairo_gpu_context_t *
_cairo_gpu_space_tls__create_context(cairo_gpu_space_tls_t * tls, cairo_gpu_subspace_t * sub)
{
    cairo_gpu_space_t * space = tls->space;
    cairo_gpu_context_t * ctx;

    // use the last subspace so that we hopefully minimize the number of created contexts
    if(!sub)    /* guard reconstructed */
        sub = &space->subspace;
    /* ... */
    ctx = API_SPACE(create_context)(space, sub);
    /* ... */
    ctx->next = tls->contexts;
    tls->contexts = ctx;
    /* ... */
    pthread_setspecific(sub->context_tls, ctx);
    tls->last_context = ctx;

    return ctx;
}
static void
_cairo_gpu_space_tls_destroy_contexts(cairo_gpu_space_tls_t * tls)
{
    cairo_gpu_context_t * ctx;
    cairo_gpu_context_t * ctx_next;

    for(ctx = tls->contexts; ctx; ctx = ctx_next)
    {
        ctx_next = ctx->next;

        _cairo_gpu_context__destroy(ctx);
    }
}
static inline cairo_gpu_context_t *
_cairo_gpu_space_tls_lookup_context(cairo_gpu_space_tls_t * tls)
{
    cairo_gpu_context_t * ctx = 0;
    /* ... */
    ctx = tls->last_context;

    if(!ctx)    /* guard reconstructed */
        ctx = _cairo_gpu_space_tls__create_context(tls, 0);

    return ctx;
}
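/* Context lookup is per thread: last_context acts as a cheap cache, and a
 * context is created lazily (for the default subspace, sub == 0) only on the
 * first lookup from a given thread. */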
static void
_cairo_gpu_space__destroy(cairo_gpu_space_t * space)
{
    cairo_gpu_subspace_t * sub;
    cairo_gpu_subspace_t * sub_next;
    cairo_gpu_space_tls_t * tls;
    cairo_gpu_space_tls_t * tls_next;

    pthread_key_delete(space->tls);

    {
        cairo_gpu_context_t * ctx = _cairo_gpu_space_bind(space);
        _cairo_gpu_space__fini(ctx);
    }

    pthread_key_delete(space->subspace.context_tls);

    for(tls = (cairo_gpu_space_tls_t *)space->tls_list.next; (list_node_t *)tls != &space->tls_list; tls = tls_next)
    {
        tls_next = (cairo_gpu_space_tls_t *)tls->node.next;

        _cairo_gpu_space_tls_destroy(tls);
    }

    for(sub = space->subspace.next; sub; sub = sub_next)
    {
        sub_next = sub->next;

        pthread_key_delete(sub->context_tls);

        API_SPACE(destroy_subspace)(space, sub);
        /* ... */
    }
    /* ... */
    space->destroy_on_unbind = 1;
}
static void
_cairo_gpu_space_destroy(void* abstract_space)
{
    cairo_gpu_space_t * space = abstract_space;
    /* ... */
    _cairo_gpu_space__destroy(space);
}
/* only syncs contexts belonging to the current thread */
static cairo_status_t
_cairo_gpu_space_sync(void* abstract_space)
{
    cairo_gpu_space_t * space = abstract_space;
    cairo_gpu_space_tls_t * tls = (cairo_gpu_space_tls_t *)_cairo_gpu_space_get_tls(space);
    cairo_gpu_context_t * ctx;
    /* ... */
    for(ctx = tls->contexts; ctx; ctx = ctx->next)
    {
        _cairo_gpu_context_bind(ctx);
        /* ... */
    }

    return CAIRO_STATUS_SUCCESS;
}
/* return type reconstructed: the caller treats the result as a success flag */
static cairo_bool_t
_cairo_gpu_space__init(cairo_gpu_context_t *ctx, unsigned flags)
{
    cairo_gpu_space_t * space = ctx->space;
    char * extensions;    /* declarations reconstructed from the assignments below */
    char * vendor;
    char * renderer;
    unsigned dummy_tex_data = 0;

    // TODO: this assumes that GL extensions don't change between contexts
    extensions = (char *)ctx->gl.GetString(GL_EXTENSIONS);
    vendor = (char *)ctx->gl.GetString(GL_VENDOR);
    renderer = (char *)ctx->gl.GetString(GL_RENDERER);

    // TODO: any better solutions?
    space->base.is_software = strstr(renderer, "software") || strstr(renderer, "Software");

    // these can be implemented on any hardware
    if(!strstr(extensions, "EXT_texture_object") || !strstr(extensions, "EXT_vertex_array"))
    {
        fprintf(stderr, "ERROR: the current OpenGL driver is severely deficient: it is missing EXT_texture_object or EXT_vertex_array!\n");
        /* ... */
    }
    space->tex_units = 1;
    if(!!strstr(extensions, "ARB_multitexture"))
    {
        int tex_units;    /* declarations reconstructed */
        int limit;

        ctx->gl.GetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &tex_units);

        // 4 is enough for us
        limit = 4 - ((flags >> CAIRO_GPU_GL_FORCE_TEX_UNITS_SHIFT) & 3);
        if(tex_units > limit)
            tex_units = limit;    /* clamp (line reconstructed) */
        space->tex_units = tex_units;
    }

    space->use_fbo = !(flags & CAIRO_GPU_GL_DISABLE_FRAMEBUFFER_OBJECT) && strstr(extensions, "EXT_framebuffer_object");
    /* ... */
    if(!API_SPACE(has_offscreen_drawables)(space))
    { /* ... */ }

    space->fb_blit = !(flags & CAIRO_GPU_GL_DISABLE_FRAMEBUFFER_BLIT) && strstr(extensions, "EXT_framebuffer_blit");

    ctx->space->use_vbo = !!strstr(extensions, "EXT_vertex_buffer_object");

    if(!(flags & CAIRO_GPU_GL_DISABLE_TEXTURE_NON_POWER_OF_TWO) &&
        strstr(extensions, "ARB_texture_non_power_of_two"))
    { /* ... */ }

    if(!(flags & CAIRO_GPU_GL_DISABLE_TEXTURE_RECTANGLE)
        && (strstr(extensions, "ARB_texture_rectangle") || strstr(extensions, "EXT_texture_rectangle") || strstr(extensions, "NV_texture_rectangle")))
        space->tex_rectangle = 1;
    space->extend_mask = 1 << CAIRO_EXTEND_REPEAT;

    /* These are pretty much guaranteed */
    if(strstr(extensions, "ARB_texture_border_clamp") || strstr(extensions, "SGIS_texture_border_clamp"))
        space->extend_mask |= 1 << CAIRO_EXTEND_NONE;

    if(strstr(extensions, "EXT_texture_edge_clamp") || strstr(extensions, "SGIS_texture_edge_clamp"))
        space->extend_mask |= 1 << CAIRO_EXTEND_PAD;

    if(strstr(extensions, "ARB_texture_mirrored_repeat"))
        space->extend_mask |= 1 << CAIRO_EXTEND_REFLECT;

    space->use_vert_prog = !(flags & CAIRO_GPU_GL_DISABLE_VERTEX_PROGRAM) && strstr(extensions, "ARB_vertex_program");

    space->vert_op = space->vert_passthru = space->use_vert_prog;

    space->use_frag_prog = !(flags & CAIRO_GPU_GL_DISABLE_FRAGMENT_PROGRAM) && strstr(extensions, "ARB_fragment_program");

    space->has_combine = !(flags & CAIRO_GPU_GL_DISABLE_TEX_ENV_COMBINE) && !!strstr(extensions, "ARB_texture_env_combine");

    space->use_intensity = !space->use_frag_prog && !space->has_combine;

    // We do this because GL_MODULATE needs GL_INTENSITY textures, which wouldn't be renderable;
    // in addition, hardware/drivers without ARB_texture_env_combine are less likely to support FBOs.
    // TODO: don't do this, but add new SURF_ALPHA / SURF_INTENSITY texture "aliases"
    if(!space->use_frag_prog && !space->has_combine)
    { /* ... */ }

    space->crossbar = strstr(extensions, "ARB_texture_env_crossbar") || strstr(extensions, "NV_texture_env_combine4");
    space->per_component = (space->use_frag_prog || (space->has_combine && space->crossbar && strstr(extensions, "ARB_texture_env_dot3")));
    space->radial = space->frag_div_alpha = space->discontinuous = space->use_frag_prog;
    space->tex_aaaa_111a = space->frag_mul_alpha = space->use_frag_prog || space->has_combine;

    space->frag_passthru = space->use_frag_prog || strstr(extensions, "NV_texture_shader");

    space->blend_func_separate = !(flags & CAIRO_GPU_GL_DISABLE_BLEND_FUNC_SEPARATE) && strstr(extensions, "EXT_blend_func_separate");
    space->blend_color = !!strstr(extensions, "EXT_blend_color");
    space->blend_subtract = !!strstr(extensions, "EXT_blend_subtract");
    space->has_window_pos = !!strstr(extensions, "ARB_window_pos");
    space->has_framebuffer_multisample = !!strstr(extensions, "EXT_framebuffer_multisample");
    space->has_framebuffer_multisample_coverage = !!strstr(extensions, "NV_framebuffer_multisample_coverage");
    space->has_fragment_program2 = !!strstr(extensions, "NV_fragment_program2");
    space->has_gpu_program4 = !!strstr(extensions, "NV_gpu_program4");

    space->msaa_samples = 16; /* optimistic value, will be downgraded if necessary */

    // TODO: this is true on nVidia G70, which does the equivalent of 2x2 MSAA
    space->fastest_polygon_smooth_samples = 4;
    space->nicest_polygon_smooth_samples = 4;

    space->max_anisotropy = 1.0;
    if(strstr(extensions, "EXT_texture_filter_anisotropic"))
        ctx->gl.GetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &space->max_anisotropy);

    /* Set up the dummy texture for tex_env_combine with constant color. */
    _cairo_gpu_texture_create(ctx, &space->dummy_texture, 1, 1);
    _cairo_gl_context_set_active_texture(ctx, _cairo_gl_context_set_texture(ctx, -1, &space->dummy_texture));
    ctx->gl.TexImage2D(_cairo_gl_context_active_target(ctx), 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, &dummy_tex_data);
    /* ... */
    return TRUE;    /* reconstructed */
}
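/* Summary of the capability probing above: everything is derived from the
 * GL_EXTENSIONS string (plus the CAIRO_GPU_GL_DISABLE_* flags), e.g. FBOs from
 * EXT_framebuffer_object, separate blending from EXT_blend_func_separate,
 * programmable paths from ARB_vertex_program / ARB_fragment_program, and the
 * CAIRO_EXTEND_* modes supported in hardware from the clamp/mirror texture-wrap
 * extensions. The 1x1 dummy texture created last is bound where
 * tex_env_combine needs a constant color without sampling a real image. */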
static inline cairo_bool_t
_cairo_gpu_space_is_frag_supported(cairo_gpu_space_t * space, unsigned frag)
{
    if(space->use_frag_prog)
        return TRUE;    /* reconstructed */

    unsigned tex0 = (frag >> (FRAG_TEX_SHIFT)) & FRAG_TEX_MASK;
    unsigned tex1 = (frag >> (FRAG_TEX_SHIFT + FRAG_TEX_BITS)) & FRAG_TEX_MASK;
    unsigned tex01 = tex0 | tex1;
    unsigned op = frag & FRAG_OP_MASK;

    if(
        (op && op != (OP_MUL_ALPHA << FRAG_OP_SHIFT))
        || (tex01 & (FRAG_TEX_RADIAL | FRAG_TEX_DISCONTINUOUS))
        || (tex01 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111CA
        || (tex1 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C
        || (!space->tex_aaaa_111a && (
            ((tex01 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_AAAA)
            || ((tex01 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111A) ))
    )
        return FALSE;    /* reconstructed */

    // 4 is enough for everyone
    if(space->tex_units < 4)
    {
        int n = -1; // first unit has two slots
        if(tex1 && space->tex_units < 2)
            return FALSE;    /* reconstructed */
        /* ... the elided branches below presumably adjust n, the number of
           fixed-function units/slots this frag key needs ... */
        if((tex0 & FRAG_TEX_COLOR_MASK) == FRAG_TEX_COLOR_111C)
        { /* ... */ }
        if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == (OP_MUL_ALPHA << FRAG_OP_SHIFT))
        { /* ... */ }
        if(!space->crossbar && (frag & FRAG_OPPOS_TEX1))
        { /* ... */ }
        if((frag & (FRAG_OP_MASK | FRAG_OPPOS_TEX1)) == ((OP_MUL_ALPHA << FRAG_OP_SHIFT) | FRAG_OPPOS_TEX1))
        { /* ... */ }
        if(frag & (FRAG_CONSTANT | FRAG_PRIMARY))
        { /* ... */ }
        if(n > space->tex_units)
            return FALSE;    /* reconstructed */
    }

    return TRUE;    /* reconstructed */
}
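/* _cairo_gpu_space_is_frag_supported() answers "can this fragment-pipeline key
 * run without ARB_fragment_program?": with fragment programs everything is
 * accepted; otherwise the key is rejected when it needs radial or
 * discontinuous gradients, unsupported color swizzles, or more texture
 * units/combiner slots than the fixed-function setup offers. */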
static cairo_space_t *
_cairo_gpu_space__finish_create(cairo_gpu_space_t * space, unsigned flags)
{
    cairo_gpu_context_t * ctx;
    cairo_bool_t init;    /* declaration reconstructed */
    const char* env = getenv("__CAIRO_GPU_GL_FLAGS");
    /* ... (presumably parses env into flags; elided) */

    // this calls _cairo_gpu_space__init
    ctx = _cairo_gpu_space_bind(space);
    init = _cairo_gpu_space__init(ctx, flags);
    /* ... */
    if(!init)    /* guard reconstructed */
    {
        _cairo_gpu_space__destroy(space);
        /* ... */
    }

    return (cairo_space_t *)space;
}
cairo_public cairo_space_t *
cairo_gl_space_create_from_current_context(void* libgl, unsigned flags)
{
    cairo_space_t * space;

    space = cairo_glx_space_create_from_current_context(libgl, flags);
    /* ... (presumably a fallback condition; elided) */
        space = cairo_osmesa_space_create_from_current_context(libgl, flags);
    /* ... */
    return space;
}
cairo_public cairo_gl_api_t
cairo_gl_space_get_api(cairo_space_t * abstract_space)
{
    cairo_gpu_space_t *space = (cairo_gpu_space_t *)abstract_space;

    if(abstract_space->backend != &_cairo_gpu_space_backend)
        return CAIRO_GL_API_NONE;

    return space->api;    /* reconstructed */
}
cairo_public void *
cairo_gl_space_get_libgl(cairo_space_t * abstract_space)
{
    cairo_gpu_space_t *space = (cairo_gpu_space_t *)abstract_space;

    if(abstract_space->backend != &_cairo_gpu_space_backend)
        return 0;

    return space->libgl;    /* reconstructed */
}
/* return type not shown in the excerpt; void * assumed */
cairo_public void *
cairo_gl_space_get_proc_address(cairo_space_t * abstract_space, const char* name)
{
    cairo_gpu_space_t *space = (cairo_gpu_space_t *)abstract_space;

    if(abstract_space->backend != &_cairo_gpu_space_backend)
        return 0;

    return space->GetProcAddress(name);
}
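/* The getters above validate the backend pointer before touching any
 * cairo-gpu-specific fields, so they degrade gracefully (NONE / 0) when handed
 * a cairo_space_t that belongs to a different backend. */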
cairo_public void *
cairo_gl_space_create_context(cairo_space_t * abstract_space, cairo_surface_t * surface)
{
    cairo_gpu_space_t *space = (cairo_gpu_space_t *)abstract_space;

    if(abstract_space->backend != &_cairo_gpu_space_backend)
        return 0;

    if(surface && surface->backend != &_cairo_gpu_surface_backend)
        return 0;

    return API_SPACE(create_gl_context)(space, surface);
}
cairo_public void
cairo_gl_space_destroy_context(cairo_space_t * abstract_space, void* context)
{
    cairo_gpu_space_t *space = (cairo_gpu_space_t *)abstract_space;

    if(abstract_space->backend != &_cairo_gpu_space_backend)
        return;

    API_SPACE(destroy_gl_context)(space, context);
}
/* return type, the 'api' declaration and the null-surface guards are
 * reconstructed; cairo_bool_t assumed */
cairo_public cairo_bool_t
cairo_gl_space_make_current(cairo_space_t * abstract_space, void* context, cairo_surface_t * abstract_draw_surface, cairo_surface_t * abstract_read_surface)
{
    cairo_gpu_space_t * space = (cairo_gpu_space_t *)abstract_space;
    cairo_gpu_surface_t * read_surface = (cairo_gpu_surface_t *)abstract_read_surface;
    cairo_gpu_surface_t * draw_surface = (cairo_gpu_surface_t *)abstract_draw_surface;
    cairo_gl_api_t api = 0;

    if(read_surface)
    {
        if(read_surface->base.backend != &_cairo_gpu_surface_backend)
            return 0;

        api = read_surface->space->api;
    }

    if(draw_surface)
    {
        if(draw_surface->base.backend != &_cairo_gpu_surface_backend)
            return 0;

        if(api && api != draw_surface->space->api)
            return 0;

        api = draw_surface->space->api;
    }
    /* ... */
    if(space->base.backend != &_cairo_gpu_space_backend)
        return 0;

    return API_SPACE(make_gl_context_current)(space, context, draw_surface, read_surface);
}
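/* Sketch of how the public entry points in this file combine (illustrative
 * only; error handling omitted, 'libgl' is a handle the caller obtained, e.g.
 * via dlopen, and the surfaces would come from the cairo-gpu surface
 * constructors, which are not in this file):
 *
 *     cairo_space_t* space = cairo_gl_space_create(libgl, 0);
 *     if(cairo_gl_space_get_api(space) != CAIRO_GL_API_NONE)
 *     {
 *         void* gl_ctx = cairo_gl_space_create_context(space, NULL);
 *         cairo_gl_space_make_current(space, gl_ctx, draw_surface, read_surface);
 *         ... user GL rendering ...
 *         cairo_gl_space_destroy_context(space, gl_ctx);
 *     }
 *     cairo_space_destroy(space);
 */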