1 #include "cairo-gpu-impl-surface-gl-glx.h"
// Returns the destination-buffer index for the surface's window-system drawable
// (presumably SURF_FRONT or SURF_BACK), dispatched through the per-API
// API_SURFACE_ macro — TODO confirm macro semantics against its definition.
// NOTE(review): listing is garbled — the return type line, braces and the final
// "return idx;" are missing from this view; restore from VCS before editing logic.
4 _cairo_gpu_surface_destination_drawable(cairo_gpu_surface_t
* surface
)
7 API_SURFACE_(idx
, get_destination_idx_drawable
)(surface
);
// Queries whether the surface's storage physically carries an alpha channel.
// Defaults to TRUE; the API_SURFACE_ dispatch presumably overwrites v for
// backends/drawables without real alpha — TODO confirm. A trailing
// "return v;" is not visible in this garbled view.
11 static inline cairo_bool_t
12 _cairo_gpu_surface_has_physical_alpha(cairo_gpu_surface_t
* surface
)
14 cairo_bool_t v
= TRUE
;
15 // We assume that the unused alpha channel is set to 1
16 // TODO: due to this, we don't try to use the alpha channels of drawables; check whether we can do something about this
17 API_SURFACE_(v
, has_physical_alpha
)(surface
);
// Chooses which buffer (SURF_MSAA / drawable front-back / SURF_TEX) rendering
// should currently target, given the msaa preference. Several branch bodies
// (e.g. the "return SURF_MSAA;"-style returns after each "else if") are missing
// from this garbled view — the visible priority order is: msaa fb, drawable,
// autoflush+buffer, valid texture, valid back, valid front, then SURF_TEX.
22 _cairo_gpu_surface_destination_msaa(cairo_gpu_surface_t
* surface
, int want_msaa
)
24 /* Currently, if we have a drawable, we always draw on that.
25 * This reduces surprises for the user.
28 if(surface
->msaa_fb
&& want_msaa
)
30 else if(surface
->has_drawable
)
31 return _cairo_gpu_surface_destination_drawable(surface
);
36 else if(surface->autoflush && surface->buffer)
37 return _cairo_gpu_surface_destination_drawable(surface);
38 else if(surface->valid_mask & (1 << SURF_TEX))
40 else if(surface->buffer == GL_BACK && surface->valid_mask & (1 << SURF_BACK))
42 else if(surface->buffer == GL_FRONT && surface->valid_mask & (1 << SURF_FRONT))
44 //else if(surface->buffer)
45 // return _cairo_gpu_surface_destination_drawable(surface);
47 return SURF_TEX; // draw to the texture by default so that nothing will be drawn if the user forgets to either flush or enable autoflushing
// Convenience wrapper: destination index using the surface's own msaa preference.
52 _cairo_gpu_surface_destination(cairo_gpu_surface_t
* surface
)
54 return _cairo_gpu_surface_destination_msaa(surface
, surface
->want_msaa
);
// Creates surface->fb (an EXT_framebuffer_object FBO), attaches the surface's
// backing texture as color attachment 0, and returns the
// glCheckFramebufferStatusEXT result so callers can detect incompleteness.
58 _cairo_gpu_surface__create_fb(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
)
60 ctx
->gl
.GenFramebuffersEXT(1, &surface
->fb
)
;
61 _cairo_gl_context_set_framebuffer(ctx
, FB_DRAW
, surface
->fb
, surface
->texture
.id
, GL_COLOR_ATTACHMENT0
, -1);
62 ctx
->gl
.FramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT
, GL_COLOR_ATTACHMENT0_EXT
, _cairo_gl_target(surface
->texture
.target_idx
), surface
->texture
.tex
, 0);
64 return ctx
->gl
.CheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT
);
// Allocates the surface's backing GL texture (TexImage2D with a NULL pointer),
// choosing GL_INTENSITY/GL_ALPHA for alpha-only content when alpha FBOs are
// known-unsupported, and GL_RGBA otherwise. As a side effect it probes
// render-to-alpha FBO support once and caches the result in
// ctx->space->alpha_fbo_(un)supported. Finally marks the texture bbox fully
// dirty since the new storage is uninitialized.
// NOTE(review): the declaration of "format" and several brace lines are missing
// from this garbled view.
68 _cairo_gpu_surface__create_tex(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
)
70 // NO: unsigned format = (surface->base.content == CAIRO_CONTENT_COLOR) ? GL_RGB : GL_RGBA;
71 // we must have an alpha channel to support EXTEND_NONE (and we also assume it exists to draw trapezoid masks)
74 // TODO: only do this if, after checking, render-to-alpha turns out to be not supported
75 if(ctx
->space
->use_fbo
&& ctx
->space
->alpha_fbo_unsupported
)
78 format
= (surface
->base
.content
== CAIRO_CONTENT_ALPHA
) ? (ctx
->space
->use_intensity
? GL_INTENSITY
: GL_ALPHA
) : GL_RGBA
;
80 _cairo_gpu_texture_realize(ctx
, &surface
->texture
);
81 _cairo_gl_context_set_active_texture(ctx
, _cairo_gl_context_set_texture(ctx
, -1, &surface
->texture
));
83 _cairo_gpu_context__set_mipmap_hint(ctx
);
85 surface
->texture
.unsized_format
= format
;
// GL_INTENSITY has no matching client format, so upload as GL_ALPHA in that case.
86 GL_ERROR(ctx
->gl
.TexImage2D(_cairo_gl_context_active_target(ctx
), 0, format
, surface
->texture
.width
, surface
->texture
.height
, 0, format
== GL_INTENSITY
? GL_ALPHA
: format
, GL_UNSIGNED_BYTE
, NULL
));
88 // TODO: generalize to support checking non-8-bit formats
89 if(ctx
->space
->use_fbo
&& format
!= GL_RGBA
&& !ctx
->space
->alpha_fbo_supported
)
91 unsigned status
= _cairo_gpu_surface__create_fb(ctx
, surface
);
92 if(status
&& status
!= GL_FRAMEBUFFER_COMPLETE_EXT
)
94 ctx
->space
->alpha_fbo_unsupported
= 1;
99 ctx
->space
->alpha_fbo_supported
= 1;
102 // uninitialized, so set fully dirty
103 surface
->bbox
[SURF_TEX
].x
= 0;
104 surface
->bbox
[SURF_TEX
].y
= 0;
105 surface
->bbox
[SURF_TEX
].width
= surface
->width
;
106 surface
->bbox
[SURF_TEX
].height
= surface
->height
;
// Ensures both the backing texture and its FBO exist; requires FBO support and
// no attached drawable (FBO content is treated as upside-down). Logs — but does
// not propagate — framebuffer incompleteness. The declaration of "status" and
// the guard around _cairo_gpu_surface__create_fb are missing from this view.
109 // guaranteed to set the surface
111 _cairo_gpu_surface__create_tex_fb(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
)
113 assert(surface
->space
->use_fbo
);
115 // this is disallowed because FBOs are currently always treated as upside-down and if there is a drawable, the texture (and thus FBO) may be non-upside-down
116 assert(!surface
->has_drawable
);
118 if(!surface
->texture
.tex
)
119 _cairo_gpu_surface__create_tex(ctx
, surface
);
125 status
= _cairo_gpu_surface__create_fb(ctx
, surface
);
126 if(status
&& status
!= GL_FRAMEBUFFER_COMPLETE_EXT
)
127 fprintf(stderr
, "surface is framebuffer incomplete: status is %x\n", status
);
// Binds the framebuffer/attachment corresponding to destination index i
// (front/back drawable, texture FBO, or msaa FBO) for the read/draw mask,
// passing a flip height when the drawable's content is not upside-down.
// NOTE(review): the declarations of fb/id/buffer, the SURF_FRONT branch header,
// and several assignments are missing from this garbled view.
134 _cairo_gpu_surface__set(cairo_gpu_surface_t
* surface
, cairo_gpu_context_t
* ctx
, int mask
, int i
)
138 int flip_height
= -1;
145 flip_height
= surface
->buffer_non_upside_down
? (int)surface
->height
: -1;
147 else if(i
== SURF_BACK
)
152 flip_height
= surface
->buffer_non_upside_down
? (int)surface
->height
: -1;
154 else if(i
== SURF_TEX
)
157 _cairo_gpu_surface__create_tex_fb(ctx
, surface
);
160 id
= surface
->texture
.id
;
161 buffer
= GL_COLOR_ATTACHMENT0
;
163 else if(i
== SURF_MSAA
)
165 fb
= surface
->msaa_fb
;
166 id
= surface
->msaa_id
;
167 buffer
= GL_COLOR_ATTACHMENT0
;
172 _cairo_gl_context_set_framebuffer(ctx
, mask
, fb
, id
, buffer
, flip_height
);
// Forward declarations for helpers defined elsewhere in this file:
// a texture->surface blit with zoom, and a rect-difference fill (marked
// unused to silence the compiler when a configuration doesn't call it).
177 _cairo_gpu_blit_image(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* dst
, cairo_gpu_texture_t
* texture
, int dst_x
, int dst_y
, int src_x
, int src_y
, int width
, int height
, int zoom_x
, int zoom_y
);
179 static __attribute__((unused
)) void
180 _cairo_gpu_fill_rect_diff(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
, cairo_rectangle_int_t
* out
, cairo_rectangle_int_t
* in
, float r
, float g
, float b
, float a
);
// Makes buffer `dst` valid by copying the valid contents of buffer `src`:
// clears the area of dst's bbox not covered by src's bbox, then either blits
// within the same drawable or re-uploads from the texture, and finally copies
// the bbox and sets dst's valid bit. If the surface was fully cleared
// (clear_rgba) it temporarily widens content to COLOR_ALPHA and clears instead.
// NOTE(review): many branch headers/braces are missing from this garbled view;
// the control flow between the blit and clear paths cannot be fully verified here.
182 // may clobber read fbo and draw state
184 _cairo_gpu_surface__update_draw(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
, int dst
, int src
)
186 cairo_content_t content
;
187 assert(!(surface
->valid_mask
& (1 << dst
)));
189 content
= surface
->base
.content
;
190 if(surface
->clear_rgba
)
191 surface
->base
.content
= CAIRO_CONTENT_COLOR_ALPHA
;
195 cairo_rectangle_int_t
*out
= &surface
->bbox
[dst
];
196 cairo_rectangle_int_t
*in
= &surface
->bbox
[src
];
198 //printf("update draw %i = %i [%i %i %i %i : %i %i %i %i]\n", j, i, out->x, out->y, out->width, out->height, in->x, in->y, in->width, in->height);
200 _cairo_gpu_fill_rect_diff(ctx
, surface
, out
, in
, 0, 0, 0, 0);
202 if(src
!= SURF_TEX
|| surface
->fb
)
204 _cairo_gpu_surface__set(surface
, ctx
, FB_READ
, src
);
206 //printf("blit %i to %i: (%i, %i, %i, %i) -> (%i, %i, %i, %i)\n", i, j, in->x, in->y, in->width, in->height, out->x, out->y, out->width, out->height);
207 //_cairo_gpu_context_fill_rect(ctx, out->x, out->y, out->width, out->height, 0, 0, 0, 0);
209 _cairo_gpu_context_blit_same(ctx
, surface
, in
->x
, in
->y
, in
->width
, in
->height
);
213 // may happen if we switched between different drawables
214 _cairo_gpu_blit_image(ctx
, surface
, &surface
->texture
, in
->x
, in
->y
, in
->x
, in
->y
, in
->width
, in
->height
, 1, 1);
217 memcpy(out
, in
, sizeof(*in
));
// Clear path: opaque surfaces clear to opaque black, others to transparent.
221 _cairo_gpu_context__set_color_mask(ctx
, 0xf);
222 if(surface
->base
.content
== CAIRO_CONTENT_COLOR
)
223 ctx
->gl
.ClearColor(0, 0, 0, 1);
225 ctx
->gl
.ClearColor(0, 0, 0, 0);
226 ctx
->gl
.Clear(GL_COLOR_BUFFER_BIT
);
229 surface
->base
.content
= content
;
230 surface
->valid_mask
|= 1 << dst
;
// Resolves the GL context to use for this surface: the thread's default
// context when there is no subspace, otherwise the subspace's per-thread
// context from pthread TLS, creating and caching it on first use.
// NOTE(review): the null-check guarding the create call and the final
// "return ctx;" are missing from this garbled view.
233 static inline cairo_gpu_context_t
*
234 _cairo_gpu_surface__do_lookup_context(cairo_gpu_surface_t
* surface
)
236 cairo_gpu_context_t
* ctx
= 0;
237 cairo_gpu_space_tls_t
* tls
;
239 tls
= _cairo_gpu_space_get_tls(surface
->space
);
241 if(likely(!surface
->subspace
))
242 return _cairo_gpu_space_tls_lookup_context(tls
);
244 ctx
= (cairo_gpu_context_t
*)pthread_getspecific(surface
->subspace
->context_tls
);
247 ctx
= _cairo_gpu_space_tls__create_context(tls
, surface
->subspace
);
249 tls
->last_context
= ctx
;
// Chooses a context for rendering to destination idx: if idx is not yet valid,
// finds the source buffer that is (scanning the 4 valid bits); when either end
// is a window-system drawable, a drawable-capable context is required,
// otherwise any context for the space will do.
// NOTE(review): the declarations of i/src and the loop body that records src
// are missing from this garbled view — do not rely on the visible control flow.
253 static inline cairo_gpu_context_t
*
254 _cairo_gpu_surface__lookup_context(cairo_gpu_surface_t
* surface
, unsigned mask
, int idx
)
257 if(!(surface
->valid_mask
& (1 << idx
)))
261 for(i
= 0; i
< 4; ++i
)
263 if(surface
->valid_mask
& (1 << i
))
276 if(SURF_IS_DRAWABLE(idx
) || SURF_IS_DRAWABLE(src
))
277 return _cairo_gpu_surface__do_lookup_context(surface
);
279 return _cairo_gpu_space_tls_lookup_context(_cairo_gpu_space_get_tls(surface
->space
));
// Wrapper: lookup a context for the surface's current destination buffer.
282 static inline cairo_gpu_context_t
*
283 _cairo_gpu_surface_lookup_context(cairo_gpu_surface_t
* surface
, unsigned mask
)
285 return _cairo_gpu_surface__lookup_context(surface
, mask
, _cairo_gpu_surface_destination(surface
));
// Binds the surface's destination idx on ctx; if idx is not yet valid, finds a
// valid source buffer, binds the context appropriately (drawable-aware bind
// when either side is a drawable), and runs __update_draw to populate idx
// before the final __set.
// NOTE(review): the declarations of i/src and several branch bodies are missing
// from this garbled view — the visible lines do not form complete control flow.
289 _cairo_gpu_surface__bind_to(cairo_gpu_surface_t
* surface
, cairo_gpu_context_t
* ctx
, unsigned mask
, int idx
)
292 if(!(surface
->valid_mask
& (1 << idx
)))
296 for(i
= 0; i
< 4; ++i
)
298 if(surface
->valid_mask
& (1 << i
))
308 if(SURF_IS_DRAWABLE(idx
) || SURF_IS_DRAWABLE(src
))
309 _cairo_gl_context_bind_surface(ctx
, surface
);
311 _cairo_gpu_context_bind(ctx
);
316 _cairo_gpu_surface__set(surface
, ctx
, mask
, idx
);
317 _cairo_gpu_surface__update_draw(ctx
, surface
, idx
, src
);
320 _cairo_gpu_surface__set(surface
, ctx
, mask
, idx
);
// Wrapper: bind the surface's current destination buffer on the given context.
324 _cairo_gpu_surface_bind_to(cairo_gpu_surface_t
* surface
, cairo_gpu_context_t
* ctx
, unsigned mask
)
326 _cairo_gpu_surface__bind_to(surface
, ctx
, mask
, _cairo_gpu_surface_destination(surface
));
// Looks up the appropriate context for destination idx, binds the surface to
// it, and returns it (the "return ctx;" line is not visible in this view).
329 static inline cairo_gpu_context_t
*
330 _cairo_gpu_surface__bind(cairo_gpu_surface_t
* surface
, unsigned mask
, int idx
)
332 cairo_gpu_context_t
* ctx
;
333 ctx
= _cairo_gpu_surface__lookup_context(surface
, mask
, idx
);
335 _cairo_gpu_surface__bind_to(surface
, ctx
, mask
, idx
);
// Wrapper: bind the surface's current destination buffer.
339 static inline cairo_gpu_context_t
*
340 _cairo_gpu_surface_bind(cairo_gpu_surface_t
* surface
, unsigned mask
)
342 return _cairo_gpu_surface__bind(surface
, mask
, _cairo_gpu_surface_destination(surface
));
// Wrapper: bind the drawable's destination buffer, bypassing msaa/texture.
345 static inline cairo_gpu_context_t
*
346 _cairo_gpu_surface_bind_drawable(cairo_gpu_surface_t
* surface
, unsigned mask
)
348 return _cairo_gpu_surface__bind(surface
, mask
, _cairo_gpu_surface_destination_drawable(surface
));
// Toggles the drawable's vertical orientation flag and mirrors the front and
// back dirty bboxes vertically (y' = height - bbox.height - y) so they keep
// describing the same pixels under the new orientation.
352 _cairo_gpu_surface__flip_drawable(cairo_gpu_surface_t
* surface
)
354 surface
->buffer_non_upside_down
^= 1;
355 surface
->bbox
[SURF_FRONT
].y
= surface
->height
- surface
->bbox
[SURF_FRONT
].height
- surface
->bbox
[SURF_FRONT
].y
;
356 surface
->bbox
[SURF_BACK
].y
= surface
->height
- surface
->bbox
[SURF_BACK
].height
- surface
->bbox
[SURF_BACK
].y
;
// Toggles the texture's vertical orientation and mirrors its dirty bbox within
// the texture height. If padding rows below the surface are not known to be
// transparent they may be dirty too, so the bbox is widened to cover them;
// afterwards any part of the flipped bbox that landed in the padding area is
// clipped off and transparent_padding is invalidated. Finally the
// pseudo-padding valid bit is cleared.
360 _cairo_gpu_surface__flip_texture(cairo_gpu_surface_t
* surface
)
362 surface
->texture
.non_upside_down
^= 1;
363 surface
->bbox
[SURF_TEX
].y
= surface
->texture
.height
- surface
->bbox
[SURF_TEX
].height
- surface
->bbox
[SURF_TEX
].y
;
365 // padding space may also be dirty
366 if(!surface
->transparent_padding
)
368 surface
->bbox
[SURF_TEX
].height
+= surface
->bbox
[SURF_TEX
].y
;
369 surface
->bbox
[SURF_TEX
].y
= 0;
370 surface
->bbox
[SURF_TEX
].x
= 0;
371 surface
->bbox
[SURF_TEX
].width
= surface
->width
;
374 // the flipped dirty area may go into the padding space. If so, fix it and invalidate the padding space.
375 if(surface
->bbox
[SURF_TEX
].y
>= (int)surface
->height
)
377 surface
->bbox
[SURF_TEX
].width
= surface
->bbox
[SURF_TEX
].height
= 0;
378 surface
->transparent_padding
= 0;
380 else if(surface
->bbox
[SURF_TEX
].height
> ((int)surface
->height
- surface
->bbox
[SURF_TEX
].y
))
382 surface
->bbox
[SURF_TEX
].height
= surface
->height
- surface
->bbox
[SURF_TEX
].y
;
383 surface
->transparent_padding
= 0;
386 surface
->valid_mask
&=~ (1 << SURF_PSEUDO_PADDING
);
// Makes the texture buffer (SURF_TEX, index j) valid by copying from whichever
// buffer i currently is valid: for front/back drawables it uses
// CopyTexSubImage2D over the union of the two dirty bboxes (after a
// dst-alpha "alpha to red" fixup pass when the texture is GL_INTENSITY); for
// MSAA it falls back to __bind (resolve via update_draw); if nothing is valid
// it clears/creates the texture via a bound clear. Returns the context used.
// NOTE(review): the declarations of i/j/alpha_to_red/idx and a number of branch
// bodies are missing from this garbled view — treat the visible flow as partial.
389 // may clobber read fb, blend/color mask
390 // you must rebind your old context, if any!
391 static cairo_gpu_context_t
*
392 _cairo_gpu_surface__update_tex(cairo_gpu_surface_t
* surface
)
396 cairo_gpu_context_t
* ctx
= 0;
398 for(i
= 0; i
< 4; ++i
)
400 if(surface
->valid_mask
& (1 << i
))
403 // TODO: how about always using blits?
406 cairo_rectangle_int_t
*in
;
407 cairo_rectangle_int_t
*out
;
408 cairo_rectangle_int_t rect
;
412 in
= &surface
->bbox
[i
];
413 out
= &surface
->bbox
[j
];
415 if((!out
->width
|| !out
->height
) && (!in
->width
|| !in
->height
))
418 alpha_to_red
= (surface
->texture
.unsized_format
== GL_INTENSITY
&& !(surface
->valid_mask
& (1 << SURF_PSEUDO_ALPHA_TO_RED
)));
419 ctx
= _cairo_gpu_surface__bind(surface
, FB_READ
| (alpha_to_red
? FB_DRAW
: 0), i
);
421 //printf("update tex %i = %i [%i %i %i %i : %i %i %i %i]\n", j, i, out->x, out->y, out->width, out->height, in->x, in->y, in->width, in->height);
423 if(!surface
->texture
.tex
)
424 _cairo_gpu_surface__create_tex(ctx
, surface
);
426 if(surface
->buffer_non_upside_down
!= surface
->texture
.non_upside_down
)
427 _cairo_gpu_surface__flip_texture(surface
);
// Copy region = union of source and destination dirty rectangles.
429 if(out
->width
&& out
->height
&& in
->width
&& in
->height
)
431 rect
.x
= MIN(in
->x
, out
->x
);
432 rect
.y
= MIN(in
->y
, out
->y
);
433 rect
.width
= MAX(in
->x
+ in
->width
, out
->x
+ out
->width
) - rect
.x
;
434 rect
.height
= MAX(in
->y
+ in
->height
, out
->y
+ out
->height
) - rect
.y
;
436 else if(out
->width
&& out
->height
)
438 else if(in
->width
&& in
->height
)
443 assert(i
== SURF_FRONT
|| i
== SURF_BACK
);
447 cairo_gpu_blend_t blend
;
449 blend
.src_rgb
= BLEND_DST_ALPHA
;
450 if(!surface
->space
->blend_func_separate
)
451 blend
.src_alpha
= blend
.src_rgb
;
452 blend
.color_mask
= 1;
454 _cairo_gpu_context_set_viewport(ctx
, 0, 0, surface
->width
, surface
->height
);
455 _cairo_gpu_context_set_blend(ctx
, blend
.v
);
456 _cairo_gpu_context_set_raster(ctx
, 0);
457 _cairo_gpu_context_set_vert_frag(ctx
, 0, FRAG_CONSTANT
);
458 _cairo_gpu_context_set_constant_color(ctx
, &_cairo_gpu_white
);
459 _cairo_gpu_context_set_translation(ctx
, 0, 0);
460 _cairo_gpu_context_draw_rect(ctx
, 0, 0, surface
->width
, surface
->height
);
461 surface
->valid_mask
|= (1 << SURF_PSEUDO_ALPHA_TO_RED
);
464 idx
= _cairo_gl_context_set_texture(ctx
, -1, &surface
->texture
);
465 _cairo_gl_context_set_active_texture(ctx
, idx
);
466 //printf("CopyTexSubImage2D (%lx)\n", surface->glx.glxdrawable);
467 ctx
->gl
.CopyTexSubImage2D(_cairo_gl_context_active_target(ctx
), 0, rect
.x
, rect
.y
, rect
.x
, rect
.y
, rect
.width
, rect
.height
);
468 memcpy(out
, in
, sizeof(*in
));
469 surface
->valid_mask
|= 1 << j
;
474 // we need to do multisample resolve via blit, just use update_draw
475 return _cairo_gpu_surface__bind(surface
, 0, j
);
480 // there isn't any ClearTexSubImage2D...
481 if(!surface
->texture
.tex
|| (surface
->bbox
[j
].width
&& surface
->bbox
[j
].height
))
483 i
= _cairo_gpu_surface_destination(surface
);
484 ctx
= _cairo_gpu_surface__bind(surface
, FB_READ
| FB_DRAW
, i
);
489 surface
->valid_mask
|= 1 << j
;
// Returns a context with SURF_TEX valid: runs __update_tex when the texture
// bit isn't set, otherwise binds the space and lazily creates the texture.
// The final "return ctx;" is not visible in this garbled view.
493 static cairo_gpu_context_t
*
494 _cairo_gpu_surface_bind_tex_update(cairo_gpu_surface_t
* surface
)
496 cairo_gpu_context_t
* ctx
= 0;
497 if(!(surface
->valid_mask
& (1 << SURF_TEX
)))
498 ctx
= _cairo_gpu_surface__update_tex(surface
);
502 ctx
= _cairo_gpu_space_bind(surface
->space
);
503 if(!surface
->texture
.tex
)
504 _cairo_gpu_surface__create_tex(ctx
, surface
);
// Returns the texture to sample this surface from: a dummy texture when the
// surface has no valid content (or no realized texture), the real texture when
// SURF_TEX is valid, otherwise runs __update_tex (preferring texture unit idx
// on ctx to avoid a double bind) and optionally regenerates mipmaps.
// NOTE(review): several guards, the declaration of i, and the final return are
// missing from this garbled view.
509 // ctx may be 0 and doesn't need to be bound (and may be unbound if it is)
510 static inline cairo_gpu_texture_t
*
511 _cairo_gpu_surface_begin_texture(cairo_gpu_surface_t
* surface
, cairo_gpu_context_t
* ctx
, int idx
)
513 cairo_gpu_texture_t
* texture
= 0;
514 if(!surface
->valid_mask
)
515 texture
= &surface
->space
->dummy_texture
;
516 else if(surface
->valid_mask
& (1 << SURF_TEX
))
518 if(!surface
->texture
.tex
)
519 texture
= &surface
->space
->dummy_texture
;
521 texture
= &surface
->texture
;
525 // if in the update, we happen to be able to use context ctx, then also use the final texture unit, so we don't have to bind the texture twice
527 ctx
->preferred_texture
= idx
;
529 API_SURFACE_(texture
, begin_texture
)(surface
);
532 _cairo_gpu_surface__update_tex(surface
);
533 texture
= &surface
->texture
;
537 // TODO: test this, decide whether to do this and enable this
539 if(!(surface
->valid_mask
& (1 << SURF_PSEUDO_MIPMAPS
)) && !surface
->tex_rectangle
)
541 _cairo_gl_context_set_active_texture(ctx
, i
);
542 ctx
->gl
.GenerateMipmapEXT(GL_TEXTURE_2D
);
543 surface
->valid_mask
|= 1 << SURF_PSEUDO_MIPMAPS
;
// Counterpart of begin_texture: per-API cleanup hook after sampling.
550 _cairo_gpu_surface_end_texture(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
, cairo_gpu_texture_t
* texture
)
552 API_SURFACE(end_texture
)(surface
, ctx
, texture
);
// Tears down the surface's multisample renderbuffer and FBO: detaches the
// renderbuffer, deletes both GL objects, zeroes the handles, empties the MSAA
// dirty bbox and clears the SURF_MSAA valid bit. A guard around the GL calls
// (e.g. "if(surface->msaa_fb)") is likely present but missing from this view.
556 _cairo_gpu_surface__msaa_fini(cairo_gpu_context_t
* ctx
, cairo_gpu_surface_t
* surface
)
560 GLenum fba
= _cairo_gl_context_set_any_framebuffer(ctx
, surface
->msaa_fb
, surface
->msaa_id
, 0);
561 ctx
->gl
.FramebufferRenderbufferEXT(fba
, GL_COLOR_ATTACHMENT0_EXT
, GL_RENDERBUFFER_EXT
, 0);
562 ctx
->gl
.DeleteRenderbuffersEXT(1, &surface
->msaa_rb
);
563 ctx
->gl
.DeleteFramebuffersEXT(1, &surface
->msaa_fb
);
565 surface
->msaa_rb
= 0;
566 surface
->msaa_fb
= 0;
568 surface
->bbox
[SURF_MSAA
].width
= 0;
569 surface
->bbox
[SURF_MSAA
].height
= 0;
571 surface
->valid_mask
&=~ (1 << SURF_MSAA
);
// (Re)creates the surface's multisample storage: allocates a renderbuffer with
// the requested coverage/color sample counts (collapsing to MAX of the two when
// NV coverage multisample is unavailable), wraps it in an FBO, verifies
// completeness, marks the MSAA bbox fully dirty, and records the new handles
// and sample counts. On GL error or incompleteness the objects are deleted and
// CAIRO_STATUS_NO_MEMORY is returned.
// NOTE(review): declarations of format/fb/id/rb/fba, braces, and the final
// "return status;" are missing from this garbled view. Also, at original lines
// 618-620 _cairo_gl_context_set_any_framebuffer is passed fb BEFORE
// GenFramebuffersEXT generates it — verify ordering against the full source.
575 static cairo_int_status_t
576 _cairo_gpu_surface__msaa_realize(void* abstract_surface
, unsigned coverage_samples
, unsigned color_samples
)
578 cairo_int_status_t status
;
579 cairo_gpu_surface_t
* surface
= abstract_surface
;
584 cairo_gpu_context_t
*ctx
= 0;
585 unsigned gl_coverage_samples
= coverage_samples
;
586 unsigned gl_color_samples
= color_samples
;
588 if(coverage_samples
!= color_samples
&& !surface
->space
->has_framebuffer_multisample_coverage
)
589 gl_coverage_samples
= gl_color_samples
= MAX(color_samples
, coverage_samples
);
591 format
= (surface
->base
.content
== CAIRO_CONTENT_COLOR
) ? GL_RGB
: GL_RGBA
;
593 if(surface
->valid_mask
== (1 << SURF_MSAA
))
595 int idx
= _cairo_gpu_surface_destination_msaa(surface
, 0);
596 ctx
= _cairo_gpu_surface__bind(surface
, 0, idx
);
599 if(coverage_samples
>= 1 || color_samples
>= 1)
604 ctx
= _cairo_gpu_space_bind(surface
->space
);
605 ctx
->gl
.GenRenderbuffersEXT(1, &rb
);
606 ctx
->gl
.BindRenderbufferEXT(GL_RENDERBUFFER_EXT
, rb
);
607 if(gl_coverage_samples
!= gl_color_samples
)
608 ctx
->gl
.RenderbufferStorageMultisampleCoverageNV(GL_RENDERBUFFER_EXT
, gl_coverage_samples
, gl_color_samples
, format
, surface
->width
, surface
->height
);
610 ctx
->gl
.RenderbufferStorageMultisampleEXT(GL_RENDERBUFFER_EXT
, gl_color_samples
, format
, surface
->width
, surface
->height
);
611 if(ctx
->gl
.GetError())
613 ctx
->gl
.DeleteRenderbuffersEXT(1, &rb
);
614 status
= CAIRO_STATUS_NO_MEMORY
;
618 fba
= _cairo_gl_context_set_any_framebuffer(ctx
, fb
, id
, 0);
619 ctx
->gl
.GenFramebuffersEXT(1, &fb
);
620 ctx
->gl
.BindFramebufferEXT(fba
, fb
);
621 ctx
->gl
.FramebufferRenderbufferEXT(fba
, GL_COLOR_ATTACHMENT0_EXT
, GL_RENDERBUFFER_EXT
, rb
);
622 if(ctx
->gl
.CheckFramebufferStatusEXT(fba
) != GL_FRAMEBUFFER_COMPLETE_EXT
)
624 ctx
->gl
.FramebufferRenderbufferEXT(fba
, GL_COLOR_ATTACHMENT0_EXT
, GL_RENDERBUFFER_EXT
, 0);
625 ctx
->gl
.DeleteRenderbuffersEXT(1, &rb
);
626 ctx
->gl
.DeleteFramebuffersEXT(1, &fb
);
627 status
= CAIRO_STATUS_NO_MEMORY
;
631 // uninitialized, thus fully dirty
632 surface
->bbox
[SURF_MSAA
].x
= 0;
633 surface
->bbox
[SURF_MSAA
].y
= 0;
634 surface
->bbox
[SURF_MSAA
].width
= surface
->width
;
635 surface
->bbox
[SURF_MSAA
].height
= surface
->height
;
638 _cairo_gpu_surface__msaa_fini(ctx
, surface
);
642 surface
->msaa_rb
= rb
;
643 surface
->msaa_fb
= fb
;
644 surface
->msaa_id
= id
;
647 surface
->coverage_samples
= coverage_samples
;
648 surface
->color_samples
= color_samples
;
649 status
= CAIRO_STATUS_SUCCESS
;
// Public sample-count setter: no-op when unchanged, rejects configurations the
// space cannot support (no FBO, no multisample extension, or >=256 samples),
// otherwise delegates to __msaa_realize. The final "return status;" is not
// visible in this garbled view.
654 static cairo_int_status_t
655 _cairo_gpu_surface_set_samples(void* abstract_surface
, unsigned coverage_samples
, unsigned color_samples
)
657 cairo_int_status_t status
;
658 cairo_gpu_surface_t
* surface
= abstract_surface
;
660 if(coverage_samples
== surface
->coverage_samples
&& color_samples
== surface
->color_samples
)
661 return CAIRO_STATUS_SUCCESS
;
663 if(!surface
->space
->use_fbo
|| !surface
->space
->has_framebuffer_multisample
|| coverage_samples
>= 256 || color_samples
>= 256)
664 return CAIRO_STATUS_NO_MEMORY
; // TODO: cairo doesn't have a generic invalid status?!?
667 status
= _cairo_gpu_surface__msaa_realize(surface
, coverage_samples
, color_samples
);
// Table of antialiasing sample-count configurations — the initializer contents
// are missing from this garbled view; restore from VCS.
672 static int _cairo_gpu_aa_formats
[] = {
// Records the surface's dimensions and content and (re)initializes the backing
// texture descriptor; returns early when the non-zero size is unchanged
// (the early-return body between the guard and the assignments is not visible
// in this garbled view).
683 _cairo_gpu_surface__set_size(void* abstract_surface
, cairo_content_t content
, unsigned width
, unsigned height
)
685 cairo_gpu_surface_t
* surface
= abstract_surface
;
687 if(width
== surface
->width
&& height
== surface
->height
&& width
&& height
)
695 surface
->width
= width
;
696 surface
->height
= height
;
697 surface
->base
.content
= content
;
699 _cairo_gpu_texture_init(surface
->space
, &surface
->texture
, width
, height
);
// Per-surface field initialization; the sentinel height ~0 presumably marks
// "size not set yet" — TODO confirm. Other initializations (and the use of the
// space parameter) are missing from this garbled view.
703 _cairo_gpu_surface__init(cairo_gpu_surface_t
* surface
, cairo_gpu_space_t
* space
)
706 surface
->height
= ~0;
// Allocates and initializes a fresh gpu surface object; returns an error
// surface on allocation failure. The final "return surface;" is not visible
// in this garbled view.
709 static cairo_gpu_surface_t
*
710 _cairo_gpu_surface__create(cairo_gpu_space_t
* space
)
712 cairo_gpu_surface_t
*surface
;
714 surface
= calloc(1, sizeof(cairo_gpu_surface_t
));
715 if(unlikely(surface
== NULL
))
716 return (cairo_gpu_surface_t
*)_cairo_surface_create_in_error(_cairo_error(CAIRO_STATUS_NO_MEMORY
));
718 _cairo_surface_init(&surface
->base
, &_cairo_gpu_surface_backend
, 0);
719 _cairo_gpu_surface__init(surface
, space
);
// Releases the surface's GL resources: MSAA objects, the FBO (a guard like
// "if(surface->fb)" is likely present but not visible here), and the backing
// texture; then clears the TEX and MSAA valid bits.
725 _cairo_gpu_surface__fini(cairo_gpu_surface_t
* surface
)
727 cairo_gpu_context_t
* ctx
= _cairo_gpu_space_bind(surface
->space
);
729 _cairo_gpu_surface__msaa_fini(ctx
, surface
);
733 ctx
->gl
.DeleteFramebuffersEXT(1, &surface
->fb
);
737 _cairo_gpu_texture_fini(ctx
, &surface
->texture
);
739 surface
->valid_mask
&=~ ((1 << SURF_TEX
) | (1 << SURF_MSAA
));
// Detaches the window-system drawable via the per-API hook and clears the
// drawable flags.
743 _cairo_gpu_surface__unset_drawable(cairo_gpu_surface_t
* surface
)
745 API_SURFACE(unset_drawable
)(surface
);
746 surface
->has_drawable
= 0;
747 surface
->public_drawable
= 0;
// Backend finish hook: tears down drawable and GL resources, then drops the
// surface's reference on its space.
751 static cairo_status_t
752 _cairo_gpu_surface_finish(void *abstract_surface
)
754 cairo_gpu_surface_t
*surface
= abstract_surface
;
758 _cairo_gpu_surface__unset_drawable(surface
);
759 _cairo_gpu_surface__fini(surface
);
763 cairo_space_destroy(&surface
->space
->base
);
769 return CAIRO_STATUS_SUCCESS
;
// Attaches a window-system drawable (by XID and visual) via the per-API hook;
// status defaults to SURFACE_TYPE_MISMATCH and the hook presumably overwrites
// it — TODO confirm. The condition guarding the early NO_MEMORY return and the
// final "return status;" are missing from this garbled view.
772 static cairo_status_t
773 _cairo_gpu_surface__set_drawable(void* abstract_surface
, cairo_content_t content
, unsigned long drawable
, unsigned long visualid
, cairo_bool_t double_buffer
, double width
, double height
)
775 cairo_int_status_t status
;
776 cairo_gpu_surface_t
* surface
= abstract_surface
;
779 return CAIRO_STATUS_NO_MEMORY
;
781 status
= CAIRO_STATUS_SURFACE_TYPE_MISMATCH
;
782 API_SURFACE_(status
, set_drawable_visualid
)(surface
, content
, drawable
, visualid
, double_buffer
, width
, height
);
// Configures the surface as an offscreen target with the requested channel bit
// layout, via the per-API hook. The final "return status;" is not visible in
// this garbled view.
786 static cairo_status_t
787 _cairo_gpu_surface__set_offscreen(void* abstract_surface
, cairo_content_t content
, double width
, double height
, int color_mantissa_bits
, int alpha_mantissa_bits
, int exponent_bits
, int shared_exponent_bits
)
789 cairo_gpu_surface_t
* surface
= abstract_surface
;
790 cairo_status_t status
= CAIRO_STATUS_SUCCESS
;
792 API_SURFACE_(status
, set_offscreen
)(surface
, content
, width
, height
, color_mantissa_bits
, alpha_mantissa_bits
, exponent_bits
, shared_exponent_bits
);
// Public constructor / reconfigurator: validates or creates the surface object,
// reconciles space ownership (re-init on space change, reference counting),
// attaches or replaces the window-system drawable (inheriting width/height from
// an existing surface when unspecified), marks the front/back bboxes fully
// dirty, normalizes channel bit requests to the currently forced 8-bit formats,
// then applies offscreen setup, size, and any pending MSAA configuration.
// NOTE(review): many guards, else-branches and braces are missing from this
// garbled view — the visible lines do not form complete control flow; restore
// from VCS before editing.
797 static cairo_surface_t
*
798 _cairo_gpu_surface_create(void *abstract_space
, void* abstract_surface
, cairo_content_t content
,
799 unsigned long drawable
, unsigned long visualid
, cairo_bool_t double_buffered
,
800 double width
, double height
, int color_mantissa_bits
, int alpha_mantissa_bits
, int exponent_bits
, int shared_exponent_bits
)
802 cairo_gpu_space_t
* space
= abstract_space
;
803 cairo_gpu_surface_t
* surface
= abstract_surface
;
804 cairo_bool_t same_size
= 0;
805 unsigned long surf_drawable
= 0;
807 if(surface
&& surface
->base
.backend
!= &_cairo_gpu_surface_backend
)
808 return _cairo_surface_create_in_error(CAIRO_STATUS_SURFACE_TYPE_MISMATCH
);
813 return _cairo_surface_create_in_error(CAIRO_STATUS_SURFACE_FINISHED
);
815 space
= surface
->space
;
823 API_SURFACE_(drawable
, get_drawable
)(surface
);
830 return _cairo_surface_create_in_error(CAIRO_STATUS_SURFACE_FINISHED
);
831 width
= surface
->width
;
837 return _cairo_surface_create_in_error(CAIRO_STATUS_SURFACE_FINISHED
);
838 height
= surface
->height
;
842 if(space
&& surface
&& space
!= surface
->space
)
844 _cairo_gpu_surface__fini(surface
);
845 _cairo_gpu_surface__init(surface
, space
);
848 surface
= _cairo_gpu_surface__create(space
);
851 if(width
!= (int)surface
->width
|| height
!= (int)surface
->height
)
852 _cairo_gpu_surface__fini(surface
);
857 API_SURFACE_(surf_drawable
, get_drawable
)(surface
);
859 if(surf_drawable
!= drawable
)
860 _cairo_gpu_surface__unset_drawable(surface
);
865 cairo_space_destroy(&surface
->space
->base
);
868 surface
->space
= (cairo_gpu_space_t
*)cairo_space_reference(&space
->base
);
872 if(surf_drawable
!= drawable
)
873 _cairo_gpu_surface__set_drawable(surface
, content
, drawable
, visualid
, double_buffered
, width
, height
);
876 surface
->bbox
[SURF_FRONT
].x
= 0;
877 surface
->bbox
[SURF_FRONT
].y
= 0;
878 surface
->bbox
[SURF_FRONT
].width
= width
;
879 surface
->bbox
[SURF_FRONT
].height
= height
;
880 memcpy(&surface
->bbox
[SURF_BACK
], &surface
->bbox
[SURF_FRONT
], sizeof(surface
->bbox
[SURF_BACK
]));
884 /* TODO: we currently force 8-bit formats, but we should stop this.
885 * If we fix this, change dst_alpha stealing to ensure it has enough alpha bits.
888 if(content
== CAIRO_CONTENT_ALPHA
)
889 color_mantissa_bits
= 0;
890 else //if(!color_mantissa_bits)
891 color_mantissa_bits
= 8;
893 // we always need at least one bit
894 if(content
== CAIRO_CONTENT_COLOR
)
895 alpha_mantissa_bits
= 1;
896 else //if(!alpha_mantissa_bits)
897 alpha_mantissa_bits
= 8;
899 _cairo_gpu_surface__set_offscreen(surface
, content
, width
, height
, color_mantissa_bits
, alpha_mantissa_bits
, exponent_bits
, shared_exponent_bits
);
904 _cairo_gpu_surface__set_size(surface
, content
, width
, height
);
905 if(surface
->color_samples
)
906 _cairo_gpu_surface__msaa_realize(surface
, surface
->coverage_samples
, surface
->color_samples
);
911 return &surface
->base
;
// Backend copy_page: promotes valid back-buffer content to the front buffer by
// swapping buffers; afterwards the back buffer is undefined per OpenGL, so its
// bbox is set fully dirty.
// NOTE(review): at original lines 926-927 the "&=~ (1 << SURF_BACK)" result is
// immediately overwritten by "= 1 << SURF_FRONT", making the first statement
// redundant — verify against the full source whether a line is missing between.
914 static cairo_int_status_t
915 _cairo_gpu_surface_copy_page(void* abstract_surface
)
917 cairo_gpu_surface_t
* surface
= abstract_surface
;
918 if(!surface
->draw_to_back_buffer
|| (surface
->valid_mask
& (1 << SURF_FRONT
)))
919 return CAIRO_STATUS_SUCCESS
;
923 _cairo_gpu_surface__bind(surface
, 0, SURF_BACK
);
925 memcpy(&surface
->bbox
[SURF_FRONT
], &surface
->bbox
[SURF_BACK
], sizeof(surface
->bbox
[SURF_BACK
]));
926 surface
->valid_mask
&=~ (1 << SURF_BACK
);
927 surface
->valid_mask
= 1 << SURF_FRONT
;
929 // OpenGL defines the back buffer as becoming undefined
930 surface
->bbox
[SURF_BACK
].x
= 0;
931 surface
->bbox
[SURF_BACK
].y
= 0;
932 surface
->bbox
[SURF_BACK
].width
= surface
->width
;
933 surface
->bbox
[SURF_BACK
].height
= surface
->height
;
935 API_SURFACE(swap_buffers
)(surface
);
938 return CAIRO_STATUS_SUCCESS
;
// Reads surface pixels back into a new image surface: picks a pixman format
// from the surface content, then either GetTexImage (full-surface reads when
// SURF_TEX is valid and sizes match) or ReadPixels on the bound destination,
// flipping rows manually for non-upside-down drawables; finally wraps the
// buffer in an image surface that takes ownership of the data, and drains the
// GL error queue.
// NOTE(review): declarations (data, format, type, cpp, y, err), error paths and
// several branch bodies are missing from this garbled view. Also suspicious in
// the visible lines: "malloc(dheight * stride * cpp)" where stride already
// includes cpp (potential over-allocation), and the row-flip copies into data2
// which is never visibly freed or used afterwards — verify against full source.
941 static cairo_status_t
942 _cairo_gpu_surface_get_image(cairo_gpu_surface_t
* surface
, cairo_rectangle_int_t
* interest
, cairo_image_surface_t
** image_out
, cairo_rectangle_int_t
* rect_out
, void** extra
)
944 cairo_image_surface_t
* image
;
945 cairo_rectangle_int_t extents
;
950 unsigned stride
, dheight
;
952 pixman_format_code_t pixman_format
;
953 cairo_gpu_context_t
* ctx
;
955 int idx
= _cairo_gpu_surface_destination(surface
);
959 extents
.width
= surface
->width
;
960 extents
.height
= surface
->height
;
964 if(!_cairo_rectangle_intersect(&extents
, interest
))
967 return CAIRO_STATUS_SUCCESS
;
974 /* Want to use a switch statement here but the compiler gets whiny. */
975 if(surface
->base
.content
== CAIRO_CONTENT_COLOR_ALPHA
)
978 pixman_format
= PIXMAN_a8r8g8b8
;
979 type
= GL_UNSIGNED_INT_8_8_8_8_REV
;
982 else if(surface
->base
.content
== CAIRO_CONTENT_COLOR
)
985 pixman_format
= PIXMAN_x8r8g8b8
;
986 type
= GL_UNSIGNED_INT_8_8_8_8_REV
;
989 else if(surface
->base
.content
== CAIRO_CONTENT_ALPHA
)
992 pixman_format
= PIXMAN_a8
;
993 type
= GL_UNSIGNED_BYTE
;
998 fprintf(stderr
, "get_image fallback: %d\n", surface
->base
.content
);
999 return CAIRO_INT_STATUS_UNSUPPORTED
;
1002 // if((surface->valid_mask & ((1 << idx) | (1 << SURF_TEX))) == (1 << SURF_TEX)
1005 // avoid ctx->gl.GetTexImage for partial reads
1007 if((surface->valid_mask & (1 << SURF_TEX))
1008 && !extents.x && !extents.y && extents.width == (int)surface->width && extents.height == (int)surface->height
1009 && surface->texture.width == surface->width && surface->texture.height == surface->height)
1012 /* avoid this, as it seems to not work on bound framebuffers on nVidia */
1015 stride
= MAX(extents
.width
, (int)surface
->texture
.width
) * cpp
;
1016 dheight
= MAX(extents
.height
, (int)surface
->texture
.height
);
1018 data
= malloc(dheight
* stride
* cpp
);
1020 return CAIRO_STATUS_NO_MEMORY
;
1022 if(surface
->texture
.non_upside_down
)
1025 ctx
= _cairo_gpu_space_bind(surface
->space
);
1026 _cairo_gl_context_set_active_texture(ctx
, _cairo_gl_context_set_texture(ctx
, -1, &surface
->texture
));
1027 ctx
->gl
.PixelStorei(GL_PACK_ALIGNMENT
, 1);
1028 ctx
->gl
.GetTexImage(_cairo_gl_context_active_target(ctx
), 0, format
, type
, data
);
1034 rect_out
->width
= surface
->width
;
1035 rect_out
->height
= surface
->height
;
1041 (idx
== SURF_FRONT
|| idx
== SURF_BACK
)
1042 && surface
->buffer_non_upside_down
1046 stride
= extents
.width
* cpp
;
1047 dheight
= extents
.height
;
1049 data
= malloc(dheight
* stride
* cpp
);
1051 return CAIRO_STATUS_NO_MEMORY
;
1053 ctx
= _cairo_gpu_surface_bind(surface
, FB_READ
);
1055 ctx
->gl
.PixelStorei(GL_PACK_ALIGNMENT
, 1);
1056 ctx
->gl
.ReadPixels(extents
.x
, extents
.y
, extents
.width
, extents
.height
, format
, type
, data
);
1061 unsigned char* data2
= malloc(extents
.width
* extents
.height
* cpp
);
1062 for(y
= 0; y
< extents
.height
; y
++)
1063 memcpy((char *)data2
+ (extents
.height
- y
- 1) * stride
, data
+ y
* extents
.width
* cpp
, extents
.width
* cpp
);
1066 stride
= extents
.width
;
1067 dheight
= extents
.height
;
1071 image
= (cairo_image_surface_t
*)_cairo_image_surface_create_with_pixman_format (data
, pixman_format
, extents
.width
, extents
.height
, stride
);
1072 if(image
->base
.status
)
1073 return image
->base
.status
;
1075 _cairo_image_surface_assume_ownership_of_data(image
);
1079 while((err
= ctx
->gl
.GetError()))
1080 fprintf(stderr
, "GL error 0x%08x\n", (int)err
);
1082 return CAIRO_STATUS_SUCCESS
;
// Forward declarations: dirty-region notification helpers defined elsewhere in
// this file (the variant with idx marks a specific buffer's bbox).
1086 _cairo_gpu_surface_modified(cairo_gpu_surface_t
* surface
, int x
, int y
, int width
, int height
);
1089 _cairo_gpu_surface_modified_(cairo_gpu_surface_t
* surface
, int idx
, int x
, int y
, int width
, int height
);
// Uploads an image surface region into dst: when the texture is right-side-up
// and the texture path is preferable (destination is the texture, inside an
// acquire, or only the texture is valid) it uploads into the texture and marks
// SURF_TEX modified; otherwise it blits pixels into the bound draw buffer.
// A full-surface upload pre-marks SURF_TEX valid so no stale content is pulled in.
1091 static cairo_status_t
1092 _cairo_gpu_surface_draw_image(cairo_gpu_surface_t
* dst
, cairo_image_surface_t
* src
, int src_x
, int src_y
, int width
, int height
, int dst_x
, int dst_y
)
1094 cairo_gpu_context_t
* ctx
;
1095 cairo_gpu_space_tls_t
* tls
;
1096 int idx
= _cairo_gpu_surface_destination(dst
);
1097 int mask
= ((1 << SURF_TEX
) | (1 << idx
));
1098 int valid
= (dst
->valid_mask
& mask
);
1100 tls
= _cairo_gpu_space_get_tls(dst
->space
);
1102 if(dst_x
== 0 && dst_y
== 0 && width
== (int)dst
->width
&& height
== (int)dst
->height
)
1103 dst
->valid_mask
|= 1 << SURF_TEX
;
1105 if(!dst
->texture
.non_upside_down
&& (idx
== SURF_TEX
||
1106 ((!valid
|| valid
== mask
) && tls
->in_acquire
) ||
1107 (valid
== (1 << SURF_TEX
))
1110 ctx
= _cairo_gpu_surface_bind_tex_update(dst
);
1111 _cairo_gpu_context_upload_pixels(ctx
, dst
, -1, &dst
->texture
, src
, src_x
, src_y
, width
, height
, dst_x
, dst_y
);
1112 _cairo_gpu_surface_modified_(dst
, SURF_TEX
, dst_x
, dst_y
, width
, height
);
1116 ctx
= _cairo_gpu_surface_bind(dst
, FB_DRAW
);
1117 _cairo_gpu_context_blit_pixels(ctx
, dst
, src
, dst_x
, dst_y
, src_x
, src_y
, width
, height
);
1118 _cairo_gpu_surface_modified(dst
, dst_x
, dst_y
, width
, height
);
1121 return CAIRO_STATUS_SUCCESS
;
// Backend release/put hook: delegates to draw_image for writes; the guard
// producing the early SUCCESS return (presumably the !is_write case) is not
// visible in this garbled view — TODO confirm.
1124 static cairo_status_t
1125 _cairo_gpu_surface_put_image(cairo_gpu_surface_t
* dst
, cairo_image_surface_t
* src
, int src_x
, int src_y
, int width
, int height
, int dst_x
, int dst_y
, int is_write
, void* extra
)
1128 return CAIRO_STATUS_SUCCESS
;
1130 return _cairo_gpu_surface_draw_image(dst
, src
, src_x
, src_y
, width
, height
, dst_x
, dst_y
);
// Public accessor: returns the GL texture name backing the surface, creating
// the texture lazily. The error return for a non-GPU backend (after the
// backend check) is not visible in this garbled view.
1134 cairo_gl_surface_get_texture(cairo_surface_t
* abstract_surface
)
1136 cairo_gpu_surface_t
* surface
= (cairo_gpu_surface_t
*)abstract_surface
;
1137 if(abstract_surface
->backend
!= &_cairo_gpu_surface_backend
)
1140 if(!surface
->texture
.tex
)
1142 cairo_gpu_context_t
* ctx
;
1144 ctx
= _cairo_gpu_space_bind(surface
->space
);
1145 _cairo_gpu_surface__create_tex(ctx
, surface
);
1149 return surface
->texture
.tex
;
// Public accessor: returns the MSAA FBO when multisampling is in use,
// otherwise lazily creates and (presumably) returns the texture FBO — the
// final return and the non-GPU-backend error return are not visible in this
// garbled view.
1153 cairo_gl_surface_get_framebuffer(cairo_surface_t
* abstract_surface
)
1155 cairo_gpu_surface_t
* surface
= (cairo_gpu_surface_t
*)abstract_surface
;
1156 if(abstract_surface
->backend
!= &_cairo_gpu_surface_backend
)
1159 if(_cairo_gpu_surface_use_msaa(surface
))
1160 return surface
->msaa_fb
;
1162 if(!surface
->fb
&& surface
->space
->use_fbo
)
1164 cairo_gpu_context_t
* ctx
;
1166 ctx
= _cairo_gpu_space_bind(surface
->space
);
1167 _cairo_gpu_surface__create_tex_fb(ctx
, surface
);
// Public accessor: returns the MSAA renderbuffer name when multisampling is in
// use; the fallback return for the non-MSAA case and the backend-mismatch
// error return are not visible in this garbled view.
1175 cairo_gl_surface_get_renderbuffer(cairo_surface_t
* abstract_surface
)
1177 cairo_gpu_surface_t
* surface
= (cairo_gpu_surface_t
*)abstract_surface
;
1178 if(abstract_surface
->backend
!= &_cairo_gpu_surface_backend
)
1181 if(_cairo_gpu_surface_use_msaa(surface
))
1182 return surface
->msaa_rb
;
// Public accessor: returns the window-system drawable ID (0 when none) via the
// per-API hook; the final "return drawable;" is not visible in this view.
1188 cairo_gl_surface_get_drawable(cairo_surface_t
* abstract_surface
)
1190 cairo_gpu_surface_t
* surface
= (cairo_gpu_surface_t
*)abstract_surface
;
1191 unsigned long drawable
= 0;
1192 if(abstract_surface
->backend
!= &_cairo_gpu_surface_backend
)
1195 API_SURFACE_(drawable
, get_drawable
)(surface
);
// Public accessor: returns the GL-API-level drawable (e.g. GLXDrawable) via
// the per-API hook; the final "return drawable;" is not visible in this view.
1200 cairo_gl_surface_get_gl_drawable(cairo_surface_t
* abstract_surface
)
1202 cairo_gpu_surface_t
* surface
= (cairo_gpu_surface_t
*)abstract_surface
;
1203 unsigned long drawable
= 0;
1204 if(abstract_surface
->backend
!= &_cairo_gpu_surface_backend
)
1207 API_SURFACE_(drawable
, get_gl_drawable
)(surface
);
// Flips the drawable's orientation so it matches the texture surface's
// orientation, avoiding flipped blits between the two. The closing brace
// falls past the end of this view.
1212 _cairo_gpu_surface_orient_drawable_like_texture(cairo_gpu_surface_t
* draw_surf
, cairo_gpu_surface_t
* tex_surf
)
1214 if(draw_surf
->buffer_non_upside_down
!= tex_surf
->texture
.non_upside_down
)
1215 _cairo_gpu_surface__flip_drawable(draw_surf
)