7 } _cairo_gpu_blend_factors
[] =
11 {1, 1 | A
}, /* Over */
14 {A
, 1 | A
}, /* Atop */
16 {1 | A
, 1}, /* DestOver */
18 {0, 1 | A
}, /* DestOut */
19 {1 | A
, A
}, /* DestAtop */
20 {1 | A
, 1 | A
}, /* Xor */
22 {1 | A
, 1} /* Saturate */
/* Common composite-setup initialization: validates the operator, resets the
 * per-setup color/alpha multipliers and operand state, and selects the
 * source/dest blend factors for the operator from _cairo_gpu_blend_factors.
 * NOTE(review): this view is an extraction with some original lines (braces,
 * #endif, and possibly statements inside the op > ADD branch) missing;
 * code below is left byte-identical to the fragments. */
26 static inline cairo_status_t
27 _cairo_gpu_composite__do_init(cairo_gpu_composite_setup_t
* setup
, cairo_operator_t op
)
/* Debug overrides: force every operation to a fixed operator. */
29 #ifdef DEBUG_FORCE_SOURCE
30 op
= CAIRO_OPERATOR_SOURCE
;
33 #ifdef DEBUG_FORCE_ADD
34 op
= CAIRO_OPERATOR_ADD
;
/* Operators past ADD are SATURATE or invalid; invalid ones are rejected. */
37 if(op
> CAIRO_OPERATOR_ADD
)
39 // either SATURATE or invalid operator
40 if(op
> CAIRO_OPERATOR_SATURATE
)
41 return CAIRO_INT_STATUS_UNSUPPORTED
;
44 setup
->unpremultiplied
= setup
->dst
->base
.content
!= CAIRO_CONTENT_ALPHA
;
/* NOTE(review): the content-based value assigned just above is immediately
 * overwritten here — looks like a dead store; confirm intent against the
 * original (unextracted) file. */
47 setup
->saturate
= setup
->unpremultiplied
= 0;
49 setup
->primary_chan
= 0;
50 setup
->constant_chan
= 0;
51 setup
->operands_chan
= 0;
/* Start all constant color/alpha multipliers at identity (1.0). */
52 setup
->c
.r
= setup
->c
.g
= setup
->c
.b
= setup
->a
.r
= setup
->a
.g
= setup
->a
.b
= setup
->ka
= 1.0;
/* Blend factors are looked up per-operator from the table at the top of
 * the file. */
54 setup
->blend_dst
= _cairo_gpu_blend_factors
[op
].dst
;
55 setup
->blend_src
= _cairo_gpu_blend_factors
[op
].src
;
58 setup
->dst_alpha_mask
= 0;
60 memset(setup
->operands
, 0, sizeof(setup
->operands
));
62 return CAIRO_STATUS_SUCCESS
;
65 /* dst_x and dst_y are the eye coords corresponding to object coords (0,0) */
/* Records the destination/geometry parameters in the setup (only the height
 * store is visible in this extraction; lines storing the other parameters
 * are missing from this view) and delegates to __do_init. */
66 static inline cairo_status_t
67 _cairo_gpu_composite_init(cairo_gpu_composite_setup_t
* setup
, cairo_operator_t op
, cairo_gpu_surface_t
* dst
, int dst_x
, int dst_y
, int obj_x
, int obj_y
, int width
, int height
)
75 setup
->height
= height
;
77 return _cairo_gpu_composite__do_init(setup
, op
);
/* Like _cairo_gpu_composite_init, but additionally configures antialiasing
 * ("smooth") and the destination alpha mask.  Smoothing with a non-zero
 * source blend factor on a color-bearing destination forces unpremultiplied
 * interpolation. */
80 static inline cairo_status_t
81 _cairo_gpu_composite_init_smooth(cairo_gpu_composite_setup_t
* setup
, cairo_operator_t op
, cairo_gpu_surface_t
* dst
, int dst_x
, int dst_y
, int obj_x
, int obj_y
, int width
, int height
, int smooth
, int dst_alpha_mask
)
83 cairo_status_t status
;
84 status
= _cairo_gpu_composite_init(setup
, op
, dst
, dst_x
, dst_y
, obj_x
, obj_y
, width
, height
);
/* NOTE(review): status is computed above but the visible code returns
 * CAIRO_STATUS_SUCCESS unconditionally; the lines missing from this view
 * presumably propagate a failure — confirm against the original file. */
88 if(smooth
&& setup
->blend_src
&& setup
->dst
->base
.content
!= CAIRO_CONTENT_ALPHA
)
89 setup
->unpremultiplied
= 1;
90 setup
->smooth
= smooth
;
91 setup
->dst_alpha_mask
= dst_alpha_mask
;
92 return CAIRO_STATUS_SUCCESS
;
96 /* src_x and src_y are the texture coords corresponding to object coords (0,0) */
/* Classifies one source/mask pattern and records it as a composite operand:
 * - degenerate gradients are folded into a solid color;
 * - solid colors are folded into the setup's constant c/a/ka multipliers;
 * - surfaces and true gradients compute their channel set (CHAN_C/CHAN_A/
 *   CHAN_KA), check hardware capabilities, and fill in the operand slot.
 * NOTE(review): extraction gaps — several original lines (braces, a few
 *   statements, a `chan` initialization and some gradient width/denominator
 *   handling) are missing from this view; code left byte-identical. */
98 _cairo_gpu_composite_operand(cairo_gpu_composite_setup_t
* setup
, const cairo_pattern_t
* pattern
, int src_x
, int src_y
, cairo_bool_t is_mask
, int coords
)
/* Convenience casts of the pattern to each concrete pattern type; only the
 * one matching pattern->type is ever dereferenced. */
100 cairo_solid_pattern_t
*solid
= (cairo_solid_pattern_t
*) pattern
;
101 cairo_gradient_pattern_t
* gradient
= (cairo_gradient_pattern_t
*)pattern
;
102 cairo_linear_pattern_t
* linear
= (cairo_linear_pattern_t
*)gradient
;
103 cairo_radial_pattern_t
* radial
= (cairo_radial_pattern_t
*)gradient
;
104 const cairo_color_t
* color
;
105 cairo_gpu_composite_operand_t
* operand
;
109 cairo_bool_t multiple
= 0;
/* Use slot 1 if slot 0 is already occupied by a previous operand. */
113 i
= !!setup
->operands
[0].src
;
114 operand
= &setup
->operands
[i
];
116 switch (pattern
->type
)
118 case CAIRO_PATTERN_TYPE_LINEAR
:
119 case CAIRO_PATTERN_TYPE_RADIAL
:
/* Scan for the first stop whose color differs from stop 0; if none, the
 * gradient is a constant color. */
120 for (i
= 1; i
< gradient
->n_stops
; i
++)
122 if (! _cairo_color_equal (&gradient
->stops
[0].color
, &gradient
->stops
[i
].color
))
128 // TODO: check for degenerate radial
/* Degenerate gradients (uniform stops, zero-length linear, or coincident
 * equal-radius radial) are treated as solid colors below. */
129 if(i
>= gradient
->n_stops
||
130 ((pattern
->type
== CAIRO_PATTERN_TYPE_LINEAR
)
131 && linear
->p1
.x
== linear
->p2
.x
132 && linear
->p1
.y
== linear
->p2
.y
134 ((pattern
->type
== CAIRO_PATTERN_TYPE_RADIAL
)
135 && radial
->c1
.x
== radial
->c2
.x
136 && radial
->c1
.y
== radial
->c2
.y
137 && radial
->r1
== radial
->r2
141 if(gradient
->n_stops
&& gradient
->base
.extend
!= CAIRO_EXTEND_NONE
)
142 color
= &gradient
->stops
[0].color
;
144 color
= CAIRO_COLOR_TRANSPARENT
;
148 // TODO: use vertex programs to setup colors for 2-stop gradients
/* Determine which channels the gradient actually needs by inspecting all
 * stop colors. */
149 for (i
= 0; i
< gradient
->n_stops
; i
++)
151 if(gradient
->stops
[i
].color
.alpha
!= 1.0)
153 if(gradient
->stops
[i
].color
.red
!= 1.0 || gradient
->stops
[i
].color
.green
!= 1.0 || gradient
->stops
[i
].color
.blue
!= 1.0)
154 chan
|= CHAN_C
| CHAN_A
;
157 if(setup
->unpremultiplied
)
158 operand
->unpremultiplied
= 1;
159 else if((operand
->chan
& CHAN_KA
) && (operand
->chan
& ~CHAN_KA
))
162 /* We interpolate premultiplied coordinates on the GPU, but we should interpolate non-premultiplied ones.
163 * So we only do it on the GPU if there is no difference.
/* Premultiplied GPU interpolation matches non-premultiplied only if alpha
 * is constant across stops, or the color is constant across stops. */
165 for(i
= 1; i
< gradient
->n_stops
; ++i
)
167 if(gradient
->stops
[i
- 1].color
.alpha
!= gradient
->stops
[i
].color
.alpha
)
171 if(i
< gradient
->n_stops
)
173 for(i
= 1; i
< gradient
->n_stops
; ++i
)
175 if((gradient
->stops
[i
- 1].color
.red
!= gradient
->stops
[i
].color
.red
) ||
176 (gradient
->stops
[i
- 1].color
.green
!= gradient
->stops
[i
].color
.green
) ||
177 (gradient
->stops
[i
- 1].color
.blue
!= gradient
->stops
[i
].color
.blue
)
182 if(i
< gradient
->n_stops
)
184 if(!setup
->dst
->space
->frag_mul_alpha
)
189 operand
->unpremultiplied
= 1;
/* Find a common denominator for the stop offsets so the gradient ramp can
 * be rasterized into a fixed-size 1D texture (capped at 65536 texels). */
194 denom
= _cairo_gradient_pattern_compute_stops_common_denominator(gradient
, setup
->dst
->space
->discontinuous
? &multiple
: 0, 65536);
196 if(gradient
->base
.extend
== CAIRO_EXTEND_NONE
|| gradient
->base
.extend
== CAIRO_EXTEND_PAD
)
/* Without NPOT texture support, round the ramp width up to a power of 2. */
199 if(!setup
->dst
->space
->tex_npot
&& !is_pow2(width
))
200 width
= higher_pow2(width
);
204 if(!setup
->dst
->space
->tex_npot
&& !is_pow2(denom
))
206 else if(gradient
->base
.extend
== CAIRO_EXTEND_REPEAT
)
208 else if(gradient
->base
.extend
== CAIRO_EXTEND_REFLECT
)
214 if(setup
->unpremultiplied
&& !setup
->dst
->space
->frag_mul_alpha
&& !denom
)
215 return CAIRO_INT_STATUS_UNSUPPORTED
;
217 //printf("denominator is %u multiple is %i extend is %i\n", denom, multiple, gradient->base.extend);
219 case CAIRO_PATTERN_TYPE_SOLID
:
220 color
= &solid
->color
;
/* Solid colors are folded directly into the setup's constant multipliers
 * instead of occupying an operand slot. */
222 if(is_mask
&& !pattern
->component_alpha
)
224 setup
->c
.r
*= color
->alpha
;
225 setup
->c
.g
*= color
->alpha
;
226 setup
->c
.b
*= color
->alpha
;
230 setup
->c
.r
*= color
->red
* color
->alpha
;
231 setup
->c
.g
*= color
->green
* color
->alpha
;
232 setup
->c
.b
*= color
->blue
* color
->alpha
;
235 if(is_mask
&& pattern
->component_alpha
)
237 setup
->a
.r
*= color
->red
* color
->alpha
;
238 setup
->a
.g
*= color
->green
* color
->alpha
;
239 setup
->a
.b
*= color
->blue
* color
->alpha
;
243 setup
->a
.r
*= color
->alpha
;
244 setup
->a
.g
*= color
->alpha
;
245 setup
->a
.b
*= color
->alpha
;
248 setup
->ka
*= color
->alpha
;
249 return CAIRO_STATUS_SUCCESS
;
251 case CAIRO_PATTERN_TYPE_SURFACE
:
/* Channel set depends on the source surface's content type. */
252 if(((cairo_surface_pattern_t
*)pattern
)->surface
->content
== CAIRO_CONTENT_ALPHA
)
253 chan
= CHAN_C
| CHAN_KA
; // CHAN_C is because alpha-only surfaces are black...
254 else if(((cairo_surface_pattern_t
*)pattern
)->surface
->content
== CAIRO_CONTENT_COLOR
)
255 chan
= CHAN_C
| CHAN_A
;
257 chan
= CHAN_C
| CHAN_A
| CHAN_KA
;
/* Component-alpha masks drop the scalar color/alpha channels. */
263 if(pattern
->component_alpha
)
266 chan
&=~ (CHAN_C
| CHAN_A
);
/* Capability checks specific to surface sources. */
271 if(pattern
->type
== CAIRO_PATTERN_TYPE_SURFACE
)
273 if(setup
->unpremultiplied
)
275 if((chan
& (CHAN_C
| CHAN_A
)))
277 // TODO: maybe unpremultiply in software
278 if(!setup
->dst
->space
->frag_div_alpha
)
279 return CAIRO_INT_STATUS_UNSUPPORTED
;
283 // TODO: clone to GL_ALPHA
284 if(!setup
->dst
->space
->tex_aaaa_111a
)
285 return CAIRO_INT_STATUS_UNSUPPORTED
;
290 // TODO: clone to GL_INTENSITY
291 if( (((cairo_surface_pattern_t
*)pattern
)->surface
->content
!= CAIRO_CONTENT_ALPHA
)
292 && !(chan
& (CHAN_C
| CHAN_A
)) && !setup
->dst
->space
->tex_aaaa_111a
)
293 return CAIRO_INT_STATUS_UNSUPPORTED
;
297 /* The extended area is transparent and thus has != 1 alpha */
298 if(pattern
->extend
== CAIRO_EXTEND_NONE
)
/* Record the operand; surface/texture acquisition happens later in
 * _cairo_gpu_composite_prepare_operands. */
301 operand
->surface
= 0;
302 operand
->chan
= chan
;
303 operand
->has_coords
= coords
>= 0;
304 operand
->src
= (cairo_pattern_t
*)pattern
;
305 operand
->src_x
= src_x
;
306 operand
->src_y
= src_y
;
307 operand
->gradient_width
= width
;
308 operand
->gradient_denominator
= denom
;
309 operand
->gradient_discontinuous
= multiple
;
311 return CAIRO_STATUS_SUCCESS
;
/* Writes a cairo color into a 4-float texel: plain RGB when unpremultiplied,
 * RGB scaled by alpha otherwise.  (The branch structure and the p[3]/alpha
 * store are among the lines missing from this extraction.) */
315 _cairo_gpu_prepare_color(float* p
, cairo_color_t
* color
, int unpremultiplied
)
319 p
[0] = (float)(color
->red
);
320 p
[1] = (float)(color
->green
);
321 p
[2] = (float)(color
->blue
);
325 p
[0] = (float)(color
->red
* color
->alpha
);
326 p
[1] = (float)(color
->green
* color
->alpha
);
327 p
[2] = (float)(color
->blue
* color
->alpha
);
/* Produces the setup's effective constant color (most of this function's
 * body is missing from this extraction; only the unpremultiplied branch
 * head is visible). */
333 _cairo_gpu_composite_get_constant_color(cairo_gpu_composite_setup_t
* setup
, cairo_gpu_color4_t
* p
)
335 cairo_gpu_color4_t u
;
339 if(setup
->unpremultiplied
)
/* Pre-tiles a REPEAT/REFLECT source into a new surface covering exactly the
 * sampled area, so the texture can then be used with EXTEND_NONE.  Draws the
 * source texture as a grid of quads (mirrored per-tile for REFLECT), replaces
 * *psurface with the tiled copy, and returns the translation applied via
 * *x_off / *y_off.  NOTE(review): extraction gaps — the missing lines include
 * the *psurface reassignment and parts of the tiling/flip math; code left
 * byte-identical. */
354 static void _cairo_gpu_acquire_repeat_reflect(cairo_gpu_surface_t
** psurface
, cairo_surface_attributes_t
* attrs
, int x
, int y
, unsigned width
, unsigned height
, int* x_off
, int* y_off
)
356 cairo_gpu_surface_t
* surface
= *psurface
;
357 cairo_gpu_surface_t
* dst
;
358 cairo_gpu_context_t
* ctx
;
359 cairo_gpu_geometry_t
* geometry
;
360 double x1
, y1
, x2
, y2
;
361 int rx
, ry
, mx
, my
, flipx
, oflipx
, flipy
;
362 cairo_rectangle_int_t sampled_area
;
367 cairo_surface_attributes_t tex_attrs
;
368 cairo_gpu_texture_t
* texture
;
/* Compute the pattern-space rectangle that will be sampled. */
370 x1
= x
+ attrs
->x_offset
;
371 y1
= y
+ attrs
->y_offset
;
372 x2
= x
+ (int) width
;
373 y2
= y
+ (int) height
;
375 _cairo_matrix_transform_bounding_box (&attrs
->matrix
,
/* Linear filters sample a half-texel beyond the box; pad accordingly. */
379 pad
= (attrs
->filter
== CAIRO_FILTER_GOOD
|| attrs
->filter
== CAIRO_FILTER_BEST
|| attrs
->filter
== CAIRO_FILTER_BILINEAR
) ? 0.5 : 0.0;
381 sampled_area
.x
= floor (x1
- pad
);
382 sampled_area
.y
= floor (y1
- pad
);
383 sampled_area
.width
= ceil (x2
+ pad
) - sampled_area
.x
;
384 sampled_area
.height
= ceil (y2
+ pad
) - sampled_area
.y
;
/* Destination surface big enough to hold the whole sampled area. */
386 dst
= (cairo_gpu_surface_t
*)_cairo_gpu_surface_create_similar(surface
, surface
->base
.content
, sampled_area
.width
, sampled_area
.height
);
388 dst
->base
.device_transform
= surface
->base
.device_transform
;
389 dst
->base
.device_transform_inverse
= surface
->base
.device_transform_inverse
;
391 ctx
= _cairo_gpu_surface_lookup_context(surface
, FB_DRAW
);
392 texture
= _cairo_gpu_surface_begin_texture(surface
, ctx
, 0);
393 _cairo_gpu_surface_bind_to(surface
, ctx
, FB_DRAW
);
395 _cairo_gpu_context_set_viewport(ctx
, 0, 0, dst
->width
, dst
->height
);
396 _cairo_gpu_context_set_translation(ctx
, -sampled_area
.x
, -sampled_area
.y
);
398 _cairo_gpu_context_set_vert_frag(ctx
, 1 << VERT_TEX_SHIFT
,
399 (((texture
->target_idx
== TARGET_RECTANGLE
) ? FRAG_TEX_RECTANGLE
: 0) | FRAG_TEX_COLOR_RGBA
) << FRAG_TEX_SHIFT
);
/* Sample the source 1:1 — identity matrix, no extend, nearest filter. */
401 cairo_matrix_init_identity(&tex_attrs
.matrix
);
402 _cairo_gpu_texture_adjust_matrix(texture
, &tex_attrs
.matrix
);
403 tex_attrs
.extend
= CAIRO_EXTEND_NONE
;
404 tex_attrs
.filter
= CAIRO_FILTER_NEAREST
;
406 _cairo_gpu_context_set_texture_and_attributes(ctx
, 0, texture
, &tex_attrs
);
/* Plain copy blend, restricted to the channels the content actually has. */
408 _cairo_gpu_context_set_blend(ctx
, (surface
->base
.content
== CAIRO_CONTENT_COLOR_ALPHA
) ? BLEND_SOURCE
:
409 ((surface
->base
.content
== CAIRO_CONTENT_COLOR
) ? BLEND_SOURCE_COLORONLY
: BLEND_SOURCE_ALPHAONLY
));
411 _cairo_gpu_context_set_raster(ctx
, 0);
413 geometry
= _cairo_gpu_context_geometry(ctx
, GEOM_TEMP
);
/* Tile phase: offset of the first tile and initial mirror parity in x... */
416 mx
= rx
% (int)surface
->width
;
417 oflipx
= (rx
/ (int)surface
->width
) & 1;
420 mx
+= surface
->width
;
/* ...and in y. */
425 my
= ry
% (int)surface
->height
;
426 flipy
= (ry
/ (int)surface
->height
) & 1;
429 my
+= surface
->height
;
/* Number of tile quads needed to cover the sampled area. */
433 quads
= ((sampled_area
.width
+ mx
+ surface
->width
- 1) / surface
->width
)
434 * ((sampled_area
.height
+ my
+ surface
->height
- 1) / surface
->height
);
435 v
= vertices
= _cairo_gpu_geometry_begin(ctx
, geometry
, PRIM_QUADS
, quads
* 4, 2, 0, 2);
/* Emit one quad per tile, toggling the mirror parity each step. */
437 for(ry
= sampled_area
.y
- my
; ry
< (sampled_area
.y
+ sampled_area
.height
); ry
+= surface
->height
, flipy
^= 1)
439 for(rx
= sampled_area
.x
- mx
, flipx
= oflipx
; rx
< (sampled_area
.x
+ sampled_area
.width
); rx
+= surface
->width
, flipx
^= 1)
/* Clip each tile quad to the sampled area. */
441 int dx
= MAX(sampled_area
.x
- rx
, 0);
442 int dy
= MAX(sampled_area
.y
- ry
, 0);
443 int w
= MIN((int)surface
->width
, sampled_area
.x
+ sampled_area
.width
- rx
) - dx
;
444 int h
= MIN((int)surface
->height
, sampled_area
.y
+ sampled_area
.height
- ry
) - dy
;
/* REFLECT: mirror the texture coordinates on odd tiles. */
449 if(attrs
->extend
== CAIRO_EXTEND_REFLECT
)
453 tx
= surface
->width
- tx
;
458 ty
= surface
->height
- ty
;
463 _cairo_gpu_emit_rect_tex_(&v
, rx
+ dx
, ry
+ dy
, tx
, ty
, w
, h
, tw
, th
);
466 _cairo_gpu_geometry_end(ctx
, geometry
, quads
* 4);
467 _cairo_gpu_context_set_geometry(ctx
, geometry
);
469 _cairo_gpu_context_draw(ctx
);
471 _cairo_gpu_geometry_put(ctx
, geometry
);
473 _cairo_gpu_surface_modified(dst
, 0, 0, sampled_area
.width
, sampled_area
.height
);
474 _cairo_gpu_surface_end_texture(ctx
, surface
, texture
);
/* Drop our reference to the original source; the caller now samples the
 * tiled copy (the *psurface = dst assignment is in lines missing from
 * this view — confirm). */
476 cairo_surface_destroy(&surface
->base
);
479 attrs
->extend
= CAIRO_EXTEND_NONE
;
480 *x_off
= -sampled_area
.x
;
481 *y_off
= -sampled_area
.y
;
485 #include <xmmintrin.h>
/* Materializes every active operand recorded by _cairo_gpu_composite_operand:
 * - small image surfaces are uploaded directly into a fresh texture;
 * - compatible GPU surfaces are used in place (tiling/padding as needed);
 * - linear/radial gradients are rasterized into a 1D ramp texture (SSE fast
 *   path when available) and their matrices rewritten to index the ramp;
 * - everything else goes through _cairo_pattern_acquire_surface.
 * NOTE(review): extraction gaps — many original lines (braces, loop heads,
 * several statements in the gradient rasterization) are missing from this
 * view; code left byte-identical. */
488 static cairo_status_t
_cairo_gpu_composite_prepare_operands(cairo_gpu_composite_setup_t
* setup
)
491 cairo_gpu_context_t
* ctx
;
493 for(op
= 0; op
< MAX_OPERANDS
; ++op
)
496 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[op
];
497 cairo_pattern_t
* src
= operand
->src
;
498 cairo_surface_attributes_t
* attributes
= &operand
->attributes
;
/* Skip operands whose channels are not used by any pass. */
499 if(!(setup
->operands
[op
].chan
& setup
->operands_chan
))
502 attributes
->extra
= 0;
503 attributes
->extend
= src
->extend
;
504 attributes
->filter
= src
->filter
;
505 attributes
->x_offset
= operand
->src_x
- setup
->obj_x
;
506 attributes
->y_offset
= operand
->src_y
- setup
->obj_y
;
508 attributes
->matrix
= src
->matrix
;
510 // we do this lazily in setup_pass
511 if(src
->type
== CAIRO_PATTERN_TYPE_SURFACE
)
513 int tex_xdiv
, tex_ydiv
;
514 cairo_surface_t
* surface
= ((cairo_surface_pattern_t
*)src
)->surface
;
515 cairo_gpu_texture_t
* adjust_texture
;
/* Bail to the generic path if the hardware can't do this extend mode. */
516 if(!(setup
->dst
->space
->extend_mask
& (1 << attributes
->extend
)))
519 if(_cairo_surface_is_image(surface
))
521 cairo_image_surface_t
* image_surface
= (cairo_image_surface_t
*)(((cairo_surface_pattern_t
*)src
)->surface
);
522 int width
= image_surface
->width
;
523 int height
= image_surface
->height
;
525 /* use general path for large image surfaces since it can acquire subrectangles */
526 if(width
> 32 || height
> 32)
531 if(!setup
->dst
->space
->tex_npot
&& (!is_pow2(width
) || !is_pow2(height
)))
533 if(attributes
->extend
== CAIRO_EXTEND_REFLECT
|| attributes
->extend
== CAIRO_EXTEND_REPEAT
)
536 if(!setup
->dst
->space
->tex_rectangle
)
/* Small image: upload straight into a dedicated texture owned by the
 * operand.  NOTE(review): the malloc result is used unchecked in the
 * visible lines — confirm against the original file. */
543 adjust_texture
= operand
->texture
= (cairo_gpu_texture_t
*)malloc(sizeof(cairo_gpu_texture_t
));
544 ctx
= _cairo_gpu_space_bind(setup
->dst
->space
);
545 _cairo_gpu_texture_create(ctx
, operand
->texture
, image_surface
->width
, image_surface
->height
);
546 _cairo_gpu_context_upload_pixels(ctx
, 0, op
, operand
->texture
, image_surface
, 0, 0, image_surface
->width
, image_surface
->height
, -1, -1);
548 operand
->owns_texture
= 1;
550 else if(_cairo_gpu_surface_is_compatible(setup
->dst
, surface
))
552 cairo_gpu_surface_t
* surf
= (cairo_gpu_surface_t
*)surface
;
/* Rectangle targets / padded textures can't wrap natively. */
553 if((surf
->texture
.target_idx
== TARGET_RECTANGLE
|| surf
->texture
.width
!= surf
->width
|| surf
->texture
.height
!= surf
->height
) &&
554 (attributes
->extend
== CAIRO_EXTEND_REFLECT
|| attributes
->extend
== CAIRO_EXTEND_REPEAT
))
557 if(surf
->texture
.width
!= surf
->width
|| surf
->texture
.height
!= surf
->height
)
558 _cairo_gpu_surface_fix_pad(surf
, attributes
->extend
);
560 operand
->surface
= surf
;
562 adjust_texture
= &surf
->texture
;
/* Fold the source offset into the pattern matrix, then convert to the
 * texture's coordinate space. */
567 attributes
->matrix
.x0
+= attributes
->x_offset
* attributes
->matrix
.xx
+ attributes
->y_offset
* attributes
->matrix
.xy
;
568 attributes
->matrix
.y0
+= attributes
->x_offset
* attributes
->matrix
.yx
+ attributes
->y_offset
* attributes
->matrix
.yy
;
570 _cairo_gpu_texture_adjust_matrix(adjust_texture
, &attributes
->matrix
);
572 if(_cairo_matrix_is_pixel_exact(&attributes
->matrix
))
573 attributes
->filter
= CAIRO_FILTER_NEAREST
;
575 else if(src
->type
== CAIRO_PATTERN_TYPE_LINEAR
576 || (src
->type
== CAIRO_PATTERN_TYPE_RADIAL
&& setup
->dst
->space
->radial
))
578 cairo_gradient_pattern_t
* gradient
= (cairo_gradient_pattern_t
*)src
;
579 unsigned denom
, width
, swidth
;
580 cairo_bool_t multiple
;
582 #ifdef DEBUG_DISABLE_GRADIENTS
/* Ramp geometry computed earlier in composite_operand. */
586 multiple
= operand
->gradient_discontinuous
;
587 denom
= operand
->gradient_denominator
;
588 width
= operand
->gradient_width
;
592 // note that we never use texture rectangles here
599 if(src
->type
== CAIRO_PATTERN_TYPE_LINEAR
)
601 cairo_linear_pattern_t
* linear
= (cairo_linear_pattern_t
*)gradient
;
602 double x0
, y0
, x1
, y1
, dx
, dy
;
/* For NONE/PAD, clamp the gradient line to the first/last stop offsets. */
605 if(linear
->base
.base
.extend
== CAIRO_EXTEND_NONE
|| linear
->base
.base
.extend
== CAIRO_EXTEND_PAD
)
607 x0
= _cairo_fixed_to_double(linear
->p1
.x
) * (1 - linear
->base
.stops
[0].offset
) + _cairo_fixed_to_double(linear
->p2
.x
) * linear
->base
.stops
[0].offset
;
608 y0
= _cairo_fixed_to_double(linear
->p1
.y
) * (1 - linear
->base
.stops
[0].offset
) + _cairo_fixed_to_double(linear
->p2
.y
) * linear
->base
.stops
[0].offset
;
609 x1
= _cairo_fixed_to_double(linear
->p1
.x
) * (1 - linear
->base
.stops
[linear
->base
.n_stops
- 1].offset
) + _cairo_fixed_to_double(linear
->p2
.x
) * linear
->base
.stops
[linear
->base
.n_stops
- 1].offset
;
610 y1
= _cairo_fixed_to_double(linear
->p1
.y
) * (1 - linear
->base
.stops
[linear
->base
.n_stops
- 1].offset
) + _cairo_fixed_to_double(linear
->p2
.y
) * linear
->base
.stops
[linear
->base
.n_stops
- 1].offset
;
614 x0
= _cairo_fixed_to_double(linear
->p1
.x
);
615 y0
= _cairo_fixed_to_double(linear
->p1
.y
);
616 x1
= _cairo_fixed_to_double(linear
->p2
.x
);
617 y1
= _cairo_fixed_to_double(linear
->p2
.y
);
/* Project pattern space onto the gradient axis: scale by denom / |d|^2. */
622 l
= (dx
* dx
+ dy
* dy
);
627 n
= (double)denom
/ l
;
631 attributes
->matrix
.x0
+= attributes
->x_offset
* attributes
->matrix
.xx
+ attributes
->y_offset
* attributes
->matrix
.xy
;
632 attributes
->matrix
.y0
+= attributes
->x_offset
* attributes
->matrix
.yx
+ attributes
->y_offset
* attributes
->matrix
.yy
;
/* Half-texel offset centers samples unless the ramp is discontinuous. */
636 m
.x0
= (multiple
? 0.0 : 1.0 / (2 * swidth
)) - (dx
* x0
+ dy
* y0
) * n
;
640 cairo_matrix_multiply(&attributes
->matrix
, &attributes
->matrix
, &m
);
644 // move (xc, yc) to 0 and yd to (sqrt((xd - xc)^2 + (yd - yc)^2), 0)
645 cairo_radial_pattern_t
* radial
= (cairo_radial_pattern_t
*)gradient
;
647 attributes
->matrix
.x0
+= attributes
->x_offset
* attributes
->matrix
.xx
+ attributes
->y_offset
* attributes
->matrix
.xy
648 - _cairo_fixed_to_double(radial
->c1
.x
);
649 attributes
->matrix
.y0
+= attributes
->x_offset
* attributes
->matrix
.yx
+ attributes
->y_offset
* attributes
->matrix
.yy
650 - _cairo_fixed_to_double(radial
->c1
.y
);
/* --- rasterize the stop list into a 1D ramp texture --- */
658 unsigned prev_stop
= 0;
667 ctx
= _cairo_gpu_space_bind(setup
->dst
->space
);
/* Normalize stop offsets to [0, 1] for NONE/PAD. */
669 if(gradient
->base
.extend
== CAIRO_EXTEND_NONE
|| gradient
->base
.extend
== CAIRO_EXTEND_PAD
)
671 scale
= 1.0 / (gradient
->stops
[gradient
->n_stops
- 1].offset
- gradient
->stops
[0].offset
);
672 off
= -gradient
->stops
[0].offset
* scale
;
675 next_stop
= lround((gradient
->stops
[0].offset
* scale
+ off
) * denom
);
677 if(gradient
->base
.extend
!= CAIRO_EXTEND_NONE
)
678 _cairo_gpu_prepare_color(next_color
, &gradient
->stops
[0].color
, operand
->unpremultiplied
);
682 memset(next_color
, 0, sizeof(next_color
));
/* Seed the "previous" stop so the wrap-around segment is interpolated
 * correctly for REPEAT/REFLECT. */
684 if(gradient
->base
.extend
== CAIRO_EXTEND_REPEAT
)
686 prev_stop
= lround((gradient
->stops
[gradient
->n_stops
- 1].offset
* scale
+ off
) * denom
) - denom
;
687 _cairo_gpu_prepare_color(prev_color
, &gradient
->stops
[gradient
->n_stops
- 1].color
, operand
->unpremultiplied
);
689 else if(gradient
->base
.extend
== CAIRO_EXTEND_REFLECT
)
691 prev_stop
= -next_stop
;
692 memcpy(prev_color
, next_color
, sizeof(next_color
));
697 operand
->texture
= _cairo_gpu_temp_1d_image(ctx
, op
, &swidth
, &owned
);
698 operand
->owns_texture
= owned
;
/* Scratch buffer for the ramp texels (RGBA floats). */
701 p
= data
= alloca(swidth
* sizeof(float) * 4);
703 //printf("denom is %u width is %u swidth is %u extend is %i\n", denom, width, swidth, gradient->base.extend);
704 if(gradient
->base
.extend
== CAIRO_EXTEND_PAD
)
705 assert(next_stop
== 0);
710 unsigned end
= next_stop
;
/* Linear interpolation between prev and next stop colors. */
717 float c
= 1.0 / (next_stop
- prev_stop
);
720 // TODO: this will currently only be used for x86-64 or x86 with -msse: we should detect at runtime on i386
/* SSE fast path: value v and per-texel increment a as 4-wide vectors. */
721 __m128 cv
= _mm_set1_ps(c
);
722 __m128 prev_b
= _mm_set1_ps(next_stop
- start
);
723 __m128 next_b
= _mm_set1_ps(start
- prev_stop
);
724 __m128 a
= (*(__m128
*)next_color
- *(__m128
*)prev_color
) * cv
;
725 __m128 v
= (prev_b
* *(__m128
*)prev_color
+ next_b
* *(__m128
*)next_color
) * cv
;
729 __m128
* pend
= (__m128
*)p
+ len
;
730 for(; (__m128
*)p
< pend
; p
+= 4)
/* Discontinuous ramps store two texels per step. */
738 __m128
* pend
= (__m128
*)p
+ (len
<< 1);
739 for(; (__m128
*)p
< pend
; p
+= 8)
/* Scalar fallback of the same interpolation. */
750 v
[0] = ((next_stop
- start
) * prev_color
[0] + (start
- prev_stop
) * next_color
[0]) * c
;
751 v
[1] = ((next_stop
- start
) * prev_color
[1] + (start
- prev_stop
) * next_color
[1]) * c
;
752 v
[2] = ((next_stop
- start
) * prev_color
[2] + (start
- prev_stop
) * next_color
[2]) * c
;
753 v
[3] = ((next_stop
- start
) * prev_color
[3] + (start
- prev_stop
) * next_color
[3]) * c
;
754 a
[0] = (next_color
[0] - prev_color
[0]) * c
;
755 a
[1] = (next_color
[1] - prev_color
[1]) * c
;
756 a
[2] = (next_color
[2] - prev_color
[2]) * c
;
757 a
[3] = (next_color
[3] - prev_color
[3]) * c
;
759 //#define DO(i) p[i] = (((next_stop - x) * prev_color[i] + (x - prev_stop) * next_color[i]) / (next_stop - prev_stop))
761 //#define DO(i) p[i] = (((next_stop - (end - x)) * prev_color[i] + ((end - x) - prev_stop) * next_color[i]) / (next_stop - prev_stop))
764 float* pend
= p
+ (len
<< 2);
765 for(; p
< pend
; p
+= 4)
779 float* pend
= p
+ (len
<< 3);
780 for(; p
< pend
; p
+= 8)
801 memcpy(p
, next_color
, sizeof(next_color
));
/* Advance to the next segment. */
806 prev_stop
= next_stop
;
807 memcpy(prev_color
, next_color
, sizeof(next_color
));
/* Past the last stop: fill the tail according to the extend mode. */
814 else if(stopi
== (int)gradient
->n_stops
)
816 if(gradient
->base
.extend
== CAIRO_EXTEND_NONE
)
818 unsigned len
= width
- (next_stop
+ 1);
819 unsigned size
= multiple
? (4 + len
* 8) : len
* 4;
824 else if(gradient
->base
.extend
== CAIRO_EXTEND_REPEAT
)
829 else if(gradient
->base
.extend
== CAIRO_EXTEND_REFLECT
)
836 else if(gradient
->base
.extend
== CAIRO_EXTEND_PAD
)
838 unsigned len
= width
- (next_stop
+ 1);
841 memcpy(p
, next_color
, sizeof(next_color
));
848 // this only happens for padded-to-power-of-two textures
849 for(x
= 0; x
< len
; ++x
)
850 memcpy(p
+ (x
<< 2), next_color
, sizeof(next_color
));
858 //printf("stopi is %i / %i\n", stopi, gradient->n_stops);
859 next_stop
= lround((gradient
->stops
[stopi
].offset
* scale
+ off
) * denom
);
860 _cairo_gpu_prepare_color(next_color
, &gradient
->stops
[stopi
].color
, operand
->unpremultiplied
);
862 if(next_stop
!= prev_stop
)
868 memcpy(p
, prev_color
, sizeof(prev_color
));
872 start
= prev_stop
+ 1;
876 //printf("uploading gradient with width %u\n", width);
878 // TODO: maybe we want to use a float32 or float16 texture here
879 _cairo_gpu_context_upload_data(ctx
, op
, operand
->texture
, data
, swidth
, 1);
882 /* we draw REFLECT gradients on a double-sized texture */
883 if(operand
->attributes
.extend
== CAIRO_EXTEND_REFLECT
)
884 operand
->attributes
.extend
= CAIRO_EXTEND_REPEAT
;
886 operand
->attributes
.filter
= CAIRO_FILTER_BILINEAR
;
887 operand
->attributes
.extra
= (void*)1;
888 operand
->gradient_width
= width
;
/* --- generic fallback: acquire a surface through the pattern layer --- */
892 cairo_status_t status
;
893 cairo_gpu_surface_t
* surface
;
895 cairo_gpu_space_tls_t
* tls
;
897 /* this recurses with the context locked (IF we locked it!)
900 // we check this in composite_operand
901 assert(!setup
->unpremultiplied
|| setup
->dst
->space
->frag_div_alpha
);
903 tls
= _cairo_gpu_space_get_tls(setup
->dst
->space
);
905 status
= _cairo_pattern_acquire_surface(
906 src
, &setup
->dst
->base
,
907 (operand
->chan
& CHAN_KA
) ? ((operand
->chan
& (CHAN_C
| CHAN_A
)) ? CAIRO_CONTENT_COLOR_ALPHA
: CAIRO_CONTENT_ALPHA
) : CAIRO_CONTENT_COLOR
,
908 operand
->src_x
, operand
->src_y
, setup
->width
, setup
->height
,
/* NOTE(review): `|` binds tighter than `?:`, so this expression evaluates
 * as `(FLAG | mask-test) ? 0 : NO_REFLECT`, which is always truthy and
 * therefore always yields 0 — NO_REFLECT is never passed.  The intended
 * form is presumably FLAG | ((mask-test) ? 0 : NO_REFLECT); confirm. */
909 CAIRO_PATTERN_ACQUIRE_COMPONENT_ALPHA
|
910 (setup
->dst
->space
->extend_mask
& (1 << CAIRO_EXTEND_REFLECT
)) ? 0 : CAIRO_PATTERN_ACQUIRE_NO_REFLECT
,
911 (cairo_surface_t
**) & surface
, attributes
);
/* Pre-tile if the texture can't wrap natively; pad otherwise. */
917 if((surface
->texture
.target_idx
== TARGET_RECTANGLE
|| surface
->texture
.width
!= surface
->width
|| surface
->texture
.height
!= surface
->height
)
918 && (attributes
->extend
== CAIRO_EXTEND_REPEAT
|| attributes
->extend
== CAIRO_EXTEND_REFLECT
))
919 _cairo_gpu_acquire_repeat_reflect(&surface
, attributes
, operand
->src_x
, operand
->src_y
, setup
->width
, setup
->height
, &x_off
, &y_off
);
920 else if(surface
->texture
.width
!= surface
->width
|| surface
->texture
.height
!= surface
->height
)
921 _cairo_gpu_surface_fix_pad(surface
, attributes
->extend
);
923 operand
->surface
= surface
;
924 operand
->owns_surface
= 1;
926 attributes
->x_offset
+= operand
->src_x
- setup
->obj_x
;
927 attributes
->y_offset
+= operand
->src_y
- setup
->obj_y
;
929 assert(surface
->base
.backend
== &_cairo_gpu_surface_backend
);
931 attributes
->matrix
.x0
+= x_off
+ attributes
->x_offset
* attributes
->matrix
.xx
+ attributes
->y_offset
* attributes
->matrix
.xy
;
932 attributes
->matrix
.y0
+= y_off
+ attributes
->x_offset
* attributes
->matrix
.yx
+ attributes
->y_offset
* attributes
->matrix
.yy
;
934 _cairo_gpu_texture_adjust_matrix(&surface
->texture
, &attributes
->matrix
);
937 return CAIRO_STATUS_SUCCESS
;
/* Binds one prepared operand to a texture unit: radial gradients get the
 * quadratic-solver constants computed here and passed to the shader; all
 * other operands bind their texture with plain attributes. */
941 _cairo_gpu_composite__set_operand(cairo_gpu_context_t
* ctx
, int op_idx
, cairo_gpu_composite_operand_t
* operand
)
943 cairo_surface_attributes_t
* attributes
= &operand
->attributes
;
944 cairo_pattern_t
* src
= operand
->src
;
948 if(operand
->gradient_denominator
&& src
->type
== CAIRO_PATTERN_TYPE_RADIAL
)
950 cairo_radial_pattern_t
* radial
= (cairo_radial_pattern_t
*)src
;
951 double dx
= _cairo_fixed_to_double(radial
->c2
.x
) - _cairo_fixed_to_double(radial
->c1
.x
);
952 double dy
= _cairo_fixed_to_double(radial
->c2
.y
) - _cairo_fixed_to_double(radial
->c1
.y
);
953 double r0
= _cairo_fixed_to_double(radial
->r1
);
954 double r1
= _cairo_fixed_to_double(radial
->r2
);
/* Quadratic coefficient for the radial-gradient solve.
 * NOTE(review): the standard coefficient is dx*dx + dy*dy - dr*dr;
 * "dx * dy" here looks like a typo for "dx * dx" — confirm against the
 * original file before relying on this. */
957 double a
= dx
* dy
+ dy
* dy
- dr
* dr
;
959 // XXX: all this is likely not numerically stable.
960 // XXX: we should use an algorithm like the one described in Jim Blinn's "How to Solve a Quadratic Equation"
962 // TODO: this is not a good thing to do...
/* Shader constants: -a, -ac term, scale/offset, and -b/2 coefficients. */
967 float sign
= (a
> 0) ? 1 : -1;
968 float mac
[4] = {-a
, -a
, 1.0, r0
* r0
* a
}; // -ac
969 float so
[4] = {sign
* operand
->gradient_denominator
/ a
, 0.0, 0, 0.5};
970 float mbd2
[4] = {dx
* sign
, dy
* sign
, r0
* dr
* sign
, 0.0}; // -b/2
/* Continuous ramps fold the texel scale and half-texel offset in here. */
972 if(!operand
->gradient_discontinuous
)
974 so
[0] /= (double)operand
->gradient_width
;
975 so
[2] = 0.5 / (double)operand
->gradient_width
;
978 _cairo_gpu_context_set_texture_and_attributes_(ctx
, idx
, operand
->texture
, attributes
, mbd2
);
979 _cairo_gpu_context_set_radial_gradient(ctx
, idx
, mac
, so
);
/* Non-radial operands: plain texture bind. */
983 _cairo_gpu_context_set_texture_and_attributes(ctx
, idx
, operand
->texture
, attributes
);
985 if(operand
->gradient_discontinuous
)
986 _cairo_gpu_context_set_discontinuous_width(ctx
, idx
, operand
->gradient_width
);
/* Applies one planned pass to the context: programs the vertex/fragment
 * stages, the constant color (unpremultiplied on demand), the blend state
 * and blend color, and (re)binds any operands that changed since the last
 * pass.  Returns whether the caller should draw geometry for this pass. */
991 _cairo_gpu_composite__set_pass(cairo_gpu_context_t
* ctx
, cairo_gpu_composite_setup_t
* setup
, cairo_gpu_composite_pass_t
* pass
)
993 unsigned schan
= pass
->chan
;
994 cairo_gpu_color4_t u
;
997 if(pass
->frag
& FRAG_CONSTANT
)
/* Color passes use the constant color, alpha passes the constant alpha. */
999 u
.c
= (schan
& CHAN_C
) ? setup
->c
: setup
->a
;
/* Undo the alpha premultiplication per component when requested. */
1002 if(pass
->unpremultiplied
)
1005 u
.c
.r
/= setup
->a
.r
;
1008 u
.c
.g
/= setup
->a
.g
;
1011 u
.c
.b
/= setup
->a
.b
;
1013 if(pass
->unpremultiplied_one_alpha
)
1018 _cairo_gpu_context_set_vert_frag(ctx
, pass
->vert
, pass
->frag
);
1020 if(pass
->frag
& FRAG_CONSTANT
)
1021 _cairo_gpu_context_set_constant_color(ctx
, &u
);
1023 _cairo_gpu_context_set_blend(ctx
, pass
->blend
.v
);
/* Constant blend color, per the pass's blend_color mode. */
1024 if(pass
->blend_color
== BLEND_COLOR_C_DIV_A
)
1026 cairo_gpu_color4_t bc
;
1028 bc
.c
.r
= setup
->c
.r
/ setup
->a
.r
;
1030 bc
.c
.g
= setup
->c
.g
/ setup
->a
.g
;
1032 bc
.c
.b
= setup
->c
.b
/ setup
->a
.b
;
1034 _cairo_gpu_context_set_blend_color(ctx
, &bc
);
1036 else if(pass
->blend_color
== BLEND_COLOR_A
)
1038 cairo_gpu_color4_t bc
;
1041 _cairo_gpu_context_set_blend_color(ctx
, &bc
);
1043 else if(pass
->blend_color
== BLEND_COLOR_1_MINUS_A
)
1045 cairo_gpu_color4_t bc
;
1046 bc
.c
.r
= 1.0 - setup
->a
.r
;
1047 bc
.c
.g
= 1.0 - setup
->a
.g
;
1048 bc
.c
.b
= 1.0 - setup
->a
.b
;
1049 bc
.ka
= 1.0 - setup
->ka
;
1050 _cairo_gpu_context_set_blend_color(ctx
, &bc
);
/* Rebind only operands whose assignment differs from the current state. */
1053 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1055 if((pass
->operands
[i
] >= 0) && pass
->operands
[i
] != setup
->cur_operands
[i
])
1057 _cairo_gpu_composite__set_operand(ctx
, i
, &setup
->operands
[(int)pass
->operands
[i
]]);
1058 setup
->cur_operands
[i
] = pass
->operands
[i
];
1062 return pass
->draw_geometry
;
/* Releases resources held by the composite setup's operands (surfaces
 * acquired through the pattern layer; texture cleanup lines are missing
 * from this extraction). */
1066 _cairo_gpu_composite_fini(cairo_gpu_composite_setup_t
* setup
)
1069 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
/* NOTE(review): operands[0] inside a loop over i — this looks like it
 * should be operands[i], otherwise only the first operand is ever
 * released; confirm against the original file. */
1071 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[0];
1074 if(operand
->owns_surface
)
1076 cairo_gpu_surface_t
*surface
= operand
->surface
;
1078 _cairo_pattern_release_surface(operand
->src
, &surface
->base
, &operand
->attributes
);
1079 operand
->owns_surface
= 0;
1080 operand
->surface
= 0;
/* Initializes pass i of the setup to defaults for the given channel set:
 * no blend color, premultiplied, geometry drawn, zero blend state, and all
 * components enabled.  Also accumulates schan into operands_chan so
 * prepare_operands knows which operand channels are live. */
1087 _cairo_gpu_composite__init_pass(cairo_gpu_composite_setup_t
* setup
, int i
, unsigned schan
)
1089 setup
->operands_chan
|= schan
;
1090 setup
->passes
[i
].chan
= schan
;
1091 setup
->passes
[i
].blend_color
= 0;
1092 setup
->passes
[i
].unpremultiplied
= 0;
1093 setup
->passes
[i
].unpremultiplied_one_alpha
= 0;
1094 setup
->passes
[i
].draw_geometry
= 1;
1095 setup
->passes
[i
].blend
.v
= 0;
1096 setup
->passes
[i
].component
= ~0;
/* Builds the blend state for pass i from the operator's abstract blend
 * factors: translates the ALPHA-encoded factors into concrete GL-style
 * src/dst factors for the channel set, degrades gracefully when
 * blend_func_separate is unavailable, handles SATURATE and unpremultiplied
 * sources, and finally sets the color write mask for the destination's
 * content type.  NOTE(review): extraction gaps — several branch heads and
 * statements are missing from this view; code left byte-identical. */
1100 _cairo_gpu_composite__make_pass(cairo_gpu_composite_setup_t
* setup
, int i
, unsigned schan
)
1102 cairo_gpu_blend_t blend
;
1103 int blend_src
= setup
->blend_src
;
1104 int blend_dst
= setup
->blend_dst
;
1106 _cairo_gpu_composite__init_pass(setup
, i
, schan
)
;
/* Source factor: the table encodes ALPHA / (1 | ALPHA) symbolically. */
1110 if(blend_src
== ALPHA
)
1111 blend
.src_rgb
= blend
.src_alpha
= BLEND_DST_ALPHA
;
1112 else if(blend_src
== (1 | ALPHA
))
1113 blend
.src_rgb
= blend
.src_alpha
= BLEND_ONE_MINUS_DST_ALPHA
;
1115 blend
.src_rgb
= blend
.src_alpha
= blend_src
;
/* Dest factor: alpha passes use SRC_COLOR since alpha rides in color. */
1117 if(blend_dst
== ALPHA
)
1118 blend
.dst_rgb
= blend
.dst_alpha
= (schan
& CHAN_C
) ? BLEND_SRC_ALPHA
: BLEND_SRC_COLOR
;
1119 else if(blend_dst
== (1 | ALPHA
))
1120 blend
.dst_rgb
= blend
.dst_alpha
= (schan
& CHAN_C
) ? BLEND_ONE_MINUS_SRC_ALPHA
: BLEND_ONE_MINUS_SRC_COLOR
;
1122 blend
.dst_rgb
= blend
.dst_alpha
= blend_dst
;
/* Color-only / alpha-only destinations ignore the other half. */
1124 if(setup
->dst
->base
.content
== CAIRO_CONTENT_COLOR
)
1125 blend
.src_alpha
= blend
.dst_alpha
= 0;
1126 else if(setup
->dst
->base
.content
== CAIRO_CONTENT_ALPHA
)
1127 blend
.src_rgb
= blend
.dst_rgb
= 0;
1129 if(setup
->zero_chan
& schan
& (CHAN_C
| CHAN_A
))
1131 if(setup
->zero_chan
& CHAN_KA
)
1132 blend
.src_alpha
= 0;
/* Without separate RGB/alpha blend functions the two must agree. */
1134 if(!setup
->dst
->space
->blend_func_separate
)
1139 blend
.src_rgb
= blend
.src_alpha
;
1141 assert(blend
.src_rgb
== blend
.src_alpha
);
1146 blend
.src_alpha
= blend
.src_rgb
;
1152 blend
.dst_rgb
= blend
.dst_alpha
;
1154 assert(blend
.dst_rgb
== blend
.dst_alpha
);
1159 blend
.dst_alpha
= blend
.dst_rgb
;
/* SATURATE path: requires ONE_MINUS_DST_ALPHA source semantics. */
1166 assert(blend
.src_rgb
== BLEND_ONE_MINUS_DST_ALPHA
);
1170 setup
->passes
[i
].unpremultiplied
= 1;
1171 blend
.src_rgb
= blend
.src_alpha
= BLEND_SRC_ALPHA_SATURATE
;
1175 assert(!(schan
& CHAN_A
));
1177 blend
.src_rgb
= blend
.src_alpha
= 1;
/* Unpremultiplied source: ONE becomes SRC_ALPHA and the pass is marked. */
1185 if(blend
.src_rgb
== 0)
1187 else if(blend
.src_rgb
== 1)
1189 blend
.src_rgb
= BLEND_SRC_ALPHA
;
1190 setup
->passes
[i
].unpremultiplied
= 1;
1196 assert(!(schan
& CHAN_A
));
/* Color write mask matches the destination's content type. */
1199 if(setup
->dst
->base
.content
== CAIRO_CONTENT_COLOR
)
1200 blend
.color_mask
= 7;
1201 else if(setup
->dst
->base
.content
== CAIRO_CONTENT_ALPHA
)
1202 blend
.color_mask
= 8;
1204 blend
.color_mask
= 0xf;
1206 setup
->passes
[i
].blend
.v
= blend
.v
;
1209 /* We use a general component-alpha model, which is simplified down if there is no "component alpha".
1210 * The component alpha model is the following:
1211 * - Each surface is 6-channel with p = (c, a) where c and a are (r, g, b) vectors
1212 * - Operands are multiplied together per-channel.
1213 * - Blending is done this way:
1214 * d.c = sb(d.a) * s.c + db(s.a) * d.c
1215 * d.a = sb(d.a) * s.a + db(s.a) * d.a
1216 * where multiplication is componentwise multiplication of 3-vectors
1218 * There are several mismatches between this model and OpenGL's one:
1219 * 1. OpenGL has no 6-channel textures. This can be solved by using two 3-channel textures, one for C and one for A.
1220 * 2. The blender only has one input, while we would need two
1222 * Another issue is how to handle 4-channel images.
1223 * For inputs, we simply set ra = ga = ba = a.
1224 * For outputs, we need an extended model.
1226 * In the extended model, there is an additional "ka" component, which is grayscale alpha.
1228 * d.c = sb(d.a) * s.c + db(s.a) * d.c
1229 * d.a = sb(d.a) * s.a + db(s.a) * d.a
1230 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1232 * We now have 7-channel images, represented as a 3-channel color image, plus a 4-channel alpha texture.
1233 * If the destination is 7-channel, we add a ka computation to pass 3.
1235 * *** THIS IS THE GENERAL CASE WE IMPLEMENT BELOW ***
1236 * If the destination is (r, g, b, ka), we have:
1237 * d.c = sb(d.ka) * s.c + db(s.a) * d.c
1238 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1240 * Which can be implemented like this:
1243 * d.c = 0 * s.a + db(s.a) * d.c
1246 * d.c = sb(d.ka) * s.c + 1 * d.c
1249 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1251 * If we have GL_EXT_blend_func_separate we can merge pass 2 and pass 3:
1254 * d.c = db(s.a) * d.c
1257 * d.c = sb(d.a) * s.c + 1 * d.c
1258 * d.ka = sb(d.ka) * s.a + db(s.ka) * d.ka
1261 * Or, if sb(d.a) == 0,
1262 * d.c = 0 * s.a + db(s.a) * d.c
1263 * d.ka = sb(d.ka) * s.a + db(s.ka) * d.ka
1266 * d.c = sb(d.ka) * s.c + db(s.a) * d.c
1267 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1271 _cairo_gpu_composite_plan_passes(cairo_gpu_composite_setup_t
* setup
)
1274 * Determine constness of source and destination channels
1275 * Zero source blend if zero color, zero alpha
1277 * Demote alpha factors if alpha is const 0 or const 1
1278 * Remove dead inputs (consider non-zeroness and alpha-inclusion of blend factors and colormask here!)
1279 * Separate blend factors, zero-out useless blend factors (preserving func_separate!)
1289 /* 1. Determine input/output channels usefulness and constness */
1291 if(setup
->dst
->base
.content
== CAIRO_CONTENT_COLOR_ALPHA
)
1292 dstchan
= CHAN_C
| CHAN_KA
;
1293 else if(setup
->dst
->base
.content
== CAIRO_CONTENT_COLOR
)
1298 if(!(dstchan
& CHAN_KA
) && (setup
->blend_src
& ALPHA
))
1300 setup
->blend_src
^= ALPHA
| 1;
1301 setup
->saturate
= 0;
1305 if(dstchan
& CHAN_C
)
1307 if(setup
->blend_src
)
1310 if(setup
->blend_dst
& ALPHA
)
1314 if(((dstchan
& CHAN_KA
) && setup
->blend_src
) || setup
->blend_dst
& ALPHA
)
1317 setup
->constant_chan
= 0;
1319 if(setup
->ka
== 0.0)
1321 zchan
|= CHAN_C
| CHAN_A
| CHAN_KA
;
1322 setup
->constant_chan
|= CHAN_C
| CHAN_A
| CHAN_KA
;
1323 setup
->c
.r
= setup
->c
.b
= setup
->c
.g
= setup
->a
.r
= setup
->a
.g
= setup
->a
.b
= setup
->ka
= 0;
1327 if(setup
->ka
!= 1.0)
1328 setup
->constant_chan
|= CHAN_KA
;
1330 if(setup
->a
.r
+ setup
->a
.g
+ setup
->a
.b
== 0.0f
)
1332 zchan
|= CHAN_C
| CHAN_A
;
1333 setup
->constant_chan
|= CHAN_C
| CHAN_A
;
1334 setup
->c
.r
= setup
->c
.b
= setup
->c
.g
= setup
->a
.r
= setup
->a
.g
= setup
->a
.b
= 0;
1338 if(setup
->a
.r
!= setup
->a
.b
|| setup
->a
.b
!= setup
->a
.g
|| setup
->a
.g
!= setup
->ka
)
1339 setup
->constant_chan
|= CHAN_A
;
1341 if((setup
->c
.r
+ setup
->c
.g
+ setup
->c
.b
) == 0.0f
)
1344 setup
->constant_chan
|= CHAN_C
;
1345 setup
->c
.r
= setup
->c
.b
= setup
->c
.g
= 0;
1347 else if((setup
->c
.r
+ setup
->c
.g
+ setup
->c
.b
) != 3.0f
)
1348 setup
->constant_chan
|= CHAN_C
;
1351 setup
->zero_chan
= zchan
;
1355 _cairo_gpu_composite__make_pass(setup
, 0, CHAN_C
| CHAN_A
| CHAN_KA
);
1361 vchan
= setup
->smooth
? CHAN_KA
: 0; /* coverage alpha */
1362 vchan
|= setup
->primary_chan
;
1365 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1366 vchan
|= setup
->operands
[i
].chan
;
1369 chan
= vchan
| setup
->constant_chan
| zchan
;
1371 /* 2. Constant fold channels in blend factors */
1374 setup
->blend_dst
&= ~ALPHA
;
1375 else if(!(chan
& CHAN_KA
) && (setup
->blend_dst
& ALPHA
))
1376 setup
->blend_dst
^= ALPHA
| 1;
1378 // TODO: handle src_alpha == 0
1379 // this is done separately in blend setup
1380 if(((zchan
| ~uchan
) & (CHAN_C
| CHAN_KA
)) == (CHAN_C
| CHAN_KA
))
1382 uchan
&=~ (CHAN_C
| CHAN_KA
);
1383 setup
->blend_src
= 0;
1384 setup
->saturate
= 0;
1388 if(!setup
->blend_src
&& setup
->blend_dst
== 1)
1394 /* 4. Dead input elimination */
1396 setup
->constant_chan
&= uchan
;
1398 if((zchan
| ~uchan
) & CHAN_KA
)
1401 setup
->primary_chan
&= ~zchan
| uchan
;
1404 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1405 setup
->operands
[i
].chan
&= ~zchan
| uchan
;
1408 vchan
&= ~zchan
| uchan
;
1409 chan
&= ~zchan
| uchan
;
1412 * d.ka = (1 - s.ka * cov)
1414 * d.c = sb(1) * s.c + db(1 - d.ka) * d.c
1415 * d.ka = 1 * s.ka + d.ka (= 1)
1418 // if blend_dst == 1, we are adding, so we can just use the vanilla no-component-alpha path
1419 if(setup
->dst_alpha_mask
&& (setup
->blend_dst
!= 1))
1421 cairo_gpu_blend_t blend
;
1422 assert(!(chan
& CHAN_A
));
1424 // if db() = 0/1, use one pass
1425 _cairo_gpu_composite__init_pass(setup
, 0, CHAN_KA
);
1426 setup
->passes
[0].blend
.v
= BLEND_SUB_ALPHAONLY
;
1428 _cairo_gpu_composite__init_pass(setup
, 1, CHAN_C
);
1430 if(setup
->blend_src
== 1)
1431 blend
.src_rgb
= BLEND_ONE_MINUS_DST_ALPHA
;
1432 else if(!setup
->blend_src
)
1434 else // impossible because there is no dst alpha
1437 if(setup
->blend_dst
== ALPHA
)
1438 blend
.dst_rgb
= BLEND_ONE_MINUS_DST_ALPHA
;
1439 else if(setup
->blend_dst
== (1 | ALPHA
))
1440 blend
.dst_rgb
= BLEND_DST_ALPHA
;
1442 blend
.dst_rgb
= setup
->blend_dst
;
1444 if(setup
->dst
->space
->blend_func_separate
)
1446 blend
.src_alpha
= 1;
1447 blend
.dst_alpha
= 1;
1448 blend
.color_mask
= 0xf;
1452 blend
.src_alpha
= blend
.src_rgb
;
1453 blend
.dst_alpha
= blend
.dst_rgb
;
1454 blend
.color_mask
= 0x7;
1457 setup
->passes
[1].blend
.v
= blend
.v
;
1458 setup
->passes
[1].unpremultiplied
= 1;
1459 setup
->passes
[1].unpremultiplied_one_alpha
= 1;
1460 setup
->passes
[1].draw_geometry
= 0;
1462 if(setup
->dst
->space
->blend_func_separate
)
1466 _cairo_gpu_composite__init_pass(setup
, 2, 0);
1467 setup
->passes
[2].blend
.v
= BLEND_SOURCE_ALPHAONLY
;
1468 setup
->passes
[2].draw_geometry
= 0;
1473 /* 5. No component alpha */
1474 if(!(chan
& CHAN_A
))
1476 _cairo_gpu_composite__make_pass(setup
, 0, CHAN_C
| CHAN_A
| CHAN_KA
);
1480 /* 6. Per-component 4-pass algorithm (only used and needed for SATURATE with component alpha) */
1485 if(!setup
->dst
->space
->per_component
)
1488 if(dstchan
& CHAN_C
)
1492 _cairo_gpu_composite__make_pass(setup
, i
, CHAN_C
| CHAN_A
| CHAN_KA
);
1493 setup
->passes
[i
].blend
.color_mask
= 1 << i
;
1494 setup
->passes
[i
].component
= i
;
1495 assert(setup
->passes
[i
].unpremultiplied
);
1499 if(dstchan
& CHAN_KA
)
1501 _cairo_gpu_composite__make_pass(setup
, i
, CHAN_KA
);
1502 setup
->passes
[i
].blend
.color_mask
&= 8;
1508 separate
= setup
->dst
->space
->blend_func_separate
|| !(dstchan
& CHAN_KA
);
1512 * **** THIS IS WHAT WE USE FOR BLACK COMPONENT ALPHA TEXT ***
1513 * d.c = 0 * s.a + db(s.a) * d.c
1514 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1516 if((zchan
& CHAN_C
) && separate
)
1518 _cairo_gpu_composite__make_pass(setup
, 0, CHAN_A
| CHAN_KA
);
1519 setup
->passes
[0].blend
.src_rgb
= 0;
1520 if(!(dstchan
& CHAN_KA
))
1521 setup
->passes
[0].blend
.src_alpha
= 0;
1525 // d.c = 0, {1 | d.ka} * s.c, K + 0, {s.a} * d.c, K * d.c, d.c
1527 /* 8. Constant component alpha
1528 * d.c = sb(d.ka) * s.c + K * d.c
1529 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1531 if(!(vchan
& (CHAN_A
| CHAN_KA
)) && setup
->dst
->space
->blend_color
&& separate
)
1533 _cairo_gpu_composite__make_pass(setup
, 0, CHAN_C
| CHAN_A
| CHAN_KA
);
1534 if(setup
->passes
[0].blend
.dst_rgb
== BLEND_SRC_ALPHA
)
1536 setup
->passes
[0].blend
.dst_rgb
= BLEND_CONSTANT_COLOR
;
1537 setup
->passes
[0].blend_color
= BLEND_COLOR_A
;
1539 if(setup
->passes
[0].blend
.dst_rgb
== BLEND_ONE_MINUS_SRC_ALPHA
)
1541 setup
->passes
[0].blend
.dst_rgb
= BLEND_CONSTANT_COLOR
;
1542 setup
->passes
[0].blend_color
= BLEND_COLOR_1_MINUS_A
;
1544 if(!(dstchan
& CHAN_KA
) && setup
->passes
[0].blend
.dst_alpha
)
1545 setup
->passes
[0].blend
.dst_alpha
= BLEND_CONSTANT_COLOR
;
1549 /* 9. Constant color, one source blending
1550 **** THIS IS WHAT WE USE FOR SOLID NON-BLACK COMPONENT ALPHA TEXT ***
1551 * d.c = constant_color(s.const_c / s.const_a) * s.a + db(s.a) * d.c
1552 * d.ka = s.ka + db(s.ka) * d.ka
1554 if(!(vchan
& CHAN_C
) && setup
->blend_src
== 1 && setup
->dst
->space
->blend_color
)
1556 _cairo_gpu_composite__make_pass(setup
, 0, CHAN_A
| CHAN_KA
);
1557 setup
->passes
[0].blend_color
= BLEND_COLOR_C_DIV_A
;
1558 setup
->passes
[0].blend
.src_rgb
= BLEND_CONSTANT_COLOR
;
1559 setup
->passes
[0].blend
.src_alpha
= setup
->dst
->space
->blend_func_separate
? 1 : BLEND_CONSTANT_COLOR
;
1563 /* 10. General multipass algorithm */
1566 // d.c = db(s.a) * d.c
1567 // blend_dst is either ALPHA or 1 | ALPHA, otherwise we already hit "no component alpha"
1568 if(dstchan
& CHAN_C
)
1570 _cairo_gpu_composite__make_pass(setup
, i
, CHAN_A
| CHAN_KA
);
1571 setup
->passes
[i
].blend
.src_rgb
= setup
->passes
[i
].blend
.src_alpha
= BLEND_ZERO
;
1572 setup
->passes
[i
].blend
.color_mask
&= ~8;
1576 if((dstchan
& (CHAN_C
| CHAN_KA
)) == (CHAN_C
| CHAN_KA
) && setup
->dst
->space
->blend_func_separate
)
1579 * d.c = sb(d.ka) * s.c + 1 * d.c
1580 * d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1582 _cairo_gpu_composite__make_pass(setup
, i
, CHAN_C
| CHAN_A
| CHAN_KA
);
1583 setup
->passes
[i
].blend
.dst_rgb
= 1;
1588 // d.c = sb(d.ka) * s.c + 1 * d.c
1589 // must be non-trivial, otherwise we already hit "zero color"
1590 if(dstchan
& CHAN_C
)
1592 _cairo_gpu_composite__make_pass(setup
, i
, CHAN_C
| CHAN_A
| CHAN_KA
);
1593 setup
->passes
[i
].blend
.dst_rgb
= setup
->passes
[i
].blend
.dst_alpha
= 1;
1594 setup
->passes
[i
].blend
.color_mask
= 7;
1595 if(setup
->dst
->space
->blend_func_separate
)
1596 setup
->passes
[i
].blend
.dst_alpha
= setup
->passes
[i
].blend
.src_alpha
= 0;
1600 // d.ka = sb(d.ka) * s.ka + db(s.ka) * d.ka
1601 // must be non-trivial, because otherwise db(s.ka) == 0 and we hit "no component alpha"
1602 if(dstchan
& CHAN_KA
)
1604 _cairo_gpu_composite__make_pass(setup
, i
, CHAN_KA
);
1605 setup
->passes
[i
].blend
.color_mask
= 8;
1606 if(setup
->dst
->space
->blend_func_separate
)
1607 setup
->passes
[i
].blend
.dst_rgb
= setup
->passes
[i
].blend
.src_rgb
= 0;
1617 _cairo_gpu_composite_prepare_passes(cairo_gpu_composite_setup_t
* setup
)
1620 for(ipass
= 0; ipass
< setup
->npasses
; ++ipass
)
1622 cairo_gpu_composite_pass_t
* pass
= &setup
->passes
[ipass
];
1626 int vert_care_about_premultiply
= 0;
1627 unsigned operands_mask
= 0;
1628 unsigned schan
= pass
->chan
;
1632 // NOTE: this works because currently unpremultiplied primary color is white+alpha.
1633 // Otherwise, we need to add "component swizzling" to the vertex shader too
1634 if(setup
->primary_chan
& schan
)
1636 frag
|= FRAG_PRIMARY
;
1638 // We assume that the primary color is constant per-primitive. Otherwise, everything is broken!
1640 vert
|= VERT_COLOR_POSTOP
;
1642 if(pass
->blend
.color_mask
& 7)
1643 vert_care_about_premultiply
= 1;
1645 // currently all users of primary colors must bake the constant in and reset constant_chan
1646 // this could be changed, but it would be problematic without ragment programs.
1647 else if(setup
->constant_chan
& schan
)
1648 frag
|= FRAG_CONSTANT
;
1650 // should never happen with current code
1651 if(vert_care_about_premultiply
&& pass
->unpremultiplied
!= setup
->unpremultiplied
)
1653 if(!setup
->dst
->space
->vert_op
)
1656 // XXX: fix op management when we support passthru
1657 if(pass
->unpremultiplied
)
1659 if(pass
->unpremultiplied_one_alpha
)
1660 vert
|= OP_DIV_ALPHA_RGBA
<< VERT_OP_SHIFT
;
1662 vert
|= OP_DIV_ALPHA
<< VERT_OP_SHIFT
;
1665 // TODO: revisit this when implementing a subpixel-antialiasing scan renderer
1666 vert
|= OP_MUL_ALPHA
<< VERT_OP_SHIFT
;
1669 // component alpha must come first because the fixed OpenGL pipeline needs it in the per-component case
1673 for(i = 0; i < MAX_OPERANDS; ++i)
1675 if(setup->operands[i].chan & CHAN_A)
1676 operands[noperands++] = &setup->operands[i];
1680 for(i = 0; i < MAX_OPERANDS; ++i)
1682 if(setup->operands[i].chan & schan && !(setup->operands[i].chan & CHAN_A))
1683 operands[noperands++] = &setup->operands[i];
1687 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1689 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[i
];
1690 if(!(operand
->chan
& schan
))
1693 if(pass
->component
< 3 && operand
->chan
& CHAN_A
)
1694 operand
->unpremultiplied
= 1;
1696 if(!(operand
->chan
& schan
& ~CHAN_KA
))
1697 operand
->unpremultiplied
= setup
->unpremultiplied
;
1700 if(pass
->blend
.color_mask
& 7)
1702 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1704 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[i
];
1705 if(!(operand
->chan
& schan
))
1709 !(pass
->component
< 3 && operand
->chan
& CHAN_A
)
1710 && (operand
->chan
& schan
& ~CHAN_KA
)
1711 && (operand
->unpremultiplied
!= pass
->unpremultiplied
)
1714 pass
->operands
[noperands
++] = i
;
1715 operands_mask
|= 1 << i
;
1719 noperands_op
= noperands
;
1722 // we don't need RGB, so we don't need to apply any operation
1725 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1727 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[i
];
1728 if(!(operand
->chan
& schan
) || (operands_mask
& (1 << i
)))
1731 if(operand
->chan
& CHAN_A
)
1733 pass
->operands
[noperands
++] = i
;
1734 operands_mask
|= 1 << i
;
1738 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1740 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[i
];
1741 if(!(operand
->chan
& schan
) || (operands_mask
& (1 << i
)))
1744 pass
->operands
[noperands
++] = i
;
1745 operands_mask
|= 1 << i
;
1748 for(i
= 0; i
< noperands
; ++i
)
1750 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[(int)pass
->operands
[i
]];
1752 vert
|= (operand
->has_coords
? (i
+ 1) : VERT_TEX_GEN
) << (VERT_TEX_SHIFT
+ i
* VERT_TEX_BITS
);
1754 if(pass
->component
< 3 && operand
->chan
& CHAN_A
)
1756 frag
|= ((1 + pass
->component
) << FRAG_COMPONENT_SHIFT
);
1758 if(pass
->unpremultiplied
)
1759 frag
|= ((operand
->unpremultiplied
? FRAG_TEX_COLOR_111CA
: FRAG_TEX_COLOR_111C
) << (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
));
1763 else if(!setup
->dst
->space
->tex_aaaa_111a
)
1765 // we assume we failed earlier if we have to
1766 frag
|= FRAG_TEX_COLOR_RGBA
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1768 else if((pass
->blend
.color_mask
& 7) && operand
->chan
& schan
& ~CHAN_KA
)
1769 frag
|= FRAG_TEX_COLOR_RGBA
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1770 else if(!pass
->unpremultiplied
)
1771 frag
|= FRAG_TEX_COLOR_AAAA
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1773 frag
|= FRAG_TEX_COLOR_111A
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1775 if(operand
->gradient_denominator
&& operand
->src
->type
== CAIRO_PATTERN_TYPE_RADIAL
)
1776 frag
|= FRAG_TEX_RADIAL
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1778 if(operand
->gradient_discontinuous
)
1779 frag
|= FRAG_TEX_DISCONTINUOUS
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1784 if(noperands_op
> 1)
1786 assert(noperands_op
== 2);
1787 frag
|= FRAG_OPPOS_TEX1
;
1790 if(pass
->unpremultiplied
)
1792 if(pass
->unpremultiplied_one_alpha
)
1793 frag
|= OP_DIV_ALPHA_RGBA
<< FRAG_OP_SHIFT
;
1795 frag
|= OP_DIV_ALPHA
<< FRAG_OP_SHIFT
;
1798 frag
|= OP_MUL_ALPHA
<< FRAG_OP_SHIFT
;
1801 if(!_cairo_gpu_space_is_frag_supported(setup
->dst
->space
, frag
))
1804 for(i
= noperands
; i
< MAX_OPERANDS
; ++i
)
1805 pass
->operands
[i
] = -1;
1813 static cairo_status_t
1814 _cairo_gpu_composite_plan(cairo_gpu_composite_setup_t
* setup
)
1816 if(0 && setup
->operands
[0].src
&& setup
->operands
[0].src
->type
== CAIRO_PATTERN_TYPE_LINEAR
)
1818 const char* blends_s_a
[] = {"0", "d.c", "s.a * d.c", "(1 - s.a) * d.c"};
1819 const char* blends_s_ka
[] = {"0", "d.ka", "s.ka * d.ka", "(1 - s.ka) * d.ka"};
1820 const char* blends_d_ka
[] = {"0", "s.c", "d.ka * s.c", "(1 - d.ka) * s.c"};
1821 const char* blends_d_ka_ka
[] = {"0", "s.ka", "d.ka * s.ka", "(1 - d.ka) * s.ka"};
1822 const char* chans
[] = {"", "c", "a", "c, a", "ka", "c, ka", "a, ka", "c, a, ka"};
1825 if(setup
->operands
[0].src
)
1826 printf(" op0 = (%s)", chans
[setup
->operands
[0].chan
]);
1827 if(setup
->operands
[0].src
)
1828 printf(" op1 = (%s)", chans
[setup
->operands
[0].chan
]);
1829 if(setup
->primary_chan
)
1830 printf(" primary = (%s)", chans
[setup
->primary_chan
]);
1832 printf(" smooth = %i", setup
->smooth
);
1834 printf(" const = (%f %f %f) (%f %f %f) %f\n", setup
->c
.r
, setup
->c
.g
, setup
->c
.b
, setup
->a
.r
, setup
->a
.g
, setup
->a
.b
, setup
->ka
);
1836 if(setup
->dst
->base
.content
!= CAIRO_CONTENT_ALPHA
)
1837 printf("d.c = %s + %s\n", blends_d_ka
[setup
->blend_src
], blends_s_a
[setup
->blend_dst
]);
1838 if(setup
->dst
->base
.content
!= CAIRO_CONTENT_COLOR
)
1839 printf("d.ka = %s + %s\n", blends_d_ka_ka
[setup
->blend_src
], blends_s_ka
[setup
->blend_dst
]);
1842 setup
->npasses
= _cairo_gpu_composite_plan_passes(setup
);
1843 if(setup
->npasses
< 0)
1844 return CAIRO_INT_STATUS_UNSUPPORTED
;
1846 if(!_cairo_gpu_composite_prepare_passes(setup
))
1847 return CAIRO_INT_STATUS_UNSUPPORTED
;
1849 return _cairo_gpu_composite_prepare_operands(setup
);
1853 _cairo_gpu_composite_draw_prepare(cairo_gpu_context_t
* ctx
, cairo_gpu_composite_setup_t
* setup
)
1858 memset(setup
->cur_operands
, 0xff, sizeof(setup
->cur_operands
));
1859 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1861 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[i
];
1862 if(operand
->surface
&& !operand
->texture
)
1864 operand
->texture
= _cairo_gpu_surface_begin_texture(operand
->surface
, ctx
, i
);
1868 for(ipass
= 0; ipass
< setup
->npasses
; ++ipass
)
1870 cairo_gpu_composite_pass_t
* pass
= &setup
->passes
[ipass
];
1871 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1873 if(pass
->operands
[i
] >= 0)
1875 if(setup
->operands
[(int)pass
->operands
[i
]].texture
->target_idx
== TARGET_RECTANGLE
)
1876 pass
->frag
|= FRAG_TEX_RECTANGLE
<< (FRAG_TEX_SHIFT
+ i
* FRAG_TEX_BITS
);
1882 static inline cairo_gpu_context_t
*
1883 _cairo_gpu_composite_draw_prepare_bind(cairo_gpu_composite_setup_t
* setup
)
1885 cairo_gpu_context_t
* ctx
= _cairo_gpu_surface_lookup_context(setup
->dst
, FB_DRAW
);
1886 _cairo_gpu_composite_draw_prepare(ctx
, setup
);
1887 _cairo_gpu_surface_bind_to(setup
->dst
, ctx
, FB_DRAW
);
1891 static inline cairo_gpu_context_t
*
1892 _cairo_gpu_composite_plan_prepare_bind(cairo_gpu_composite_setup_t
* setup
)
1894 cairo_status_t status
= _cairo_gpu_composite_plan(setup
);
1897 return _cairo_gpu_composite_draw_prepare_bind(setup
);
1901 _cairo_gpu_composite_draw_inner(cairo_gpu_context_t
* ctx
, cairo_gpu_composite_setup_t
* setup
, cairo_gpu_geometry_t
* geometry
)
1904 int discard_geometry
= 0;
1905 _cairo_gpu_context_set_translation(ctx
, setup
->dst_x
- setup
->obj_x
, setup
->dst_y
- setup
->obj_y
);
1907 _cairo_gpu_context_set_geometry(ctx
, geometry
);
1908 else if(setup
->dst
->has_clip
)
1913 pixman_box32_t
*pboxes
= pixman_region32_rectangles(&setup
->dst
->clip
.rgn
, &nboxes
);
1916 int x1
= MAX(setup
->dst_x
, pboxes
[0].x1
);
1917 int y1
= MAX(setup
->dst_y
, pboxes
[0].y1
);
1918 int x2
= MIN(setup
->dst_x
+ setup
->width
, pboxes
[0].x2
);
1919 int y2
= MIN(setup
->dst_y
+ setup
->height
, pboxes
[0].y2
);
1920 setup
->obj_x
+= x1
- setup
->dst_x
;
1921 setup
->obj_y
+= y1
- setup
->dst_y
;
1922 setup
->width
= x2
- x1
;
1923 setup
->height
= y2
- y1
;
1925 geometry
= _cairo_gpu_context_geometry(ctx
, GEOM_TEMP
);
1926 v
= vertices
= _cairo_gpu_geometry_begin(ctx
, geometry
, PRIM_QUADS
, nboxes
* 4, 2, 0, 0);
1928 for(i
= 0; i
< nboxes
; ++i
)
1930 int x1
= MAX(setup
->dst_x
, pboxes
[i
].x1
);
1931 int y1
= MAX(setup
->dst_y
, pboxes
[i
].y1
);
1932 int x2
= MIN(setup
->dst_x
+ setup
->width
, pboxes
[i
].x2
);
1933 int y2
= MIN(setup
->dst_y
+ setup
->height
, pboxes
[i
].y2
);
1935 if(x1
>= x2
|| y1
>= y2
)
1938 //printf("%i %i %i %i %i\n", i, x1, y1, x2, y2);
1940 _cairo_gpu_emit_rect(&v
, x1
, y1
, x2
- x1
, y2
- y1
);
1942 _cairo_gpu_geometry_end(ctx
, geometry
, (v
- vertices
) >> 1);
1944 _cairo_gpu_context_set_translation(ctx
, setup
->dst_x
- setup
->obj_x
, setup
->dst_y
- setup
->obj_y
);
1945 _cairo_gpu_context_set_geometry(ctx
, geometry
);
1946 discard_geometry
= 1;
1949 for(ipass
= 0; ipass
< setup
->npasses
; ++ipass
)
1951 if(_cairo_gpu_composite__set_pass(ctx
, setup
, &setup
->passes
[ipass
]) && geometry
)
1953 _cairo_gpu_context_set_raster(ctx
, setup
->smooth
);
1954 _cairo_gpu_draw_clipped(ctx
, setup
->dst
, setup
->dst_x
, setup
->dst_y
, setup
->width
, setup
->height
);
1958 _cairo_gpu_context_set_raster(ctx
, 0);
1959 _cairo_gpu_context_set_viewport(ctx
, 0, 0, setup
->dst
->width
, setup
->dst
->height
);
1960 _cairo_gpu_context_draw_rect(ctx
, setup
->obj_x
, setup
->obj_y
, setup
->width
, setup
->height
);
1963 if(discard_geometry
)
1964 _cairo_gpu_geometry_put(ctx
, geometry
);
1968 _cairo_gpu_composite_end(cairo_gpu_context_t
* ctx
, cairo_gpu_composite_setup_t
* setup
)
1972 // TODO: may want to split the function here
1974 for(i
= 0; i
< MAX_OPERANDS
; ++i
)
1976 cairo_gpu_composite_operand_t
* operand
= &setup
->operands
[i
];
1977 if(operand
->chan
& setup
->operands_chan
)
1979 if(operand
->surface
)
1980 _cairo_gpu_surface_end_texture(ctx
, operand
->surface
, operand
->texture
);
1982 if(operand
->owns_texture
)
1984 _cairo_gpu_texture_fini(ctx
, operand
->texture
);
1985 free(operand
->texture
);
1986 operand
->texture
= 0;
1987 operand
->owns_texture
= 0;
1994 _cairo_gpu_composite_modified(cairo_gpu_composite_setup_t
* setup
)
1996 _cairo_gpu_surface_modified(setup
->dst
, setup
->dst_x
, setup
->dst_y
, setup
->width
, setup
->height
);
2000 _cairo_gpu_composite_draw_once_modify(cairo_gpu_context_t
* ctx
, cairo_gpu_composite_setup_t
* setup
, cairo_gpu_geometry_t
* geometry
)
2002 _cairo_gpu_composite_draw_inner(ctx
, setup
, geometry
);
2003 _cairo_gpu_composite_modified(setup
);
2004 _cairo_gpu_composite_end(ctx
, setup
);